file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-12.1k) | suffix (large_string, lengths 0-12k) | middle (large_string, lengths 0-7.51k) | fim_type (large_string, 4 values) |
---|---|---|---|---|
full-site.js | $choicesModal.find('.modal-footer').html("");
var $firstButton;
for (var i in buttons) {
var btn = buttons[i];
var attrsString = "";
for (var key in btn.attrs) {
var value = btn.attrs[key];
attrsString += key + '="' + value + '" ';
}
var $button = $('<a target="_self" ' + attrsString + ' onclick="' + btn.clickAction + '">' + btn.textValue + '</a>');
if (!$firstButton) {
$firstButton = $button;
}
$choicesModal.find('.modal-footer').append($button);
}
$choicesModal.modal({keyboard: true});
$choicesModal.on('shown.bs.modal', function () {
if ($firstButton && window.location == window.parent.location) {
$firstButton.focus();
}
});
$choicesModal.modal('show');
$(".btnPrint").printPage();
$choicesModal.off('hidden.bs.modal');
$choicesModal.on('hidden.bs.modal', function (e) {
if (onCancelFunction)
onCancelFunction();
});
}
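// Illustrative note (assumption, not from the original source): judging from the loop above,
// the `buttons` argument is expected to be an array (or map) of objects shaped roughly like
//   [{ textValue: "OK", clickAction: "closeDialog()", attrs: { "class": "btn btn-primary" } }]
// where each entry's `attrs` map is serialized onto the generated <a> tag, `clickAction`
// becomes its onclick handler, and `textValue` is its label.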
function htmlEncode(str) {
return str.replace(/</g, '&lt;').replace(/>/g, '&gt;').replace(/'/g, '&#39;').replace(/"/g, '&quot;');
}
function closeDialog() {
$('#choices-modal').modal('hide');
}
function userStateChange(data, triggerLoginEvent) {
var data = typeof data == "undefined" ? null : data;
// $('.alert-danger').remove();
$('.login-slid-div').slideUp(300);
if (data) {
if(data.user.avatar){
$(".userImage").html('<i><img src="/'+data.user.avatar+'" /></i> ' + data.user.username + '<span class="caret"></span>');// responsive
$('.dev-user-profile').html('<i><img class="img-circle dev-profile-image" src="/'+data.user.avatar+'"/></i> '+data.user.username+'<span class="caret"></span>')
}else{
$(".userImage").html('<i class="fas fa-user-circle" ></i> ' + data.user.username + '<span class="caret"></span>');// responsive
$('.dev-user-profile').html('<i class="fas fa-user-circle fa-2x" style="margin-top: 5px;"></i> '+data.user.username+'<span class="caret"></span>')
}
$('.dev-anon-container').addClass('hide');
$('.dev-login-in').removeClass('hide');
// responsive
$('.userNotLogged').addClass('hide');
$('.userLogged').removeClass('hide');
if (data.user.materialCreate) {
$('.dev-material-create').removeClass('hide');
}
if (data.user.backend) {
$('.dev-backend-control').removeClass('hide');
}
if (data.user.comicsCreate) {
$('.dev-comics-create').removeClass('hide');
}
isLoggedIn = true;
if (triggerLoginEvent) {
$(window).trigger('user.loggedin');
}
$('.top-note').addClass('hidden');
for (var variableName in data.injectJSVariables) {
window[variableName] = data.injectJSVariables[variableName];
}
for (var fileId in data.injectFiles) {
loadScript(data.injectFiles[fileId], null, fileId);
onLogoutRemoveIds.push(fileId);
}
if (typeof afterLoginPerformAction === 'function') {
afterLoginPerformAction();
afterLoginPerformAction = null;
}
// if($('#login-popup').is(':visible')){
// lightcase.close();
// }
} else | }
}
function showAuthError(error) {
if (++failCount >= 3 || error.indexOf("Captcha") != -1) {
location.href = loginUrl;
} else {
showNotification('error',error);
// $('.dev-login-li').find('.alert').remove();
// $('.dev-login-li').prepend('<div class="alert alert-danger remove-5s">'
// + error + '</div>');
// if($('#ajax-form-login-resp').is(':visible')) $('#login-popup').lightcase('resize');
}
}
function SocialNetworkConnect(element) {
newWindow = window.open($(element).attr("data-url"), '', 'height=800, width=1000');
if (window.focus) {
newWindow.focus();
}
timer = setInterval(checkChild, 500);
}
function checkChild() {
if (errorMessage != false) {
if (newWindow.closed) {
msg = '<div class="alert alert-danger remove-5s">' + socialNetworkErrorMessage + '</div>';
if ($('.dev-login-li .alert').length > 0) {
$('.dev-login-li .alert').remove();
}
$('.dev-login-li').prepend(msg);
clearInterval(timer);
}
}
}
function show_email_modal() {
document.getElementById('form_email').value = "";
// $('#form_email').css('text-indent', '35px');
$('#form-modal .help-error').remove();
$('#form-modal .form-group').removeClass('is-invalid');
$('#form-modal').modal('show');
}
function getprayerTimeData() {
$.ajax({
url: getPrayerInfoUrl,
success: preparePrayerTimeWidget
});
}
// increaseFontSize and decreaseFontSize
var min = 16;
var max = 20;
function increaseFontSize() {
var p = $('.details-text');
for (i = 0; i < p.length; i++) {
if (p[i].style.fontSize) {
var s = parseInt(p[i].style.fontSize.replace("px", ""));
} else {
var s = 18;
}
if (s != max) {
s += 1;
}
p[i].style.fontSize = s + "px"
}
}
function decreaseFontSize() {
var p = $('.details-text');
for (i = 0; i < p.length; i++) {
if (p[i].style.fontSize) {
var s = parseInt(p[i].style.fontSize.replace("px", ""));
} else {
var s = 18;
}
if (s != min) {
s -= 1;
}
p[i].style.fontSize = s + "px"
}
}
function resetFontSize() {
var p = $('.details-text');
for (i = 0; i < p.length; i++) {
p[i].style.fontSize = "18px"
}
}
$('body').on('click','.largeFont',function () {
increaseFontSize();
});
$('body').on('click','.smallFont',function () {
decreaseFontSize();
});
$('body').on('click','.normalFont',function () {
resetFontSize();
});
function sharePopup(url, w, h) {
var left = (screen.width / 2) - (w / 2);
var top = (screen.height / 2) - (h / 2);
return window.open(url, "share window", 'toolbar=no, location=no, directories=no, status=no, menubar=no, scrollbars=yes, copyhistory=no, width=' + w + ', height=' + h + ', top=' + top + ', left=' + left);
}
function loginToChat() {
$.ajax({
url: chatLoginUrl,
success: function (data) {
if (reoloadPageForChat && data.loggedIn) {
window.location.reload(true);
return;
}
loadScript('https://repository.chatwee.com/scripts/72e4b84d2ef104b50494d305ab4bde88.js', null, 'chatwee-js-tag');
}
});
}
function logoutFromChat() {
$.ajax({
url: chatLogoutUrl,
success: function() {
$('#chatwee-js-tag').remove();
}
});
}
$(document).on('shown.bs.tab', 'a[data-toggle="tab"]',function (e) {
var target = $(e.target).attr("href") // activated tab
if(target=='#tab_default_2'){
setTimeout(function(){
initFormValidation() ;
},200)
}
});
jQuery(document).ready(function ($) {
// $(window).on('user.loggedin', loginToChat);
// $(window).on('user.loggedout', logoutFromChat);
$('form[name=searchForm]').submit(function (e) {
if (typeof inAngularLayout === 'undefined') {
e.preventDefault();
$(this).data('submitted', | {
$('.dev-user-profile').html("");
// $('[type="password"]').val("");
$('.dev-anon-container').removeClass('hide');
$('.dev-login-in').addClass('hide');
$('#dev-material-create').addClass('hide');
$('#dev-backend-control').addClass('hide');
$('#dev-comics-create').addClass('hide');
if (typeof timerNotificationsInterval !== 'undefined' && timerNotificationsInterval) {
clearInterval(timerNotificationsInterval);
}
var userStatusLognout = isLoggedIn;
isLoggedIn = false;
if (userStatusLognout) {
$(window).trigger('user.loggedout');
}
$('.top-note').removeClass('hidden');
for (var fileIdIndex in onLogoutRemoveIds) {
$('#' + onLogoutRemoveIds[fileIdIndex]).remove();
} | conditional_block |
embeddings.rs | a> PartialOrd for WordSimilarity<'a> {
fn partial_cmp(&self, other: &WordSimilarity) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl<'a> Eq for WordSimilarity<'a> {}
impl<'a> PartialEq for WordSimilarity<'a> {
fn eq(&self, other: &WordSimilarity) -> bool {
self.cmp(other) == Ordering::Equal
}
}
#[derive(Debug, Fail)]
pub enum BuilderError {
#[fail(
display = "invalid embedding shape, expected: {}, got: {}",
expected_len, len
)]
InvalidEmbeddingLength { expected_len: usize, len: usize },
#[fail(display = "word not unique: {}", word)]
DuplicateWord { word: String },
}
/// Builder for word embedding matrices.
///
/// This builder can be used to construct an embedding matrix. The builder
/// does not assume that the number of embeddings is known ahead of time.
/// The embedding size is determined by the size of the first embedding that
/// is added. All subsequently added embeddings should have the same size.
pub struct Builder {
words: Vec<String>,
indices: HashMap<String, usize>,
embeddings: Vec<Array1<f32>>,
}
impl Builder {
/// Create a builder.
pub fn new() -> Self {
Builder {
words: Vec::new(),
indices: HashMap::new(),
embeddings: Vec::new(),
}
}
/// Construct the final embedding matrix.
///
/// The `None` is returned when no embedding was added to the builder.
pub fn build(self) -> Option<Embeddings> {
let embed_len = self.embeddings.first()?.shape()[0];
let mut matrix = Array2::zeros((self.embeddings.len(), embed_len));
for (idx, embed) in self.embeddings.into_iter().enumerate() {
matrix.index_axis_mut(Axis(0), idx).assign(&embed);
}
Some(Embeddings {
embed_len,
indices: self.indices,
words: self.words,
matrix,
})
}
/// Add a new embedding to the builder.
///
/// An `Err` value is returned when the word has been inserted in the
/// builder before or when the embedding has a different size than
/// previously inserted embeddings.
pub fn push<S, E>(&mut self, word: S, embedding: E) -> Result<(), Error>
where
S: Into<String>,
E: Into<Array1<f32>>,
{
let word = word.into();
let embedding = embedding.into();
// Check that the embedding has the same length as embeddings that
// were inserted before.
if let Some(first) = self.embeddings.first() {
ensure!(
embedding.shape() == first.shape(),
BuilderError::InvalidEmbeddingLength {
expected_len: first.shape()[0],
len: embedding.shape()[0],
}
);
}
// Insert the word if it was not inserted before.
match self.indices.entry(word.to_owned()) { | self.words.push(word.clone());
self.embeddings.push(embedding);
Ok(())
}
}
/// Word embeddings.
///
/// This data structure stores word embeddings (also known as *word vectors*)
/// and provides some useful methods on the embeddings, such as similarity
/// and analogy queries.
pub struct Embeddings {
matrix: Array2<f32>,
embed_len: usize,
indices: HashMap<String, usize>,
words: Vec<String>,
}
impl Embeddings {
/// Perform an analogy query.
///
/// This method returns words that are close in vector space to the analogy
/// query `word1` is to `word2` as `word3` is to `?`. More concretely,
/// it searches embeddings that are similar to:
///
/// *embedding(word2) - embedding(word1) + embedding(word3)*
///
/// At most, `limit` results are returned.
pub fn analogy(
&self,
word1: &str,
word2: &str,
word3: &str,
limit: usize,
) -> Option<Vec<WordSimilarity>> {
self.analogy_by(word1, word2, word3, limit, |embeds, embed| {
embeds.dot(&embed)
})
}
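// Illustrative call (not part of the original source): with normalized embeddings,
// `embeddings.analogy("man", "king", "woman", 1)` ranks words by similarity to
// embedding("king") - embedding("man") + embedding("woman"), so a well-trained model
// would typically return something like "queen".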
/// Perform an analogy query using the given similarity function.
///
/// This method returns words that are close in vector space to the analogy
/// query `word1` is to `word2` as `word3` is to `?`. More concretely,
/// it searches embeddings that are similar to:
///
/// *embedding(word2) - embedding(word1) + embedding(word3)*
///
/// At most, `limit` results are returned.
pub fn analogy_by<F>(
&self,
word1: &str,
word2: &str,
word3: &str,
limit: usize,
similarity: F,
) -> Option<Vec<WordSimilarity>>
where
F: FnMut(ArrayView2<f32>, ArrayView1<f32>) -> Array1<f32>,
{
let embedding1 = self
.indices
.get(word1)
.map(|idx| self.matrix.index_axis(Axis(0), *idx).to_owned())?;
let embedding2 = self
.indices
.get(word2)
.map(|idx| self.matrix.index_axis(Axis(0), *idx).to_owned())?;
let embedding3 = self
.indices
.get(word3)
.map(|idx| self.matrix.index_axis(Axis(0), *idx).to_owned())?;
let embedding = (embedding2 - embedding1) + embedding3;
let skip = [word1, word2, word3].iter().cloned().collect();
Some(self.similarity_(embedding.view(), &skip, limit, similarity))
}
/// Get (a view of) the raw embedding matrix.
pub fn data(&self) -> ArrayView2<f32> {
self.matrix.view()
}
/// Return the length (in vector components) of the word embeddings.
pub fn embed_len(&self) -> usize {
self.embed_len
}
/// Get the embedding of a word.
pub fn embedding(&self, word: &str) -> Option<ArrayView1<f32>> {
self.indices
.get(word)
.map(|idx| self.matrix.index_axis(Axis(0), *idx))
}
/// Get the mapping from words to row indices of the embedding matrix.
pub fn indices(&self) -> &HashMap<String, usize> {
&self.indices
}
/// Get an iterator over pairs of words and the corresponding embeddings.
pub fn iter(&self) -> Iter {
Iter {
embeddings: self,
inner: self.words.iter().enumerate(),
}
}
/// Normalize the embeddings using their L2 (euclidean) norms.
///
/// **Note:** when you are using the output of e.g. word2vec, you should
/// normalize the embeddings to get good query results.
pub fn normalize(&mut self) {
for mut embedding in self.matrix.outer_iter_mut() {
let l2norm = embedding.dot(&embedding).sqrt();
if l2norm != 0f32 {
embedding /= l2norm;
}
}
}
/// Find words that are similar to the query word.
///
/// The similarity between two words is defined by the dot product of
/// the embeddings. If the vectors are unit vectors (e.g. by virtue of
/// calling `normalize`), this is the cosine similarity. At most, `limit`
/// results are returned.
pub fn similarity(&self, word: &str, limit: usize) -> Option<Vec<WordSimilarity>> {
self.similarity_by(word, limit, |embeds, embed| embeds.dot(&embed))
}
/// Find words that are similar to the query word using the given similarity
/// function.
///
/// The similarity function should return, given the embeddings matrix and
/// the word vector, a vector of similarity scores. At most, `limit` results
/// are returned.
pub fn similarity_by<F>(
&self,
word: &str,
limit: usize,
similarity: F,
) -> Option<Vec<WordSimilarity>>
where
F: FnMut(ArrayView2<f32>, ArrayView1<f32>) -> Array1<f32>,
{
self.indices.get(word).map(|idx| {
let embedding = self.matrix.index_axis(Axis(0), *idx);
let mut skip = HashSet::new();
skip.insert(word);
self.similarity_(embedding, &skip, limit, similarity)
})
}
fn similarity_<F>(
&self,
embed: ArrayView1<f32>,
skip: &HashSet<&str>,
limit: usize,
mut similarity: F,
) -> Vec<WordSimilarity>
where
F: FnMut(ArrayView2<f32>, ArrayView1<f32>) -> Array1<f32>,
{
let sims = similarity(self.matrix.view(), embed);
let mut results: | Entry::Vacant(vacant) => vacant.insert(self.words.len()),
Entry::Occupied(_) => bail!(BuilderError::DuplicateWord { word: word }),
};
| random_line_split |
embeddings.rs | > PartialOrd for WordSimilarity<'a> {
fn partial_cmp(&self, other: &WordSimilarity) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl<'a> Eq for WordSimilarity<'a> {}
impl<'a> PartialEq for WordSimilarity<'a> {
fn eq(&self, other: &WordSimilarity) -> bool {
self.cmp(other) == Ordering::Equal
}
}
#[derive(Debug, Fail)]
pub enum BuilderError {
#[fail(
display = "invalid embedding shape, expected: {}, got: {}",
expected_len, len
)]
InvalidEmbeddingLength { expected_len: usize, len: usize },
#[fail(display = "word not unique: {}", word)]
DuplicateWord { word: String },
}
/// Builder for word embedding matrices.
///
/// This builder can be used to construct an embedding matrix. The builder
/// does not assume that the number of embeddings is known ahead of time.
/// The embedding size is determined by the size of the first embedding that
/// is added. All subsequently added embeddings should have the same size.
pub struct Builder {
words: Vec<String>,
indices: HashMap<String, usize>,
embeddings: Vec<Array1<f32>>,
}
impl Builder {
/// Create a builder.
pub fn new() -> Self {
Builder {
words: Vec::new(),
indices: HashMap::new(),
embeddings: Vec::new(),
}
}
/// Construct the final embedding matrix.
///
/// The `None` is returned when no embedding was added to the builder.
pub fn build(self) -> Option<Embeddings> {
let embed_len = self.embeddings.first()?.shape()[0];
let mut matrix = Array2::zeros((self.embeddings.len(), embed_len));
for (idx, embed) in self.embeddings.into_iter().enumerate() {
matrix.index_axis_mut(Axis(0), idx).assign(&embed);
}
Some(Embeddings {
embed_len,
indices: self.indices,
words: self.words,
matrix,
})
}
/// Add a new embedding to the builder.
///
/// An `Err` value is returned when the word has been inserted in the
/// builder before or when the embedding has a different size than
/// previously inserted embeddings.
pub fn push<S, E>(&mut self, word: S, embedding: E) -> Result<(), Error>
where
S: Into<String>,
E: Into<Array1<f32>>,
{
let word = word.into();
let embedding = embedding.into();
// Check that the embedding has the same length as embeddings that
// were inserted before.
if let Some(first) = self.embeddings.first() {
ensure!(
embedding.shape() == first.shape(),
BuilderError::InvalidEmbeddingLength {
expected_len: first.shape()[0],
len: embedding.shape()[0],
}
);
}
// Insert the word if it was not inserted before.
match self.indices.entry(word.to_owned()) {
Entry::Vacant(vacant) => vacant.insert(self.words.len()),
Entry::Occupied(_) => bail!(BuilderError::DuplicateWord { word: word }),
};
self.words.push(word.clone());
self.embeddings.push(embedding);
Ok(())
}
}
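// Illustrative usage of the builder (not part of the original source; assumes a caller
// that returns `Result<_, failure::Error>` so that `?` can be used):
//   let mut builder = Builder::new();
//   builder.push("hello", vec![0.1_f32, 0.2, 0.3])?;
//   builder.push("world", vec![0.3_f32, 0.1, 0.2])?;
//   let embeddings = builder.build().expect("at least one embedding was pushed");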
/// Word embeddings.
///
/// This data structure stores word embeddings (also known as *word vectors*)
/// and provides some useful methods on the embeddings, such as similarity
/// and analogy queries.
pub struct Embeddings {
matrix: Array2<f32>,
embed_len: usize,
indices: HashMap<String, usize>,
words: Vec<String>,
}
impl Embeddings {
/// Perform an analogy query.
///
/// This method returns words that are close in vector space to the analogy
/// query `word1` is to `word2` as `word3` is to `?`. More concretely,
/// it searches embeddings that are similar to:
///
/// *embedding(word2) - embedding(word1) + embedding(word3)*
///
/// At most, `limit` results are returned.
pub fn analogy(
&self,
word1: &str,
word2: &str,
word3: &str,
limit: usize,
) -> Option<Vec<WordSimilarity>> {
self.analogy_by(word1, word2, word3, limit, |embeds, embed| {
embeds.dot(&embed)
})
}
/// Perform an analogy query using the given similarity function.
///
/// This method returns words that are close in vector space to the analogy
/// query `word1` is to `word2` as `word3` is to `?`. More concretely,
/// it searches embeddings that are similar to:
///
/// *embedding(word2) - embedding(word1) + embedding(word3)*
///
/// At most, `limit` results are returned.
pub fn analogy_by<F>(
&self,
word1: &str,
word2: &str,
word3: &str,
limit: usize,
similarity: F,
) -> Option<Vec<WordSimilarity>>
where
F: FnMut(ArrayView2<f32>, ArrayView1<f32>) -> Array1<f32>,
{
let embedding1 = self
.indices
.get(word1)
.map(|idx| self.matrix.index_axis(Axis(0), *idx).to_owned())?;
let embedding2 = self
.indices
.get(word2)
.map(|idx| self.matrix.index_axis(Axis(0), *idx).to_owned())?;
let embedding3 = self
.indices
.get(word3)
.map(|idx| self.matrix.index_axis(Axis(0), *idx).to_owned())?;
let embedding = (embedding2 - embedding1) + embedding3;
let skip = [word1, word2, word3].iter().cloned().collect();
Some(self.similarity_(embedding.view(), &skip, limit, similarity))
}
/// Get (a view of) the raw embedding matrix.
pub fn data(&self) -> ArrayView2<f32> {
self.matrix.view()
}
/// Return the length (in vector components) of the word embeddings.
pub fn embed_len(&self) -> usize {
self.embed_len
}
/// Get the embedding of a word.
pub fn embedding(&self, word: &str) -> Option<ArrayView1<f32>> {
self.indices
.get(word)
.map(|idx| self.matrix.index_axis(Axis(0), *idx))
}
/// Get the mapping from words to row indices of the embedding matrix.
pub fn indices(&self) -> &HashMap<String, usize> |
/// Get an iterator over pairs of words and the corresponding embeddings.
pub fn iter(&self) -> Iter {
Iter {
embeddings: self,
inner: self.words.iter().enumerate(),
}
}
/// Normalize the embeddings using their L2 (euclidean) norms.
///
/// **Note:** when you are using the output of e.g. word2vec, you should
/// normalize the embeddings to get good query results.
pub fn normalize(&mut self) {
for mut embedding in self.matrix.outer_iter_mut() {
let l2norm = embedding.dot(&embedding).sqrt();
if l2norm != 0f32 {
embedding /= l2norm;
}
}
}
/// Find words that are similar to the query word.
///
/// The similarity between two words is defined by the dot product of
/// the embeddings. If the vectors are unit vectors (e.g. by virtue of
/// calling `normalize`), this is the cosine similarity. At most, `limit`
/// results are returned.
pub fn similarity(&self, word: &str, limit: usize) -> Option<Vec<WordSimilarity>> {
self.similarity_by(word, limit, |embeds, embed| embeds.dot(&embed))
}
/// Find words that are similar to the query word using the given similarity
/// function.
///
/// The similarity function should return, given the embeddings matrix and
/// the word vector, a vector of similarity scores. At most, `limit` results
/// are returned.
pub fn similarity_by<F>(
&self,
word: &str,
limit: usize,
similarity: F,
) -> Option<Vec<WordSimilarity>>
where
F: FnMut(ArrayView2<f32>, ArrayView1<f32>) -> Array1<f32>,
{
self.indices.get(word).map(|idx| {
let embedding = self.matrix.index_axis(Axis(0), *idx);
let mut skip = HashSet::new();
skip.insert(word);
self.similarity_(embedding, &skip, limit, similarity)
})
}
fn similarity_<F>(
&self,
embed: ArrayView1<f32>,
skip: &HashSet<&str>,
limit: usize,
mut similarity: F,
) -> Vec<WordSimilarity>
where
F: FnMut(ArrayView2<f32>, ArrayView1<f32>) -> Array1<f32>,
{
let sims = similarity(self.matrix.view(), embed);
let mut results | {
&self.indices
} | identifier_body |
embeddings.rs |
}
}
impl<'a> PartialOrd for WordSimilarity<'a> {
fn partial_cmp(&self, other: &WordSimilarity) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl<'a> Eq for WordSimilarity<'a> {}
impl<'a> PartialEq for WordSimilarity<'a> {
fn eq(&self, other: &WordSimilarity) -> bool {
self.cmp(other) == Ordering::Equal
}
}
#[derive(Debug, Fail)]
pub enum BuilderError {
#[fail(
display = "invalid embedding shape, expected: {}, got: {}",
expected_len, len
)]
InvalidEmbeddingLength { expected_len: usize, len: usize },
#[fail(display = "word not unique: {}", word)]
DuplicateWord { word: String },
}
/// Builder for word embedding matrices.
///
/// This builder can be used to construct an embedding matrix. The builder
/// does not assume that the number of embeddings is known ahead of time.
/// The embedding size is determined by the size of the first embedding that
/// is added. All subsequently added embeddings should have the same size.
pub struct Builder {
words: Vec<String>,
indices: HashMap<String, usize>,
embeddings: Vec<Array1<f32>>,
}
impl Builder {
/// Create a builder.
pub fn new() -> Self {
Builder {
words: Vec::new(),
indices: HashMap::new(),
embeddings: Vec::new(),
}
}
/// Construct the final embedding matrix.
///
/// The `None` is returned when no embedding was added to the builder.
pub fn build(self) -> Option<Embeddings> {
let embed_len = self.embeddings.first()?.shape()[0];
let mut matrix = Array2::zeros((self.embeddings.len(), embed_len));
for (idx, embed) in self.embeddings.into_iter().enumerate() {
matrix.index_axis_mut(Axis(0), idx).assign(&embed);
}
Some(Embeddings {
embed_len,
indices: self.indices,
words: self.words,
matrix,
})
}
/// Add a new embedding to the builder.
///
/// An `Err` value is returned when the word has been inserted in the
/// builder before or when the embedding has a different size than
/// previously inserted embeddings.
pub fn push<S, E>(&mut self, word: S, embedding: E) -> Result<(), Error>
where
S: Into<String>,
E: Into<Array1<f32>>,
{
let word = word.into();
let embedding = embedding.into();
// Check that the embedding has the same length as embeddings that
// were inserted before.
if let Some(first) = self.embeddings.first() {
ensure!(
embedding.shape() == first.shape(),
BuilderError::InvalidEmbeddingLength {
expected_len: first.shape()[0],
len: embedding.shape()[0],
}
);
}
// Insert the word if it was not inserted before.
match self.indices.entry(word.to_owned()) {
Entry::Vacant(vacant) => vacant.insert(self.words.len()),
Entry::Occupied(_) => bail!(BuilderError::DuplicateWord { word: word }),
};
self.words.push(word.clone());
self.embeddings.push(embedding);
Ok(())
}
}
/// Word embeddings.
///
/// This data structure stores word embeddings (also known as *word vectors*)
/// and provides some useful methods on the embeddings, such as similarity
/// and analogy queries.
pub struct Embeddings {
matrix: Array2<f32>,
embed_len: usize,
indices: HashMap<String, usize>,
words: Vec<String>,
}
impl Embeddings {
/// Perform an analogy query.
///
/// This method returns words that are close in vector space to the analogy
/// query `word1` is to `word2` as `word3` is to `?`. More concretely,
/// it searches embeddings that are similar to:
///
/// *embedding(word2) - embedding(word1) + embedding(word3)*
///
/// At most, `limit` results are returned.
pub fn analogy(
&self,
word1: &str,
word2: &str,
word3: &str,
limit: usize,
) -> Option<Vec<WordSimilarity>> {
self.analogy_by(word1, word2, word3, limit, |embeds, embed| {
embeds.dot(&embed)
})
}
/// Perform an analogy query using the given similarity function.
///
/// This method returns words that are close in vector space to the analogy
/// query `word1` is to `word2` as `word3` is to `?`. More concretely,
/// it searches embeddings that are similar to:
///
/// *embedding(word2) - embedding(word1) + embedding(word3)*
///
/// At most, `limit` results are returned.
pub fn analogy_by<F>(
&self,
word1: &str,
word2: &str,
word3: &str,
limit: usize,
similarity: F,
) -> Option<Vec<WordSimilarity>>
where
F: FnMut(ArrayView2<f32>, ArrayView1<f32>) -> Array1<f32>,
{
let embedding1 = self
.indices
.get(word1)
.map(|idx| self.matrix.index_axis(Axis(0), *idx).to_owned())?;
let embedding2 = self
.indices
.get(word2)
.map(|idx| self.matrix.index_axis(Axis(0), *idx).to_owned())?;
let embedding3 = self
.indices
.get(word3)
.map(|idx| self.matrix.index_axis(Axis(0), *idx).to_owned())?;
let embedding = (embedding2 - embedding1) + embedding3;
let skip = [word1, word2, word3].iter().cloned().collect();
Some(self.similarity_(embedding.view(), &skip, limit, similarity))
}
/// Get (a view of) the raw embedding matrix.
pub fn data(&self) -> ArrayView2<f32> {
self.matrix.view()
}
/// Return the length (in vector components) of the word embeddings.
pub fn embed_len(&self) -> usize {
self.embed_len
}
/// Get the embedding of a word.
pub fn embedding(&self, word: &str) -> Option<ArrayView1<f32>> {
self.indices
.get(word)
.map(|idx| self.matrix.index_axis(Axis(0), *idx))
}
/// Get the mapping from words to row indices of the embedding matrix.
pub fn indices(&self) -> &HashMap<String, usize> {
&self.indices
}
/// Get an iterator over pairs of words and the corresponding embeddings.
pub fn iter(&self) -> Iter {
Iter {
embeddings: self,
inner: self.words.iter().enumerate(),
}
}
/// Normalize the embeddings using their L2 (euclidean) norms.
///
/// **Note:** when you are using the output of e.g. word2vec, you should
/// normalize the embeddings to get good query results.
pub fn normalize(&mut self) {
for mut embedding in self.matrix.outer_iter_mut() {
let l2norm = embedding.dot(&embedding).sqrt();
if l2norm != 0f32 {
embedding /= l2norm;
}
}
}
/// Find words that are similar to the query word.
///
/// The similarity between two words is defined by the dot product of
/// the embeddings. If the vectors are unit vectors (e.g. by virtue of
/// calling `normalize`), this is the cosine similarity. At most, `limit`
/// results are returned.
pub fn similarity(&self, word: &str, limit: usize) -> Option<Vec<WordSimilarity>> {
self.similarity_by(word, limit, |embeds, embed| embeds.dot(&embed))
}
/// Find words that are similar to the query word using the given similarity
/// function.
///
/// The similarity function should return, given the embeddings matrix and
/// the word vector, a vector of similarity scores. At most, `limit` results
/// are returned.
pub fn similarity_by<F>(
&self,
word: &str,
limit: usize,
similarity: F,
) -> Option<Vec<WordSimilarity>>
where
F: FnMut(ArrayView2<f32>, ArrayView1<f32>) -> Array1<f32>,
{
self.indices.get(word).map(|idx| {
let embedding = self.matrix.index_axis(Axis(0), *idx);
let mut skip = HashSet::new();
skip.insert(word);
self.similarity_(embedding, &skip, limit, similarity)
})
}
fn similarity_<F>(
&self,
embed: ArrayView1<f32>,
skip: &HashSet<&str>,
limit: usize,
mut similarity: F,
) -> Vec<WordSimilarity>
where
F: FnMut(ArrayView2<f32>, ArrayView1<f32>) -> Array1<f32>,
| {
self.word.cmp(other.word)
} | conditional_block |
|
embeddings.rs | > PartialOrd for WordSimilarity<'a> {
fn partial_cmp(&self, other: &WordSimilarity) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl<'a> Eq for WordSimilarity<'a> {}
impl<'a> PartialEq for WordSimilarity<'a> {
fn eq(&self, other: &WordSimilarity) -> bool {
self.cmp(other) == Ordering::Equal
}
}
#[derive(Debug, Fail)]
pub enum | {
#[fail(
display = "invalid embedding shape, expected: {}, got: {}",
expected_len, len
)]
InvalidEmbeddingLength { expected_len: usize, len: usize },
#[fail(display = "word not unique: {}", word)]
DuplicateWord { word: String },
}
/// Builder for word embedding matrices.
///
/// This builder can be used to construct an embedding matrix. The builder
/// does not assume that the number of embeddings is known ahead of time.
/// The embedding size is determined by the size of the first embedding that
/// is added. All subsequently added embeddings should have the same size.
pub struct Builder {
words: Vec<String>,
indices: HashMap<String, usize>,
embeddings: Vec<Array1<f32>>,
}
impl Builder {
/// Create a builder.
pub fn new() -> Self {
Builder {
words: Vec::new(),
indices: HashMap::new(),
embeddings: Vec::new(),
}
}
/// Construct the final embedding matrix.
///
/// The `None` is returned when no embedding was added to the builder.
pub fn build(self) -> Option<Embeddings> {
let embed_len = self.embeddings.first()?.shape()[0];
let mut matrix = Array2::zeros((self.embeddings.len(), embed_len));
for (idx, embed) in self.embeddings.into_iter().enumerate() {
matrix.index_axis_mut(Axis(0), idx).assign(&embed);
}
Some(Embeddings {
embed_len,
indices: self.indices,
words: self.words,
matrix,
})
}
/// Add a new embedding to the builder.
///
/// An `Err` value is returned when the word has been inserted in the
/// builder before or when the embedding has a different size than
/// previously inserted embeddings.
pub fn push<S, E>(&mut self, word: S, embedding: E) -> Result<(), Error>
where
S: Into<String>,
E: Into<Array1<f32>>,
{
let word = word.into();
let embedding = embedding.into();
// Check that the embedding has the same length as embeddings that
// were inserted before.
if let Some(first) = self.embeddings.first() {
ensure!(
embedding.shape() == first.shape(),
BuilderError::InvalidEmbeddingLength {
expected_len: first.shape()[0],
len: embedding.shape()[0],
}
);
}
// Insert the word if it was not inserted before.
match self.indices.entry(word.to_owned()) {
Entry::Vacant(vacant) => vacant.insert(self.words.len()),
Entry::Occupied(_) => bail!(BuilderError::DuplicateWord { word: word }),
};
self.words.push(word.clone());
self.embeddings.push(embedding);
Ok(())
}
}
/// Word embeddings.
///
/// This data structure stores word embeddings (also known as *word vectors*)
/// and provides some useful methods on the embeddings, such as similarity
/// and analogy queries.
pub struct Embeddings {
matrix: Array2<f32>,
embed_len: usize,
indices: HashMap<String, usize>,
words: Vec<String>,
}
impl Embeddings {
/// Perform an analogy query.
///
/// This method returns words that are close in vector space to the analogy
/// query `word1` is to `word2` as `word3` is to `?`. More concretely,
/// it searches embeddings that are similar to:
///
/// *embedding(word2) - embedding(word1) + embedding(word3)*
///
/// At most, `limit` results are returned.
pub fn analogy(
&self,
word1: &str,
word2: &str,
word3: &str,
limit: usize,
) -> Option<Vec<WordSimilarity>> {
self.analogy_by(word1, word2, word3, limit, |embeds, embed| {
embeds.dot(&embed)
})
}
/// Perform an analogy query using the given similarity function.
///
/// This method returns words that are close in vector space to the analogy
/// query `word1` is to `word2` as `word3` is to `?`. More concretely,
/// it searches embeddings that are similar to:
///
/// *embedding(word2) - embedding(word1) + embedding(word3)*
///
/// At most, `limit` results are returned.
pub fn analogy_by<F>(
&self,
word1: &str,
word2: &str,
word3: &str,
limit: usize,
similarity: F,
) -> Option<Vec<WordSimilarity>>
where
F: FnMut(ArrayView2<f32>, ArrayView1<f32>) -> Array1<f32>,
{
let embedding1 = self
.indices
.get(word1)
.map(|idx| self.matrix.index_axis(Axis(0), *idx).to_owned())?;
let embedding2 = self
.indices
.get(word2)
.map(|idx| self.matrix.index_axis(Axis(0), *idx).to_owned())?;
let embedding3 = self
.indices
.get(word3)
.map(|idx| self.matrix.index_axis(Axis(0), *idx).to_owned())?;
let embedding = (embedding2 - embedding1) + embedding3;
let skip = [word1, word2, word3].iter().cloned().collect();
Some(self.similarity_(embedding.view(), &skip, limit, similarity))
}
/// Get (a view of) the raw embedding matrix.
pub fn data(&self) -> ArrayView2<f32> {
self.matrix.view()
}
/// Return the length (in vector components) of the word embeddings.
pub fn embed_len(&self) -> usize {
self.embed_len
}
/// Get the embedding of a word.
pub fn embedding(&self, word: &str) -> Option<ArrayView1<f32>> {
self.indices
.get(word)
.map(|idx| self.matrix.index_axis(Axis(0), *idx))
}
/// Get the mapping from words to row indices of the embedding matrix.
pub fn indices(&self) -> &HashMap<String, usize> {
&self.indices
}
/// Get an iterator over pairs of words and the corresponding embeddings.
pub fn iter(&self) -> Iter {
Iter {
embeddings: self,
inner: self.words.iter().enumerate(),
}
}
/// Normalize the embeddings using their L2 (euclidean) norms.
///
/// **Note:** when you are using the output of e.g. word2vec, you should
/// normalize the embeddings to get good query results.
pub fn normalize(&mut self) {
for mut embedding in self.matrix.outer_iter_mut() {
let l2norm = embedding.dot(&embedding).sqrt();
if l2norm != 0f32 {
embedding /= l2norm;
}
}
}
/// Find words that are similar to the query word.
///
/// The similarity between two words is defined by the dot product of
/// the embeddings. If the vectors are unit vectors (e.g. by virtue of
/// calling `normalize`), this is the cosine similarity. At most, `limit`
/// results are returned.
pub fn similarity(&self, word: &str, limit: usize) -> Option<Vec<WordSimilarity>> {
self.similarity_by(word, limit, |embeds, embed| embeds.dot(&embed))
}
/// Find words that are similar to the query word using the given similarity
/// function.
///
/// The similarity function should return, given the embeddings matrix and
/// the word vector, a vector of similarity scores. At most, `limit` results
/// are returned.
pub fn similarity_by<F>(
&self,
word: &str,
limit: usize,
similarity: F,
) -> Option<Vec<WordSimilarity>>
where
F: FnMut(ArrayView2<f32>, ArrayView1<f32>) -> Array1<f32>,
{
self.indices.get(word).map(|idx| {
let embedding = self.matrix.index_axis(Axis(0), *idx);
let mut skip = HashSet::new();
skip.insert(word);
self.similarity_(embedding, &skip, limit, similarity)
})
}
fn similarity_<F>(
&self,
embed: ArrayView1<f32>,
skip: &HashSet<&str>,
limit: usize,
mut similarity: F,
) -> Vec<WordSimilarity>
where
F: FnMut(ArrayView2<f32>, ArrayView1<f32>) -> Array1<f32>,
{
let sims = similarity(self.matrix.view(), embed);
let mut results: | BuilderError | identifier_name |
utils.rs | Name) -> (JsWord, Span) {
match name {
ast::ModuleExportName::Ident(id) => (id.sym.clone(), id.span),
ast::ModuleExportName::Str(s) => (s.value.clone(), s.span),
}
}
/// Properties like `ExportNamedSpecifier::orig` have to be an Ident if `src` is `None`
pub fn match_export_name_ident(name: &ast::ModuleExportName) -> &ast::Ident {
match name {
ast::ModuleExportName::Ident(id) => id,
ast::ModuleExportName::Str(_) => unreachable!(),
}
}
pub fn match_require(node: &ast::Expr, decls: &HashSet<Id>, ignore_mark: Mark) -> Option<JsWord> {
use ast::*;
match node {
Expr::Call(call) => match &call.callee {
Callee::Expr(expr) => match &**expr {
Expr::Ident(ident) => {
if ident.sym == js_word!("require")
&& !decls.contains(&(ident.sym.clone(), ident.span.ctxt))
&& !is_marked(ident.span, ignore_mark)
{
if let Some(arg) = call.args.get(0) {
return match_str(&arg.expr).map(|(name, _)| name);
}
}
None
}
Expr::Member(member) => {
if match_member_expr(member, vec!["module", "require"], decls) {
if let Some(arg) = call.args.get(0) {
return match_str(&arg.expr).map(|(name, _)| name);
}
}
None
}
_ => None,
},
_ => None,
},
_ => None,
}
}
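// Illustrative behaviour (not part of the original source): for a call such as
// `require("react")` or `module.require("react")`, match_require returns
// Some("react".into()), provided `require` is not shadowed by a binding in `decls`
// and the call is not marked with `ignore_mark`.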
pub fn match_import(node: &ast::Expr, ignore_mark: Mark) -> Option<JsWord> {
use ast::*;
match node {
Expr::Call(call) => match &call.callee {
Callee::Import(ident) if !is_marked(ident.span, ignore_mark) => {
if let Some(arg) = call.args.get(0) {
return match_str(&arg.expr).map(|(name, _)| name);
}
None
}
_ => None,
},
_ => None,
}
}
// `name` must not be an existing binding.
pub fn create_global_decl_stmt(
name: swc_core::ecma::atoms::JsWord,
init: ast::Expr,
global_mark: Mark,
) -> (ast::Stmt, SyntaxContext) {
// The correct value would actually be `DUMMY_SP.apply_mark(Mark::fresh(Mark::root()))`.
// But this saves us from running the resolver again in some cases.
let span = DUMMY_SP.apply_mark(global_mark);
(
ast::Stmt::Decl(ast::Decl::Var(Box::new(ast::VarDecl {
kind: ast::VarDeclKind::Var,
declare: false,
span: DUMMY_SP,
decls: vec![ast::VarDeclarator {
name: ast::Pat::Ident(ast::BindingIdent::from(ast::Ident::new(name, span))),
span: DUMMY_SP,
definite: false,
init: Some(Box::new(init)),
}],
}))),
span.ctxt,
)
}
pub fn get_undefined_ident(unresolved_mark: Mark) -> ast::Ident {
ast::Ident::new(js_word!("undefined"), DUMMY_SP.apply_mark(unresolved_mark))
}
#[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq)]
/// Corresponds to the JS SourceLocation type (1-based, end exclusive)
pub struct SourceLocation {
pub start_line: usize,
pub start_col: usize,
pub end_line: usize,
pub end_col: usize,
}
impl SourceLocation {
pub fn from(source_map: &swc_core::common::SourceMap, span: swc_core::common::Span) -> Self {
if span.lo.is_dummy() || span.hi.is_dummy() {
return SourceLocation {
start_line: 1,
start_col: 1,
end_line: 1,
end_col: 2,
};
}
let start = source_map.lookup_char_pos(span.lo);
let end = source_map.lookup_char_pos(span.hi);
// SWC's columns are exclusive, ours are exclusive
// SWC has 0-based columns, ours are 1-based (column + 1)
SourceLocation {
start_line: start.line,
start_col: start.col_display + 1,
end_line: end.line,
end_col: end.col_display + 1,
}
}
}
impl PartialOrd for SourceLocation {
fn partial_cmp(&self, other: &SourceLocation) -> Option<Ordering> {
match self.start_line.cmp(&other.start_line) {
Ordering::Equal => self.start_col.partial_cmp(&other.start_col),
o => Some(o),
}
}
}
#[derive(Serialize, Deserialize, Debug)]
pub struct CodeHighlight {
pub message: Option<String>,
pub loc: SourceLocation,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct Diagnostic {
pub message: String,
pub code_highlights: Option<Vec<CodeHighlight>>,
pub hints: Option<Vec<String>>,
pub show_environment: bool,
pub severity: DiagnosticSeverity,
pub documentation_url: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
pub enum DiagnosticSeverity {
/// Fails the build with an error.
Error,
/// Logs a warning, but the build does not fail.
Warning,
/// An error if this is source code in the project, or a warning if in node_modules.
SourceError,
}
#[derive(Serialize, Debug, Deserialize, Eq, PartialEq, Clone, Copy)]
pub enum SourceType {
Script,
Module,
}
#[derive(Debug)]
pub struct Bailout {
pub loc: SourceLocation,
pub reason: BailoutReason,
}
impl Bailout {
pub fn to_diagnostic(&self) -> Diagnostic {
let (message, documentation_url) = self.reason.info();
Diagnostic {
message: message.into(),
documentation_url: Some(documentation_url.into()),
code_highlights: Some(vec![CodeHighlight {
loc: self.loc.clone(),
message: None,
}]),
show_environment: false,
severity: DiagnosticSeverity::Warning,
hints: None,
}
}
}
#[derive(Debug, Eq, PartialEq)]
pub enum BailoutReason {
NonTopLevelRequire,
NonStaticDestructuring,
TopLevelReturn,
Eval,
NonStaticExports,
FreeModule,
FreeExports,
ExportsReassignment,
ModuleReassignment,
NonStaticDynamicImport,
NonStaticAccess,
}
impl BailoutReason {
fn info(&self) -> (&str, &str) {
match self {
BailoutReason::NonTopLevelRequire => (
"Conditional or non-top-level `require()` call. This causes the resolved module and all dependencies to be wrapped.",
"https://parceljs.org/features/scope-hoisting/#avoid-conditional-require()"
),
BailoutReason::NonStaticDestructuring => (
"Non-static destructuring of `require` or dynamic `import()`. This causes all exports of the resolved module to be included.",
"https://parceljs.org/features/scope-hoisting/#commonjs"
),
BailoutReason::TopLevelReturn => (
"Module contains a top-level `return` statement. This causes the module to be wrapped in a function and tree shaking to be disabled.",
"https://parceljs.org/features/scope-hoisting/#avoid-top-level-return"
),
BailoutReason::Eval => (
"Module contains usage of `eval`. This causes the module to be wrapped in a function and minification to be disabled.",
"https://parceljs.org/features/scope-hoisting/#avoid-eval"
),
BailoutReason::NonStaticExports => (
"Non-static access of CommonJS `exports` object. This causes tree shaking to be disabled for the module.",
"https://parceljs.org/features/scope-hoisting/#commonjs"
),
BailoutReason::FreeModule => (
"Unknown usage of CommonJS `module` object. This causes the module to be wrapped, and tree shaking to be disabled.",
"https://parceljs.org/features/scope-hoisting/#commonjs"
),
BailoutReason::FreeExports => (
"Unknown usage of CommonJS `exports` object. This causes tree shaking to be disabled.",
"https://parceljs.org/features/scope-hoisting/#commonjs"
),
BailoutReason::ExportsReassignment => (
"Module contains a reassignment of the CommonJS `exports` object. This causes the module to be wrapped and tree-shaking to be disabled.",
"https://parceljs.org/features/scope-hoisting/#avoid-module-and-exports-re-assignment"
),
BailoutReason::ModuleReassignment => (
"Module contains a reassignment of the CommonJS `module` object. This causes the module to be wrapped and tree-shaking to be disabled.",
"https://parceljs.org/features/scope-hoisting/#avoid-module-and-exports-re-assignment"
),
BailoutReason::NonStaticDynamicImport => (
"Unknown dynamic import usage. This causes tree shaking to be disabled for the resolved module.", | "https://parceljs.org/features/scope-hoisting/#dynamic-imports"
), | random_line_split |
|
utils.rs | && &id.sym == idents.pop().unwrap() && !decls.contains(&id!(id));
}
_ => return false,
}
}
false
}
pub fn | (specifier: swc_core::ecma::atoms::JsWord) -> ast::CallExpr {
let mut normalized_specifier = specifier;
if normalized_specifier.starts_with("node:") {
normalized_specifier = normalized_specifier.replace("node:", "").into();
}
ast::CallExpr {
callee: ast::Callee::Expr(Box::new(ast::Expr::Ident(ast::Ident::new(
"require".into(),
DUMMY_SP,
)))),
args: vec![ast::ExprOrSpread {
expr: Box::new(ast::Expr::Lit(ast::Lit::Str(normalized_specifier.into()))),
spread: None,
}],
span: DUMMY_SP,
type_args: None,
}
}
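// Illustrative behaviour (not part of the original source): create_require("node:fs".into())
// builds the AST for `require("fs")`; the `node:` prefix is stripped before the call
// expression is constructed.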
fn is_marked(span: Span, mark: Mark) -> bool {
let mut ctxt = span.ctxt();
loop {
let m = ctxt.remove_mark();
if m == Mark::root() {
return false;
}
if m == mark {
return true;
}
}
}
pub fn match_str(node: &ast::Expr) -> Option<(JsWord, Span)> {
use ast::*;
match node {
// "string" or 'string'
Expr::Lit(Lit::Str(s)) => Some((s.value.clone(), s.span)),
// `string`
Expr::Tpl(tpl) if tpl.quasis.len() == 1 && tpl.exprs.is_empty() => {
Some(((*tpl.quasis[0].raw).into(), tpl.span))
}
_ => None,
}
}
pub fn match_property_name(node: &ast::MemberExpr) -> Option<(JsWord, Span)> {
match &node.prop {
ast::MemberProp::Computed(s) => match_str(&s.expr),
ast::MemberProp::Ident(id) => Some((id.sym.clone(), id.span)),
ast::MemberProp::PrivateName(_) => None,
}
}
pub fn match_export_name(name: &ast::ModuleExportName) -> (JsWord, Span) {
match name {
ast::ModuleExportName::Ident(id) => (id.sym.clone(), id.span),
ast::ModuleExportName::Str(s) => (s.value.clone(), s.span),
}
}
/// Properties like `ExportNamedSpecifier::orig` have to be an Ident if `src` is `None`
pub fn match_export_name_ident(name: &ast::ModuleExportName) -> &ast::Ident {
match name {
ast::ModuleExportName::Ident(id) => id,
ast::ModuleExportName::Str(_) => unreachable!(),
}
}
pub fn match_require(node: &ast::Expr, decls: &HashSet<Id>, ignore_mark: Mark) -> Option<JsWord> {
use ast::*;
match node {
Expr::Call(call) => match &call.callee {
Callee::Expr(expr) => match &**expr {
Expr::Ident(ident) => {
if ident.sym == js_word!("require")
&& !decls.contains(&(ident.sym.clone(), ident.span.ctxt))
&& !is_marked(ident.span, ignore_mark)
{
if let Some(arg) = call.args.get(0) {
return match_str(&arg.expr).map(|(name, _)| name);
}
}
None
}
Expr::Member(member) => {
if match_member_expr(member, vec!["module", "require"], decls) {
if let Some(arg) = call.args.get(0) {
return match_str(&arg.expr).map(|(name, _)| name);
}
}
None
}
_ => None,
},
_ => None,
},
_ => None,
}
}
pub fn match_import(node: &ast::Expr, ignore_mark: Mark) -> Option<JsWord> {
use ast::*;
match node {
Expr::Call(call) => match &call.callee {
Callee::Import(ident) if !is_marked(ident.span, ignore_mark) => {
if let Some(arg) = call.args.get(0) {
return match_str(&arg.expr).map(|(name, _)| name);
}
None
}
_ => None,
},
_ => None,
}
}
// `name` must not be an existing binding.
pub fn create_global_decl_stmt(
name: swc_core::ecma::atoms::JsWord,
init: ast::Expr,
global_mark: Mark,
) -> (ast::Stmt, SyntaxContext) {
// The correct value would actually be `DUMMY_SP.apply_mark(Mark::fresh(Mark::root()))`.
// But this saves us from running the resolver again in some cases.
let span = DUMMY_SP.apply_mark(global_mark);
(
ast::Stmt::Decl(ast::Decl::Var(Box::new(ast::VarDecl {
kind: ast::VarDeclKind::Var,
declare: false,
span: DUMMY_SP,
decls: vec![ast::VarDeclarator {
name: ast::Pat::Ident(ast::BindingIdent::from(ast::Ident::new(name, span))),
span: DUMMY_SP,
definite: false,
init: Some(Box::new(init)),
}],
}))),
span.ctxt,
)
}
pub fn get_undefined_ident(unresolved_mark: Mark) -> ast::Ident {
ast::Ident::new(js_word!("undefined"), DUMMY_SP.apply_mark(unresolved_mark))
}
#[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq)]
/// Corresponds to the JS SourceLocation type (1-based, end exclusive)
pub struct SourceLocation {
pub start_line: usize,
pub start_col: usize,
pub end_line: usize,
pub end_col: usize,
}
impl SourceLocation {
pub fn from(source_map: &swc_core::common::SourceMap, span: swc_core::common::Span) -> Self {
if span.lo.is_dummy() || span.hi.is_dummy() {
return SourceLocation {
start_line: 1,
start_col: 1,
end_line: 1,
end_col: 2,
};
}
let start = source_map.lookup_char_pos(span.lo);
let end = source_map.lookup_char_pos(span.hi);
// SWC's columns are exclusive, ours are exclusive
// SWC has 0-based columns, ours are 1-based (column + 1)
SourceLocation {
start_line: start.line,
start_col: start.col_display + 1,
end_line: end.line,
end_col: end.col_display + 1,
}
}
}
impl PartialOrd for SourceLocation {
fn partial_cmp(&self, other: &SourceLocation) -> Option<Ordering> {
match self.start_line.cmp(&other.start_line) {
Ordering::Equal => self.start_col.partial_cmp(&other.start_col),
o => Some(o),
}
}
}
#[derive(Serialize, Deserialize, Debug)]
pub struct CodeHighlight {
pub message: Option<String>,
pub loc: SourceLocation,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct Diagnostic {
pub message: String,
pub code_highlights: Option<Vec<CodeHighlight>>,
pub hints: Option<Vec<String>>,
pub show_environment: bool,
pub severity: DiagnosticSeverity,
pub documentation_url: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
pub enum DiagnosticSeverity {
/// Fails the build with an error.
Error,
/// Logs a warning, but the build does not fail.
Warning,
/// An error if this is source code in the project, or a warning if in node_modules.
SourceError,
}
#[derive(Serialize, Debug, Deserialize, Eq, PartialEq, Clone, Copy)]
pub enum SourceType {
Script,
Module,
}
#[derive(Debug)]
pub struct Bailout {
pub loc: SourceLocation,
pub reason: BailoutReason,
}
impl Bailout {
pub fn to_diagnostic(&self) -> Diagnostic {
let (message, documentation_url) = self.reason.info();
Diagnostic {
message: message.into(),
documentation_url: Some(documentation_url.into()),
code_highlights: Some(vec![CodeHighlight {
loc: self.loc.clone(),
message: None,
}]),
show_environment: false,
severity: DiagnosticSeverity::Warning,
hints: None,
}
}
}
#[derive(Debug, Eq, PartialEq)]
pub enum BailoutReason {
NonTopLevelRequire,
NonStaticDestructuring,
TopLevelReturn,
Eval,
NonStaticExports,
FreeModule,
FreeExports,
ExportsReassignment,
ModuleReassignment,
NonStaticDynamicImport,
NonStaticAccess,
}
impl BailoutReason {
fn info(&self) -> (&str, &str) {
match self {
BailoutReason::NonTopLevelRequire => (
"Conditional or non-top-level `require()` call. This causes the resolved module and all dependencies to be wrapped.",
"https://parceljs.org/features/scope-hoisting/#avoid-conditional-require()"
),
BailoutReason::NonStaticDestructuring => (
"Non-static destructuring of `require` or dynamic `import()`. This causes all exports of the resolved module to be included.",
| create_require | identifier_name |
utils.rs |
}
MemberProp::Ident(Ident { ref sym, .. }) => sym,
_ => return false,
};
if prop != expected {
return false;
}
match &*member.obj {
Expr::Member(m) => member = m,
Expr::Ident(id) => {
return idents.len() == 1 && &id.sym == idents.pop().unwrap() && !decls.contains(&id!(id));
}
_ => return false,
}
}
false
}
pub fn create_require(specifier: swc_core::ecma::atoms::JsWord) -> ast::CallExpr {
let mut normalized_specifier = specifier;
if normalized_specifier.starts_with("node:") {
normalized_specifier = normalized_specifier.replace("node:", "").into();
}
ast::CallExpr {
callee: ast::Callee::Expr(Box::new(ast::Expr::Ident(ast::Ident::new(
"require".into(),
DUMMY_SP,
)))),
args: vec![ast::ExprOrSpread {
expr: Box::new(ast::Expr::Lit(ast::Lit::Str(normalized_specifier.into()))),
spread: None,
}],
span: DUMMY_SP,
type_args: None,
}
}
fn is_marked(span: Span, mark: Mark) -> bool {
let mut ctxt = span.ctxt();
loop {
let m = ctxt.remove_mark();
if m == Mark::root() {
return false;
}
if m == mark {
return true;
}
}
}
pub fn match_str(node: &ast::Expr) -> Option<(JsWord, Span)> {
use ast::*;
match node {
// "string" or 'string'
Expr::Lit(Lit::Str(s)) => Some((s.value.clone(), s.span)),
// `string`
Expr::Tpl(tpl) if tpl.quasis.len() == 1 && tpl.exprs.is_empty() => {
Some(((*tpl.quasis[0].raw).into(), tpl.span))
}
_ => None,
}
}
pub fn match_property_name(node: &ast::MemberExpr) -> Option<(JsWord, Span)> {
match &node.prop {
ast::MemberProp::Computed(s) => match_str(&s.expr),
ast::MemberProp::Ident(id) => Some((id.sym.clone(), id.span)),
ast::MemberProp::PrivateName(_) => None,
}
}
pub fn match_export_name(name: &ast::ModuleExportName) -> (JsWord, Span) {
match name {
ast::ModuleExportName::Ident(id) => (id.sym.clone(), id.span),
ast::ModuleExportName::Str(s) => (s.value.clone(), s.span),
}
}
/// Properties like `ExportNamedSpecifier::orig` have to be an Ident if `src` is `None`
pub fn match_export_name_ident(name: &ast::ModuleExportName) -> &ast::Ident {
match name {
ast::ModuleExportName::Ident(id) => id,
ast::ModuleExportName::Str(_) => unreachable!(),
}
}
pub fn match_require(node: &ast::Expr, decls: &HashSet<Id>, ignore_mark: Mark) -> Option<JsWord> {
use ast::*;
match node {
Expr::Call(call) => match &call.callee {
Callee::Expr(expr) => match &**expr {
Expr::Ident(ident) => {
if ident.sym == js_word!("require")
&& !decls.contains(&(ident.sym.clone(), ident.span.ctxt))
&& !is_marked(ident.span, ignore_mark)
{
if let Some(arg) = call.args.get(0) {
return match_str(&arg.expr).map(|(name, _)| name);
}
}
None
}
Expr::Member(member) => {
if match_member_expr(member, vec!["module", "require"], decls) {
if let Some(arg) = call.args.get(0) {
return match_str(&arg.expr).map(|(name, _)| name);
}
}
None
}
_ => None,
},
_ => None,
},
_ => None,
}
}
pub fn match_import(node: &ast::Expr, ignore_mark: Mark) -> Option<JsWord> {
use ast::*;
match node {
Expr::Call(call) => match &call.callee {
Callee::Import(ident) if !is_marked(ident.span, ignore_mark) => {
if let Some(arg) = call.args.get(0) {
return match_str(&arg.expr).map(|(name, _)| name);
}
None
}
_ => None,
},
_ => None,
}
}
// `name` must not be an existing binding.
pub fn create_global_decl_stmt(
name: swc_core::ecma::atoms::JsWord,
init: ast::Expr,
global_mark: Mark,
) -> (ast::Stmt, SyntaxContext) {
// The correct value would actually be `DUMMY_SP.apply_mark(Mark::fresh(Mark::root()))`.
// But this saves us from running the resolver again in some cases.
let span = DUMMY_SP.apply_mark(global_mark);
(
ast::Stmt::Decl(ast::Decl::Var(Box::new(ast::VarDecl {
kind: ast::VarDeclKind::Var,
declare: false,
span: DUMMY_SP,
decls: vec![ast::VarDeclarator {
name: ast::Pat::Ident(ast::BindingIdent::from(ast::Ident::new(name, span))),
span: DUMMY_SP,
definite: false,
init: Some(Box::new(init)),
}],
}))),
span.ctxt,
)
}
pub fn get_undefined_ident(unresolved_mark: Mark) -> ast::Ident {
ast::Ident::new(js_word!("undefined"), DUMMY_SP.apply_mark(unresolved_mark))
}
#[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq)]
/// Corresponds to the JS SourceLocation type (1-based, end exclusive)
pub struct SourceLocation {
pub start_line: usize,
pub start_col: usize,
pub end_line: usize,
pub end_col: usize,
}
impl SourceLocation {
pub fn from(source_map: &swc_core::common::SourceMap, span: swc_core::common::Span) -> Self {
if span.lo.is_dummy() || span.hi.is_dummy() {
return SourceLocation {
start_line: 1,
start_col: 1,
end_line: 1,
end_col: 2,
};
}
let start = source_map.lookup_char_pos(span.lo);
let end = source_map.lookup_char_pos(span.hi);
// SWC's columns are exclusive, ours are exclusive
// SWC has 0-based columns, ours are 1-based (column + 1)
SourceLocation {
start_line: start.line,
start_col: start.col_display + 1,
end_line: end.line,
end_col: end.col_display + 1,
}
}
}
impl PartialOrd for SourceLocation {
fn partial_cmp(&self, other: &SourceLocation) -> Option<Ordering> {
match self.start_line.cmp(&other.start_line) {
Ordering::Equal => self.start_col.partial_cmp(&other.start_col),
o => Some(o),
}
}
}
#[derive(Serialize, Deserialize, Debug)]
pub struct CodeHighlight {
pub message: Option<String>,
pub loc: SourceLocation,
}
#[derive(Serialize, Deserialize, Debug)]
pub struct Diagnostic {
pub message: String,
pub code_highlights: Option<Vec<CodeHighlight>>,
pub hints: Option<Vec<String>>,
pub show_environment: bool,
pub severity: DiagnosticSeverity,
pub documentation_url: Option<String>,
}
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
pub enum DiagnosticSeverity {
/// Fails the build with an error.
Error,
/// Logs a warning, but the build does not fail.
Warning,
/// An error if this is source code in the project, or a warning if in node_modules.
SourceError,
}
#[derive(Serialize, Debug, Deserialize, Eq, PartialEq, Clone, Copy)]
pub enum SourceType {
Script,
Module,
}
#[derive(Debug)]
pub struct Bailout {
pub loc: SourceLocation,
pub reason: BailoutReason,
}
impl Bailout {
pub fn to_diagnostic(&self) -> Diagnostic {
let (message, documentation_url) = self.reason.info();
Diagnostic {
message: message.into(),
documentation_url: Some(documentation_url.into()),
code_highlights: Some(vec![CodeHighlight {
loc: self.loc.clone(),
message: None,
}]),
show_environment: false,
severity: DiagnosticSeverity::Warning,
hints: None,
}
}
}
#[derive(Debug, Eq, PartialEq)]
pub enum BailoutReason {
NonTopLevelRequire,
NonStaticDestructuring,
TopLevelReturn,
Eval,
NonStaticExports,
FreeModule,
FreeExports,
ExportsReassignment,
ModuleReassignment,
NonStaticDynamicImport,
NonStaticAccess,
}
impl BailoutReason {
fn info(&self) -> (&str, &str) {
match self {
BailoutReason::NonTopLevelRequire => (
"Conditional or non-top-level | {
return false;
} | conditional_block |
|
inotify_linux_2.go | of moves, etc.
import (
"fmt"
"os"
"path/filepath"
"strings"
"sync"
"sync/atomic"
"syscall"
"time"
"unsafe"
"github.com/ugorji/go-common/errorutil"
"github.com/ugorji/go-common/logging"
)
var log = logging.PkgLogger()
var ClosedErr = errorutil.String("<watcher closed>")
// Use select, so read doesn't block
const useSelect = true
// Use non-block, so we don't block on read.
const useNonBlock = true
type WatchEvent struct {
syscall.InotifyEvent
Path string
Name string
}
func (e *WatchEvent) String() string | }
if x&syscall.IN_DELETE_SELF != 0 {
s = append(s, "IN_DELETE_SELF")
}
if x&syscall.IN_MOVE_SELF != 0 {
s = append(s, "IN_MOVE_SELF")
}
return fmt.Sprintf("WatchEvent: Path: %s, Name: %s, Wd: %v, Cookie: %v, Mask: %b, %v",
e.Path, e.Name, e.Wd, e.Cookie, e.Mask, s)
}
// Watcher implements a watch service.
// It allows the user to handle events natively, but handles
// management of the event bus and delivery.
// User just provides a callback function.
type Watcher struct {
sl time.Duration
fd int
closed uint32
wds map[int32]string
flags map[string]uint32
mu sync.Mutex
fn func([]*WatchEvent)
ev chan []*WatchEvent
sysbufsize int
}
// NewWatcher returns a new Watcher instance.
// - bufsize: chan size (ie max number of batches available to be processed)
// - sysBufSize: syscall Inotify Buf size (ie max number of inotify events in each read)
// - sleepTime: sleep time between reads.
// Allows the system to coalesce events, and allows the user to handle events in batches.
// - fn: function to call for each batch of events read
func NewWatcher(bufsize, sysBufSize int, sleepTime time.Duration, fn func([]*WatchEvent),
) (w *Watcher, err error) {
fd, err := syscall.InotifyInit()
if err != nil {
return
}
if fd == -1 {
err = os.NewSyscallError("inotify_init", err)
return
}
if useNonBlock {
syscall.SetNonblock(fd, true)
}
w = &Watcher{
fd: fd,
fn: fn,
ev: make(chan []*WatchEvent, bufsize),
wds: make(map[int32]string),
flags: make(map[string]uint32),
sl: sleepTime,
sysbufsize: sysBufSize,
}
go w.readEvents()
go w.handleEvents()
return
}
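// Usage sketch (illustrative only, not part of the original file; the path
// and sizes below are made up):
//
//	w, err := NewWatcher(16, 256, 100*time.Millisecond, func(evs []*WatchEvent) {
//		for _, ev := range evs {
//			fmt.Println(ev)
//		}
//	})
//	if err != nil {
//		// handle error
//	}
//	defer w.Close()
//	w.AddAll(true, false, true, 0, "/tmp/watched")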
func (w *Watcher) AddAll(ignoreErr, clear, recursive bool, flags uint32, fpaths ...string) error {
if atomic.LoadUint32(&w.closed) != 0 {
return ClosedErr
}
w.mu.Lock()
defer w.mu.Unlock()
var merr errorutil.Multi
fnErr := func(err error, message string, params ...interface{}) bool {
if ignoreErr {
log.IfError(nil, err, message, params...)
return false
} else {
errorutil.OnErrorf(&err, message, params...)
merr = append(merr, err)
return true
}
}
if clear && fnErr(w.clear(), "Error clearing Watcher") {
return merr.NonNilError()
}
for _, fpath := range fpaths {
var walkDoneErr = errorutil.String("DONE")
first := true
walkFn := func(f string, info os.FileInfo, inerr error) error {
if first || info.Mode().IsDir() {
if fnErr(w.add(f, flags), "Error adding path: %s", f) {
return walkDoneErr
}
}
if first && !recursive {
return walkDoneErr
}
first = false
return nil
}
if err := filepath.Walk(fpath, walkFn); err == walkDoneErr {
break
}
}
return merr.NonNilError()
}
func (w *Watcher) Add(fpath string, flags uint32) error {
if atomic.LoadUint32(&w.closed) != 0 {
return ClosedErr
}
w.mu.Lock()
defer w.mu.Unlock()
return w.add(fpath, flags)
}
func (w *Watcher) add(fpath string, flags uint32) error {
if flags == 0 {
flags =
// delete: false
syscall.IN_CREATE |
syscall.IN_CLOSE_WRITE |
syscall.IN_MOVED_TO |
// delete: true
syscall.IN_MOVED_FROM |
syscall.IN_DELETE |
syscall.IN_DELETE_SELF |
syscall.IN_MOVE_SELF
}
// user can add more flags by passing the syscall.IN_MASK_ADD
wd, err := syscall.InotifyAddWatch(w.fd, fpath, flags)
if err != nil {
errorutil.OnErrorf(&err, "Error adding watch for path: %s", fpath)
return err
}
w.wds[int32(wd)] = fpath
w.flags[fpath] = flags
return nil
}
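// Note (added for clarity, not in the original file): the zero-value flag set
// above watches for new content (IN_CREATE, IN_CLOSE_WRITE, IN_MOVED_TO) as
// well as removals (IN_MOVED_FROM, IN_DELETE, IN_DELETE_SELF, IN_MOVE_SELF);
// callers may pass their own mask instead, optionally OR-ing in
// syscall.IN_MASK_ADD to extend an existing watch rather than replace it.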
func (w *Watcher) Remove(fpath string) (err error) {
if atomic.LoadUint32(&w.closed) != 0 {
return ClosedErr
}
w.mu.Lock()
defer w.mu.Unlock()
return w.remove(fpath)
}
func (w *Watcher) remove(fpath string) (err error) {
for k, v := range w.wds {
if v == fpath {
_, err = syscall.InotifyRmWatch(w.fd, uint32(k))
delete(w.wds, k)
delete(w.flags, v)
break
}
}
return
}
func (w *Watcher) Clear() error {
if atomic.LoadUint32(&w.closed) != 0 {
return ClosedErr
}
w.mu.Lock()
defer w.mu.Unlock()
return w.clear()
}
func (w *Watcher) clear() error {
var merr errorutil.Multi
for k, v := range w.wds {
if _, err := syscall.InotifyRmWatch(w.fd, uint32(k)); err != nil {
errorutil.OnErrorf(&err, "Error removing watch for path: %s", v)
merr = append(merr, err)
}
}
w.wds = make(map[int32]string)
w.flags = make(map[string]uint32)
return merr.NonNilError()
}
func (w *Watcher) Close() (err error) {
// Note that, with blocking read, Close is best effort. This is because read in linux
// does not error if the file descriptor is closed. Thus the "read" syscall may not unblock.
//
// To mitigate, we use select AND NonBlocking IO during the read.
if !atomic.CompareAndSwapUint32(&w.closed, 0, 1) {
return
}
w.mu.Lock()
defer w.mu.Unlock()
w.clear()
close(w.ev)
if !(useSelect || useNonBlock) {
log.IfError(nil, syscall.Close(w.fd), "Error closing Watcher")
}
return nil
}
func (w *Watcher) readEvents() {
if useSelect || useNonBlock {
defer func() {
log.IfError(nil, syscall.Close(w.fd), "Error closing Watcher")
}()
}
// inotify events come very quickly, so we can't handle them inline.
// Instead, we grab the list of events we read and put them on a queue.
// This way, we can still work on a bunch of events at the same time.
var buf = make([]byte, syscall.SizeofInotifyEvent*w.sysbufsize)
for {
// always check closed right before syscalls (read/select/sleep), to minimize chance
// of race condition where fd is closed, OS assigns to someone else, and we try to read.
// slight sleep gives a chance to coalesce similar events into one
if w.sl != 0 {
if atomic.LoadUint32(&w.closed) != 0 {
return
}
time.Sleep(w.sl)
}
if useSelect {
if atomic.LoadUint32(&w.closed) != 0 {
return
}
// println(">>>>> select: Checking to read")
fdset := | {
s := make([]string, 0, 4)
x := e.Mask
if x&syscall.IN_ISDIR != 0 {
s = append(s, "IN_ISDIR")
}
if x&syscall.IN_CREATE != 0 {
s = append(s, "IN_CREATE")
}
if x&syscall.IN_CLOSE_WRITE != 0 {
s = append(s, "IN_CLOSE_WRITE")
}
if x&syscall.IN_MOVED_TO != 0 {
s = append(s, "IN_MOVED_TO")
}
if x&syscall.IN_MOVED_FROM != 0 {
s = append(s, "IN_MOVED_FROM")
}
if x&syscall.IN_DELETE != 0 {
s = append(s, "IN_DELETE") | identifier_body |
inotify_linux_2.go | handling of moves, etc.
import (
"fmt"
"os"
"path/filepath"
"strings"
"sync"
"sync/atomic"
"syscall"
"time"
"unsafe"
"github.com/ugorji/go-common/errorutil"
"github.com/ugorji/go-common/logging"
)
var log = logging.PkgLogger()
var ClosedErr = errorutil.String("<watcher closed>")
// Use select, so read doesn't block | const useNonBlock = true
type WatchEvent struct {
syscall.InotifyEvent
Path string
Name string
}
func (e *WatchEvent) String() string {
s := make([]string, 0, 4)
x := e.Mask
if x&syscall.IN_ISDIR != 0 {
s = append(s, "IN_ISDIR")
}
if x&syscall.IN_CREATE != 0 {
s = append(s, "IN_CREATE")
}
if x&syscall.IN_CLOSE_WRITE != 0 {
s = append(s, "IN_CLOSE_WRITE")
}
if x&syscall.IN_MOVED_TO != 0 {
s = append(s, "IN_MOVED_TO")
}
if x&syscall.IN_MOVED_FROM != 0 {
s = append(s, "IN_MOVED_FROM")
}
if x&syscall.IN_DELETE != 0 {
s = append(s, "IN_DELETE")
}
if x&syscall.IN_DELETE_SELF != 0 {
s = append(s, "IN_DELETE_SELF")
}
if x&syscall.IN_MOVE_SELF != 0 {
s = append(s, "IN_MOVE_SELF")
}
return fmt.Sprintf("WatchEvent: Path: %s, Name: %s, Wd: %v, Cookie: %v, Mask: %b, %v",
e.Path, e.Name, e.Wd, e.Cookie, e.Mask, s)
}
// Watcher implements a watch service.
// It allows the user to handle events natively, but handles
// management of the event bus and delivery.
// User just provides a callback function.
type Watcher struct {
sl time.Duration
fd int
closed uint32
wds map[int32]string
flags map[string]uint32
mu sync.Mutex
fn func([]*WatchEvent)
ev chan []*WatchEvent
sysbufsize int
}
// NewWatcher returns a new Watcher instance.
// - bufsize: chan size (ie max number of batches available to be processed)
// - sysBufSize: syscall Inotify Buf size (ie max number of inotify events in each read)
// - sleepTime: sleep time between reads.
// Allows the system to coalesce events, and allows the user to handle events in batches.
// - fn: function to call for each batch of events read
func NewWatcher(bufsize, sysBufSize int, sleepTime time.Duration, fn func([]*WatchEvent),
) (w *Watcher, err error) {
fd, err := syscall.InotifyInit()
if err != nil {
return
}
if fd == -1 {
err = os.NewSyscallError("inotify_init", err)
return
}
if useNonBlock {
syscall.SetNonblock(fd, true)
}
w = &Watcher{
fd: fd,
fn: fn,
ev: make(chan []*WatchEvent, bufsize),
wds: make(map[int32]string),
flags: make(map[string]uint32),
sl: sleepTime,
sysbufsize: sysBufSize,
}
go w.readEvents()
go w.handleEvents()
return
}
func (w *Watcher) AddAll(ignoreErr, clear, recursive bool, flags uint32, fpaths ...string) error {
if atomic.LoadUint32(&w.closed) != 0 {
return ClosedErr
}
w.mu.Lock()
defer w.mu.Unlock()
var merr errorutil.Multi
fnErr := func(err error, message string, params ...interface{}) bool {
if ignoreErr {
log.IfError(nil, err, message, params...)
return false
} else {
errorutil.OnErrorf(&err, message, params...)
merr = append(merr, err)
return true
}
}
if clear && fnErr(w.clear(), "Error clearing Watcher") {
return merr.NonNilError()
}
for _, fpath := range fpaths {
var walkDoneErr = errorutil.String("DONE")
first := true
walkFn := func(f string, info os.FileInfo, inerr error) error {
if first || info.Mode().IsDir() {
if fnErr(w.add(f, flags), "Error adding path: %s", f) {
return walkDoneErr
}
}
if first && !recursive {
return walkDoneErr
}
first = false
return nil
}
if err := filepath.Walk(fpath, walkFn); err == walkDoneErr {
break
}
}
return merr.NonNilError()
}
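// For example (illustrative, not in the original file): AddAll(true, false,
// true, 0, "/srv/data") walks /srv/data recursively, adds a watch with the
// default flags to the root and to every directory found, and logs rather
// than returns any per-path errors because ignoreErr is true.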
func (w *Watcher) Add(fpath string, flags uint32) error {
if atomic.LoadUint32(&w.closed) != 0 {
return ClosedErr
}
w.mu.Lock()
defer w.mu.Unlock()
return w.add(fpath, flags)
}
func (w *Watcher) add(fpath string, flags uint32) error {
if flags == 0 {
flags =
// delete: false
syscall.IN_CREATE |
syscall.IN_CLOSE_WRITE |
syscall.IN_MOVED_TO |
// delete: true
syscall.IN_MOVED_FROM |
syscall.IN_DELETE |
syscall.IN_DELETE_SELF |
syscall.IN_MOVE_SELF
}
// user can add more flags by passing the syscall.IN_MASK_ADD
wd, err := syscall.InotifyAddWatch(w.fd, fpath, flags)
if err != nil {
errorutil.OnErrorf(&err, "Error adding watch for path: %s", fpath)
return err
}
w.wds[int32(wd)] = fpath
w.flags[fpath] = flags
return nil
}
func (w *Watcher) Remove(fpath string) (err error) {
if atomic.LoadUint32(&w.closed) != 0 {
return ClosedErr
}
w.mu.Lock()
defer w.mu.Unlock()
return w.remove(fpath)
}
func (w *Watcher) remove(fpath string) (err error) {
for k, v := range w.wds {
if v == fpath {
_, err = syscall.InotifyRmWatch(w.fd, uint32(k))
delete(w.wds, k)
delete(w.flags, v)
break
}
}
return
}
func (w *Watcher) Clear() error {
if atomic.LoadUint32(&w.closed) != 0 {
return ClosedErr
}
w.mu.Lock()
defer w.mu.Unlock()
return w.clear()
}
func (w *Watcher) clear() error {
var merr errorutil.Multi
for k, v := range w.wds {
if _, err := syscall.InotifyRmWatch(w.fd, uint32(k)); err != nil {
errorutil.OnErrorf(&err, "Error removing watch for path: %s", v)
merr = append(merr, err)
}
}
w.wds = make(map[int32]string)
w.flags = make(map[string]uint32)
return merr.NonNilError()
}
func (w *Watcher) Close() (err error) {
// Note that, with blocking read, Close is best effort. This is because read in linux
// does not error if the file descriptor is closed. Thus the "read" syscall may not unblock.
//
// To mitigate, we use select AND NonBlocking IO during the read.
if !atomic.CompareAndSwapUint32(&w.closed, 0, 1) {
return
}
w.mu.Lock()
defer w.mu.Unlock()
w.clear()
close(w.ev)
if !(useSelect || useNonBlock) {
log.IfError(nil, syscall.Close(w.fd), "Error closing Watcher")
}
return nil
}
func (w *Watcher) readEvents() {
if useSelect || useNonBlock {
defer func() {
log.IfError(nil, syscall.Close(w.fd), "Error closing Watcher")
}()
}
// inotify events come very quickly, so we can't handle them inline.
// Instead, we grab the list of events we read and put them on a queue.
// This way, we can still work on a bunch of events at the same time.
var buf = make([]byte, syscall.SizeofInotifyEvent*w.sysbufsize)
for {
// always check closed right before syscalls (read/select/sleep), to minimize chance
// of race condition where fd is closed, OS assigns to someone else, and we try to read.
// slight sleep gives a chance to coalesce similar events into one
if w.sl != 0 {
if atomic.LoadUint32(&w.closed) != 0 {
return
}
time.Sleep(w.sl)
}
if useSelect {
if atomic.LoadUint32(&w.closed) != 0 {
return
}
// println(">>>>> select: Checking to read")
fdset := new | const useSelect = true
// Use non-block, so we don't block on read. | random_line_split |
inotify_linux_2.go | of moves, etc.
import (
"fmt"
"os"
"path/filepath"
"strings"
"sync"
"sync/atomic"
"syscall"
"time"
"unsafe"
"github.com/ugorji/go-common/errorutil"
"github.com/ugorji/go-common/logging"
)
var log = logging.PkgLogger()
var ClosedErr = errorutil.String("<watcher closed>")
// Use select, so read doesn't block
const useSelect = true
// Use non-block, so we don't block on read.
const useNonBlock = true
type WatchEvent struct {
syscall.InotifyEvent
Path string
Name string
}
func (e *WatchEvent) String() string {
s := make([]string, 0, 4)
x := e.Mask
if x&syscall.IN_ISDIR != 0 {
s = append(s, "IN_ISDIR")
}
if x&syscall.IN_CREATE != 0 {
s = append(s, "IN_CREATE")
}
if x&syscall.IN_CLOSE_WRITE != 0 {
s = append(s, "IN_CLOSE_WRITE")
}
if x&syscall.IN_MOVED_TO != 0 {
s = append(s, "IN_MOVED_TO")
}
if x&syscall.IN_MOVED_FROM != 0 {
s = append(s, "IN_MOVED_FROM")
}
if x&syscall.IN_DELETE != 0 {
s = append(s, "IN_DELETE")
}
if x&syscall.IN_DELETE_SELF != 0 {
s = append(s, "IN_DELETE_SELF")
}
if x&syscall.IN_MOVE_SELF != 0 {
s = append(s, "IN_MOVE_SELF")
}
return fmt.Sprintf("WatchEvent: Path: %s, Name: %s, Wd: %v, Cookie: %v, Mask: %b, %v",
e.Path, e.Name, e.Wd, e.Cookie, e.Mask, s)
}
// Watcher implements a watch service.
// It allows the user to handle events natively, but handles
// management of the event bus and delivery.
// User just provides a callback function.
type Watcher struct {
sl time.Duration
fd int
closed uint32
wds map[int32]string
flags map[string]uint32
mu sync.Mutex
fn func([]*WatchEvent)
ev chan []*WatchEvent
sysbufsize int
}
// NewWatcher returns a new Watcher instance.
// - bufsize: chan size (ie max number of batches available to be processed)
// - sysBufSize: syscall Inotify Buf size (ie max number of inotify events in each read)
// - sleepTime: sleep time between reads.
// Allows the system to coalesce events, and allows the user to handle events in batches.
// - fn: function to call for each batch of events read
func NewWatcher(bufsize, sysBufSize int, sleepTime time.Duration, fn func([]*WatchEvent),
) (w *Watcher, err error) {
fd, err := syscall.InotifyInit()
if err != nil {
return
}
if fd == -1 {
err = os.NewSyscallError("inotify_init", err)
return
}
if useNonBlock {
syscall.SetNonblock(fd, true)
}
w = &Watcher{
fd: fd,
fn: fn,
ev: make(chan []*WatchEvent, bufsize),
wds: make(map[int32]string),
flags: make(map[string]uint32),
sl: sleepTime,
sysbufsize: sysBufSize,
}
go w.readEvents()
go w.handleEvents()
return
}
func (w *Watcher) AddAll(ignoreErr, clear, recursive bool, flags uint32, fpaths ...string) error {
if atomic.LoadUint32(&w.closed) != 0 {
return ClosedErr
}
w.mu.Lock()
defer w.mu.Unlock()
var merr errorutil.Multi
fnErr := func(err error, message string, params ...interface{}) bool {
if ignoreErr {
log.IfError(nil, err, message, params...)
return false
} else {
errorutil.OnErrorf(&err, message, params...)
merr = append(merr, err)
return true
}
}
if clear && fnErr(w.clear(), "Error clearing Watcher") {
return merr.NonNilError()
}
for _, fpath := range fpaths {
var walkDoneErr = errorutil.String("DONE")
first := true
walkFn := func(f string, info os.FileInfo, inerr error) error {
if first || info.Mode().IsDir() {
if fnErr(w.add(f, flags), "Error adding path: %s", f) {
return walkDoneErr
}
}
if first && !recursive {
return walkDoneErr
}
first = false
return nil
}
if err := filepath.Walk(fpath, walkFn); err == walkDoneErr {
break
}
}
return merr.NonNilError()
}
func (w *Watcher) | (fpath string, flags uint32) error {
if atomic.LoadUint32(&w.closed) != 0 {
return ClosedErr
}
w.mu.Lock()
defer w.mu.Unlock()
return w.add(fpath, flags)
}
func (w *Watcher) add(fpath string, flags uint32) error {
if flags == 0 {
flags =
// delete: false
syscall.IN_CREATE |
syscall.IN_CLOSE_WRITE |
syscall.IN_MOVED_TO |
// delete: true
syscall.IN_MOVED_FROM |
syscall.IN_DELETE |
syscall.IN_DELETE_SELF |
syscall.IN_MOVE_SELF
}
// user can add more flags by passing the syscall.IN_MASK_ADD
wd, err := syscall.InotifyAddWatch(w.fd, fpath, flags)
if err != nil {
errorutil.OnErrorf(&err, "Error adding watch for path: %s", fpath)
return err
}
w.wds[int32(wd)] = fpath
w.flags[fpath] = flags
return nil
}
func (w *Watcher) Remove(fpath string) (err error) {
if atomic.LoadUint32(&w.closed) != 0 {
return ClosedErr
}
w.mu.Lock()
defer w.mu.Unlock()
return w.remove(fpath)
}
func (w *Watcher) remove(fpath string) (err error) {
for k, v := range w.wds {
if v == fpath {
_, err = syscall.InotifyRmWatch(w.fd, uint32(k))
delete(w.wds, k)
delete(w.flags, v)
break
}
}
return
}
func (w *Watcher) Clear() error {
if atomic.LoadUint32(&w.closed) != 0 {
return ClosedErr
}
w.mu.Lock()
defer w.mu.Unlock()
return w.clear()
}
func (w *Watcher) clear() error {
var merr errorutil.Multi
for k, v := range w.wds {
if _, err := syscall.InotifyRmWatch(w.fd, uint32(k)); err != nil {
errorutil.OnErrorf(&err, "Error removing watch for path: %s", v)
merr = append(merr, err)
}
}
w.wds = make(map[int32]string)
w.flags = make(map[string]uint32)
return merr.NonNilError()
}
func (w *Watcher) Close() (err error) {
// Note that, with blocking read, Close is best effort. This is because read in linux
// does not error if the file descriptor is closed. Thus the "read" syscall may not unblock.
//
// To mitigate, we use select AND NonBlocking IO during the read.
if !atomic.CompareAndSwapUint32(&w.closed, 0, 1) {
return
}
w.mu.Lock()
defer w.mu.Unlock()
w.clear()
close(w.ev)
if !(useSelect || useNonBlock) {
log.IfError(nil, syscall.Close(w.fd), "Error closing Watcher")
}
return nil
}
func (w *Watcher) readEvents() {
if useSelect || useNonBlock {
defer func() {
log.IfError(nil, syscall.Close(w.fd), "Error closing Watcher")
}()
}
// inotify events come very quickly, so we can't handle them inline.
// Instead, we grab the list of events we read and put them on a queue.
// This way, we can still work on a bunch of events at the same time.
var buf = make([]byte, syscall.SizeofInotifyEvent*w.sysbufsize)
for {
// always check closed right before syscalls (read/select/sleep), to minimize chance
// of race condition where fd is closed, OS assigns to someone else, and we try to read.
// slight sleep gives a chance to coalesce similar events into one
if w.sl != 0 {
if atomic.LoadUint32(&w.closed) != 0 {
return
}
time.Sleep(w.sl)
}
if useSelect {
if atomic.LoadUint32(&w.closed) != 0 {
return
}
// println(">>>>> select: Checking to read")
fdset := | Add | identifier_name |
inotify_linux_2.go | .Duration, fn func([]*WatchEvent),
) (w *Watcher, err error) {
fd, err := syscall.InotifyInit()
if err != nil {
return
}
if fd == -1 {
err = os.NewSyscallError("inotify_init", err)
return
}
if useNonBlock {
syscall.SetNonblock(fd, true)
}
w = &Watcher{
fd: fd,
fn: fn,
ev: make(chan []*WatchEvent, bufsize),
wds: make(map[int32]string),
flags: make(map[string]uint32),
sl: sleepTime,
sysbufsize: sysBufSize,
}
go w.readEvents()
go w.handleEvents()
return
}
func (w *Watcher) AddAll(ignoreErr, clear, recursive bool, flags uint32, fpaths ...string) error {
if atomic.LoadUint32(&w.closed) != 0 {
return ClosedErr
}
w.mu.Lock()
defer w.mu.Unlock()
var merr errorutil.Multi
fnErr := func(err error, message string, params ...interface{}) bool {
if ignoreErr {
log.IfError(nil, err, message, params...)
return false
} else {
errorutil.OnErrorf(&err, message, params...)
merr = append(merr, err)
return true
}
}
if clear && fnErr(w.clear(), "Error clearing Watcher") {
return merr.NonNilError()
}
for _, fpath := range fpaths {
var walkDoneErr = errorutil.String("DONE")
first := true
walkFn := func(f string, info os.FileInfo, inerr error) error {
if first || info.Mode().IsDir() {
if fnErr(w.add(f, flags), "Error adding path: %s", f) {
return walkDoneErr
}
}
if first && !recursive {
return walkDoneErr
}
first = false
return nil
}
if err := filepath.Walk(fpath, walkFn); err == walkDoneErr {
break
}
}
return merr.NonNilError()
}
func (w *Watcher) Add(fpath string, flags uint32) error {
if atomic.LoadUint32(&w.closed) != 0 {
return ClosedErr
}
w.mu.Lock()
defer w.mu.Unlock()
return w.add(fpath, flags)
}
func (w *Watcher) add(fpath string, flags uint32) error {
if flags == 0 {
flags =
// delete: false
syscall.IN_CREATE |
syscall.IN_CLOSE_WRITE |
syscall.IN_MOVED_TO |
// delete: true
syscall.IN_MOVED_FROM |
syscall.IN_DELETE |
syscall.IN_DELETE_SELF |
syscall.IN_MOVE_SELF
}
// user can add more flags by passing the syscall.IN_MASK_ADD
wd, err := syscall.InotifyAddWatch(w.fd, fpath, flags)
if err != nil {
errorutil.OnErrorf(&err, "Error adding watch for path: %s", fpath)
return err
}
w.wds[int32(wd)] = fpath
w.flags[fpath] = flags
return nil
}
func (w *Watcher) Remove(fpath string) (err error) {
if atomic.LoadUint32(&w.closed) != 0 {
return ClosedErr
}
w.mu.Lock()
defer w.mu.Unlock()
return w.remove(fpath)
}
func (w *Watcher) remove(fpath string) (err error) {
for k, v := range w.wds {
if v == fpath {
_, err = syscall.InotifyRmWatch(w.fd, uint32(k))
delete(w.wds, k)
delete(w.flags, v)
break
}
}
return
}
func (w *Watcher) Clear() error {
if atomic.LoadUint32(&w.closed) != 0 {
return ClosedErr
}
w.mu.Lock()
defer w.mu.Unlock()
return w.clear()
}
func (w *Watcher) clear() error {
var merr errorutil.Multi
for k, v := range w.wds {
if _, err := syscall.InotifyRmWatch(w.fd, uint32(k)); err != nil {
errorutil.OnErrorf(&err, "Error removing watch for path: %s", v)
merr = append(merr, err)
}
}
w.wds = make(map[int32]string)
w.flags = make(map[string]uint32)
return merr.NonNilError()
}
func (w *Watcher) Close() (err error) {
// Note that, with blocking read, Close is best effort. This is because read in linux
// does not error if the file descriptor is closed. Thus the "read" syscall may not unblock.
//
// To mitigate, we use select AND NonBlocking IO during the read.
if !atomic.CompareAndSwapUint32(&w.closed, 0, 1) {
return
}
w.mu.Lock()
defer w.mu.Unlock()
w.clear()
close(w.ev)
if !(useSelect || useNonBlock) {
log.IfError(nil, syscall.Close(w.fd), "Error closing Watcher")
}
return nil
}
func (w *Watcher) readEvents() {
if useSelect || useNonBlock {
defer func() {
log.IfError(nil, syscall.Close(w.fd), "Error closing Watcher")
}()
}
// inotify events come very quickly, so we can't handle them inline.
// Instead, we grab the list of events we read and put them on a queue.
// This way, we can still work on a bunch of events at the same time.
var buf = make([]byte, syscall.SizeofInotifyEvent*w.sysbufsize)
for {
// always check closed right before syscalls (read/select/sleep), to minimize chance
// of race condition where fd is closed, OS assigns to someone else, and we try to read.
// slight sleep gives a chance to coalesce similar events into one
if w.sl != 0 {
if atomic.LoadUint32(&w.closed) != 0 {
return
}
time.Sleep(w.sl)
}
if useSelect {
if atomic.LoadUint32(&w.closed) != 0 {
return
}
// println(">>>>> select: Checking to read")
fdset := new(syscall.FdSet)
fdset.Bits[w.fd/64] |= 1 << (uint(w.fd) % 64) // FD_SET
// fdIsSet := (fdset.Bits[w.fd/64] & (1 << (uint(w.fd) % 64))) != 0 // FD_ISSET
// for i := range fdset.Bits { fdset.Bits[i] = 0 } // FD_ZERO
selTimeout := syscall.NsecToTimeval(int64(1 * time.Second))
num, err := syscall.Select(w.fd+1, fdset, nil, nil, &selTimeout)
// if err != nil || num == 0 {
if (fdset.Bits[w.fd/64] & (1 << (uint(w.fd) % 64))) == 0 { // FD_ISSET
log.IfError(nil, err, "Error during Watcher select, which returned: %d", num)
continue
}
// println(">>>>> select: will read")
}
if atomic.LoadUint32(&w.closed) != 0 {
return
}
n, err := syscall.Read(w.fd, buf[0:])
if useNonBlock && err == syscall.EAGAIN {
// println(">>>>> non-block: EAGAIN")
continue
}
// even if there is an error, see if any events already read and process them.
log.IfError(nil, err, "Error during Watcher read, which returned %d bytes", n)
if n == 0 {
break // EOF
}
if n < syscall.SizeofInotifyEvent {
continue // short read
}
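// Each record in buf is a fixed-size syscall.InotifyEvent header followed by
// raw.Len bytes of NUL-padded file name, which is why the loop below advances
// offset by SizeofInotifyEvent+raw.Len and trims trailing "\000" bytes.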
var offset uint32
wevs := make([]*WatchEvent, 0, n/(syscall.SizeofInotifyEvent*2))
for offset <= uint32(n-syscall.SizeofInotifyEvent) {
// raw.Wd, raw.Mask, raw.Cookie, raw.Len (all uint32)
raw := (*syscall.InotifyEvent)(unsafe.Pointer(&buf[offset]))
fpath := w.wds[raw.Wd]
// skip some events
if raw.Mask&syscall.IN_IGNORED != 0 ||
raw.Mask&syscall.IN_Q_OVERFLOW != 0 ||
raw.Mask&syscall.IN_UNMOUNT != 0 ||
fpath == "" {
offset += syscall.SizeofInotifyEvent + raw.Len
continue
}
wev := &WatchEvent{InotifyEvent: *raw, Path: fpath}
if raw.Len != 0 | {
bs := (*[syscall.PathMax]byte)(unsafe.Pointer(&buf[offset+syscall.SizeofInotifyEvent]))
wev.Name = strings.TrimRight(string(bs[0:raw.Len]), "\000")
} | conditional_block |
|
utils.go | .Logf("Found agent metadata: %+v", localMetadata)
return nil
}
func (agent *TestAgent) Cleanup() {
agent.StopAgent()
if agent.t.Failed() {
agent.t.Logf("Preserving test dir for failed test %s", agent.TestDir)
} else {
agent.t.Logf("Removing test dir for passed test %s", agent.TestDir)
os.RemoveAll(agent.TestDir)
}
ECS.DeregisterContainerInstance(&ecs.DeregisterContainerInstanceInput{
Cluster: &agent.Cluster,
ContainerInstance: &agent.ContainerInstanceArn,
Force: aws.Bool(true),
})
}
func (agent *TestAgent) StartMultipleTasks(t *testing.T, task string, num int) ([]*TestTask, error) {
td, err := GetTaskDefinition(task)
if err != nil {
return nil, err
}
t.Logf("Task definition: %s", td)
cis := make([]*string, num)
for i := 0; i < num; i++ {
cis[i] = &agent.ContainerInstanceArn
}
resp, err := ECS.StartTask(&ecs.StartTaskInput{
Cluster: &agent.Cluster,
ContainerInstances: cis,
TaskDefinition: &td,
})
if err != nil {
return nil, err
}
if len(resp.Failures) != 0 || len(resp.Tasks) == 0 {
return nil, errors.New("Failure starting task: " + *resp.Failures[0].Reason)
}
testTasks := make([]*TestTask, num)
for i, task := range resp.Tasks {
agent.t.Logf("Started task: %s\n", *task.TaskArn)
testTasks[i] = &TestTask{task}
}
return testTasks, nil
}
func (agent *TestAgent) StartTask(t *testing.T, task string) (*TestTask, error) {
tasks, err := agent.StartMultipleTasks(t, task, 1)
if err != nil {
return nil, err
}
return tasks[0], nil
}
func (agent *TestAgent) StartTaskWithOverrides(t *testing.T, task string, overrides []*ecs.ContainerOverride) (*TestTask, error) {
td, err := GetTaskDefinition(task)
if err != nil {
return nil, err
}
t.Logf("Task definition: %s", td)
resp, err := ECS.StartTask(&ecs.StartTaskInput{
Cluster: &agent.Cluster,
ContainerInstances: []*string{&agent.ContainerInstanceArn},
TaskDefinition: &td,
Overrides: &ecs.TaskOverride{
ContainerOverrides: overrides,
},
})
if err != nil {
return nil, err
}
if len(resp.Failures) != 0 || len(resp.Tasks) == 0 {
return nil, errors.New("Failure starting task: " + *resp.Failures[0].Reason)
}
agent.t.Logf("Started task: %s\n", *resp.Tasks[0].TaskArn)
return &TestTask{resp.Tasks[0]}, nil
}
// ResolveTaskDockerID determines the Docker ID for a container within a given
// task that has been run by the Agent.
func (agent *TestAgent) ResolveTaskDockerID(task *TestTask, containerName string) (string, error) {
var err error
var dockerId string
for i := 0; i < 5; i++ {
dockerId, err = agent.resolveTaskDockerID(task, containerName)
if err == nil {
break
}
time.Sleep(100 * time.Millisecond)
}
return dockerId, err
}
func (agent *TestAgent) resolveTaskDockerID(task *TestTask, containerName string) (string, error) {
bodyData, err := agent.callTaskIntrospectionApi(*task.TaskArn)
if err != nil {
return "", err
}
var taskResp handlers.TaskResponse
err = json.Unmarshal(*bodyData, &taskResp)
if err != nil {
return "", err
}
if len(taskResp.Containers) == 0 {
return "", errors.New("No containers in task response")
}
for _, container := range taskResp.Containers {
if container.Name == containerName {
return container.DockerId, nil
}
}
return "", errors.New("No containers matched given name")
}
func (agent *TestAgent) WaitStoppedViaIntrospection(task *TestTask) (bool, error) {
var err error
var isStopped bool
for i := 0; i < 5; i++ {
isStopped, err = agent.waitStoppedViaIntrospection(task)
if err == nil {
break
}
time.Sleep(100 * time.Millisecond)
}
return isStopped, err
}
func (agent *TestAgent) waitStoppedViaIntrospection(task *TestTask) (bool, error) {
rawResponse, err := agent.callTaskIntrospectionApi(*task.TaskArn)
if err != nil {
return false, err
}
var taskResp handlers.TaskResponse
err = json.Unmarshal(*rawResponse, &taskResp)
if taskResp.KnownStatus == "STOPPED" {
return true, nil
} else {
return false, errors.New("Task should be STOPPED but is " + taskResp.KnownStatus)
}
}
func (agent *TestAgent) WaitRunningViaIntrospection(task *TestTask) (bool, error) {
var err error
var isRunning bool
for i := 0; i < 5; i++ {
isRunning, err = agent.waitRunningViaIntrospection(task)
if err == nil && isRunning {
break
}
time.Sleep(10000 * time.Millisecond)
}
return isRunning, err
}
func (agent *TestAgent) waitRunningViaIntrospection(task *TestTask) (bool, error) {
rawResponse, err := agent.callTaskIntrospectionApi(*task.TaskArn)
if err != nil {
return false, err
}
var taskResp handlers.TaskResponse
err = json.Unmarshal(*rawResponse, &taskResp)
if taskResp.KnownStatus == "RUNNING" {
return true, nil
} else {
return false, errors.New("Task should be RUNNING but is " + taskResp.KnownStatus)
}
}
func (agent *TestAgent) callTaskIntrospectionApi(taskArn string) (*[]byte, error) {
fullIntrospectionApiURL := agent.IntrospectionURL + "/v1/tasks"
if taskArn != "" {
fullIntrospectionApiURL += "?taskarn=" + taskArn
}
agentTasksResp, err := http.Get(fullIntrospectionApiURL)
if err != nil {
return nil, err
}
bodyData, err := ioutil.ReadAll(agentTasksResp.Body)
if err != nil {
return nil, err
}
return &bodyData, nil
}
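// The introspection call above hits the agent's local HTTP API, e.g.
// GET http://localhost:<port>/v1/tasks?taskarn=<arn>, with the port taken from
// the container's 51678/tcp binding set up elsewhere in this file.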
func (agent *TestAgent) RequireVersion(version string) {
if agent.Version == "UNKNOWN" {
agent.t.Skipf("Skipping test requiring version %v; agent version unknown", version)
}
matches, err := Version(agent.Version).Matches(version)
if err != nil {
agent.t.Skipf("Skipping test requiring version %v; could not compare because of error: %v", version, err)
}
if !matches {
agent.t.Skipf("Skipping test requiring version %v; agent version %v", version, agent.Version)
}
}
type TestTask struct {
*ecs.Task
}
func (task *TestTask) Redescribe() {
res, err := ECS.DescribeTasks(&ecs.DescribeTasksInput{
Cluster: task.ClusterArn,
Tasks: []*string{task.TaskArn},
})
if err == nil && len(res.Failures) == 0 {
task.Task = res.Tasks[0]
}
}
func (task *TestTask) waitStatus(timeout time.Duration, status string) error {
timer := time.NewTimer(timeout)
atStatus := make(chan error, 1)
cancelled := false
go func() {
if *task.LastStatus == "STOPPED" && status != "STOPPED" {
atStatus <- errors.New("Task terminal; will never reach " + status)
return
}
for *task.LastStatus != status && !cancelled {
task.Redescribe()
if *task.LastStatus == status {
break
}
if *task.LastStatus == "STOPPED" && status != "STOPPED" {
atStatus <- errors.New("Task terminal; will never reach " + status)
return
}
time.Sleep(5 * time.Second)
}
atStatus <- nil
}()
select {
case err := <-atStatus:
return err
case <-timer.C:
cancelled = true
return errors.New("Timed out waiting for task to reach " + status + ": " + *task.TaskDefinitionArn + ", " + *task.TaskArn)
}
}
func (task *TestTask) ContainerExitcode(name string) (int, bool) {
for _, cont := range task.Containers {
if cont != nil && cont.Name != nil && cont.ExitCode != nil {
if *cont.Name == name {
return int(*cont.ExitCode), true
}
}
}
return 0, false
}
func (task *TestTask) | WaitRunning | identifier_name |
|
utils.go | (registerRequest)
if err != nil {
return "", err
}
return fmt.Sprintf("%s:%d", *registered.TaskDefinition.Family, *registered.TaskDefinition.Revision), nil
}
type TestAgent struct {
Image string
DockerID string
IntrospectionURL string
Version string
ContainerInstanceArn string
Cluster string
TestDir string
Logdir string
Options *AgentOptions
DockerClient *docker.Client
t *testing.T
}
type AgentOptions struct {
ExtraEnvironment map[string]string
ContainerLinks []string
}
// RunAgent launches the agent and returns an object which may be used to reference it.
// It will wait until the agent is correctly registered before returning.
// 'version' may be a docker image (e.g. amazon/amazon-ecs-agent:v1.0.0) with
// a tag that may be used to run the agent. It defaults to
// 'amazon/amazon-ecs-agent:make', the version created locally by running
// 'make'
func RunAgent(t *testing.T, options *AgentOptions) *TestAgent {
agent := &TestAgent{t: t}
agentImage := "amazon/amazon-ecs-agent:make"
if envImage := os.Getenv("ECS_AGENT_IMAGE"); envImage != "" {
agentImage = envImage
}
agent.Image = agentImage
dockerClient, err := docker.NewClientFromEnv()
if err != nil {
t.Fatal(err)
}
agent.DockerClient = dockerClient
_, err = dockerClient.InspectImage(agentImage)
if err != nil {
err = dockerClient.PullImage(docker.PullImageOptions{Repository: agentImage}, docker.AuthConfiguration{})
if err != nil {
t.Fatal("Could not launch agent", err)
}
}
tmpdirOverride := os.Getenv("ECS_FTEST_TMP")
agentTempdir, err := ioutil.TempDir(tmpdirOverride, "ecs_integ_testdata")
if err != nil {
t.Fatal("Could not create temp dir for test")
}
logdir := filepath.Join(agentTempdir, "logs")
datadir := filepath.Join(agentTempdir, "data")
os.Mkdir(logdir, 0755)
os.Mkdir(datadir, 0755)
agent.TestDir = agentTempdir
agent.Options = options
if options == nil {
agent.Options = &AgentOptions{}
}
t.Logf("Created directory %s to store test data in", agentTempdir)
err = agent.StartAgent()
if err != nil {
t.Fatal(err)
}
return agent
}
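// Typical use in a functional test (an illustrative sketch; the task
// definition name is made up):
//
//	agent := RunAgent(t, nil)
//	defer agent.Cleanup()
//	task, err := agent.StartTask(t, "simple-exit")
//	if err != nil {
//		t.Fatal(err)
//	}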
func (agent *TestAgent) StopAgent() error {
return agent.DockerClient.StopContainer(agent.DockerID, 10)
}
func (agent *TestAgent) StartAgent() error {
agent.t.Logf("Launching agent with image: %s\n", agent.Image)
logdir := filepath.Join(agent.TestDir, "logs")
datadir := filepath.Join(agent.TestDir, "data")
agent.Logdir = logdir
dockerConfig := &docker.Config{
Image: agent.Image,
ExposedPorts: map[docker.Port]struct{}{
"51678/tcp": struct{}{},
},
Env: []string{
"ECS_CLUSTER=" + Cluster,
"ECS_DATADIR=/data",
"ECS_LOGLEVEL=debug",
"ECS_LOGFILE=/logs/integ_agent.log",
"ECS_BACKEND_HOST=" + os.Getenv("ECS_BACKEND_HOST"),
"AWS_ACCESS_KEY_ID=" + os.Getenv("AWS_ACCESS_KEY_ID"),
"AWS_DEFAULT_REGION=" + *ECS.Config.Region,
"AWS_SECRET_ACCESS_KEY=" + os.Getenv("AWS_SECRET_ACCESS_KEY"),
"ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION=" + os.Getenv("ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION"),
},
Cmd: strings.Split(os.Getenv("ECS_FTEST_AGENT_ARGS"), " "),
}
hostConfig := &docker.HostConfig{
Binds: []string{
"/var/run/docker.sock:/var/run/docker.sock",
logdir + ":/logs",
datadir + ":/data",
},
PortBindings: map[docker.Port][]docker.PortBinding{
"51678/tcp": []docker.PortBinding{docker.PortBinding{HostIP: "0.0.0.0"}},
},
Links: agent.Options.ContainerLinks,
}
if agent.Options != nil {
for key, value := range agent.Options.ExtraEnvironment {
dockerConfig.Env = append(dockerConfig.Env, key+"="+value)
}
}
agentContainer, err := agent.DockerClient.CreateContainer(docker.CreateContainerOptions{
Config: dockerConfig,
HostConfig: hostConfig,
})
if err != nil {
agent.t.Fatal("Could not create agent container", err)
}
agent.DockerID = agentContainer.ID
agent.t.Logf("Agent started as docker container: %s\n", agentContainer.ID)
err = agent.DockerClient.StartContainer(agentContainer.ID, nil)
if err != nil {
return errors.New("Could not start agent container " + err.Error())
}
containerMetadata, err := agent.DockerClient.InspectContainer(agentContainer.ID)
if err != nil {
return errors.New("Could not inspect agent container: " + err.Error())
}
agent.IntrospectionURL = "http://localhost:" + containerMetadata.NetworkSettings.Ports["51678/tcp"][0].HostPort
// Wait up to 10s for it to register
var localMetadata handlers.MetadataResponse
for i := 0; i < 10; i++ {
func() {
agentMetadataResp, err := http.Get(agent.IntrospectionURL + "/v1/metadata")
if err != nil {
return
}
metadata, err := ioutil.ReadAll(agentMetadataResp.Body)
if err != nil {
return
}
json.Unmarshal(metadata, &localMetadata)
}()
if localMetadata.ContainerInstanceArn != nil && *localMetadata.ContainerInstanceArn != "" {
break
}
time.Sleep(1 * time.Second)
}
if localMetadata.ContainerInstanceArn == nil {
agent.DockerClient.StopContainer(agent.DockerID, 1)
return errors.New("Could not get agent metadata after launching it")
}
agent.ContainerInstanceArn = *localMetadata.ContainerInstanceArn
agent.Cluster = localMetadata.Cluster
if localMetadata.Version != "" {
versionNumberRegex := regexp.MustCompile(` v(\d+\.\d+\.\d+) `)
versionNumberStr := versionNumberRegex.FindStringSubmatch(localMetadata.Version)
if len(versionNumberStr) == 2 |
}
if agent.Version == "" {
agent.Version = "UNKNOWN"
}
agent.t.Logf("Found agent metadata: %+v", localMetadata)
return nil
}
func (agent *TestAgent) Cleanup() {
agent.StopAgent()
if agent.t.Failed() {
agent.t.Logf("Preserving test dir for failed test %s", agent.TestDir)
} else {
agent.t.Logf("Removing test dir for passed test %s", agent.TestDir)
os.RemoveAll(agent.TestDir)
}
ECS.DeregisterContainerInstance(&ecs.DeregisterContainerInstanceInput{
Cluster: &agent.Cluster,
ContainerInstance: &agent.ContainerInstanceArn,
Force: aws.Bool(true),
})
}
func (agent *TestAgent) StartMultipleTasks(t *testing.T, task string, num int) ([]*TestTask, error) {
td, err := GetTaskDefinition(task)
if err != nil {
return nil, err
}
t.Logf("Task definition: %s", td)
cis := make([]*string, num)
for i := 0; i < num; i++ {
cis[i] = &agent.ContainerInstanceArn
}
resp, err := ECS.StartTask(&ecs.StartTaskInput{
Cluster: &agent.Cluster,
ContainerInstances: cis,
TaskDefinition: &td,
})
if err != nil {
return nil, err
}
if len(resp.Failures) != 0 || len(resp.Tasks) == 0 {
return nil, errors.New("Failure starting task: " + *resp.Failures[0].Reason)
}
testTasks := make([]*TestTask, num)
for i, task := range resp.Tasks {
agent.t.Logf("Started task: %s\n", *task.TaskArn)
testTasks[i] = &TestTask{task}
}
return testTasks, nil
}
func (agent *TestAgent) StartTask(t *testing.T, task string) (*TestTask, error) {
tasks, err := agent.StartMultipleTasks(t, task, 1)
if err != nil {
return nil, err
}
return tasks[0], nil
}
func (agent *TestAgent) StartTaskWithOverrides(t *testing.T, task string, overrides []*ecs.ContainerOverride) (*TestTask, error) {
td, err := GetTaskDefinition(task)
if err != nil {
return nil, err
}
t.Logf("Task definition: %s", td)
resp, err := ECS.StartTask(&ecs.StartTaskInput{
Cluster: &agent.Cluster,
ContainerInstances: []*string{&agent.ContainerInstanceArn},
| {
agent.Version = string(versionNumberStr[1])
} | conditional_block |
utils.go | (registerRequest)
if err != nil {
return "", err
}
return fmt.Sprintf("%s:%d", *registered.TaskDefinition.Family, *registered.TaskDefinition.Revision), nil
}
type TestAgent struct {
Image string
DockerID string
IntrospectionURL string
Version string
ContainerInstanceArn string
Cluster string
TestDir string
Logdir string
Options *AgentOptions
DockerClient *docker.Client
t *testing.T
}
type AgentOptions struct {
ExtraEnvironment map[string]string
ContainerLinks []string
}
// RunAgent launches the agent and returns an object which may be used to reference it.
// It will wait until the agent is correctly registered before returning.
// 'version' may be a docker image (e.g. amazon/amazon-ecs-agent:v1.0.0) with
// a tag that may be used to run the agent. It defaults to
// 'amazon/amazon-ecs-agent:make', the version created locally by running
// 'make'
func RunAgent(t *testing.T, options *AgentOptions) *TestAgent {
agent := &TestAgent{t: t}
agentImage := "amazon/amazon-ecs-agent:make"
if envImage := os.Getenv("ECS_AGENT_IMAGE"); envImage != "" {
agentImage = envImage
}
agent.Image = agentImage
dockerClient, err := docker.NewClientFromEnv()
if err != nil {
t.Fatal(err)
}
agent.DockerClient = dockerClient
_, err = dockerClient.InspectImage(agentImage)
if err != nil {
err = dockerClient.PullImage(docker.PullImageOptions{Repository: agentImage}, docker.AuthConfiguration{})
if err != nil {
t.Fatal("Could not launch agent", err)
}
}
tmpdirOverride := os.Getenv("ECS_FTEST_TMP")
agentTempdir, err := ioutil.TempDir(tmpdirOverride, "ecs_integ_testdata")
if err != nil {
t.Fatal("Could not create temp dir for test")
}
logdir := filepath.Join(agentTempdir, "logs")
datadir := filepath.Join(agentTempdir, "data")
os.Mkdir(logdir, 0755)
os.Mkdir(datadir, 0755)
agent.TestDir = agentTempdir
agent.Options = options
if options == nil {
agent.Options = &AgentOptions{}
}
t.Logf("Created directory %s to store test data in", agentTempdir)
err = agent.StartAgent()
if err != nil {
t.Fatal(err)
}
return agent
}
func (agent *TestAgent) StopAgent() error {
return agent.DockerClient.StopContainer(agent.DockerID, 10)
}
func (agent *TestAgent) StartAgent() error {
agent.t.Logf("Launching agent with image: %s\n", agent.Image)
logdir := filepath.Join(agent.TestDir, "logs")
datadir := filepath.Join(agent.TestDir, "data")
agent.Logdir = logdir
dockerConfig := &docker.Config{
Image: agent.Image,
ExposedPorts: map[docker.Port]struct{}{
"51678/tcp": struct{}{},
},
Env: []string{
"ECS_CLUSTER=" + Cluster,
"ECS_DATADIR=/data",
"ECS_LOGLEVEL=debug",
"ECS_LOGFILE=/logs/integ_agent.log",
"ECS_BACKEND_HOST=" + os.Getenv("ECS_BACKEND_HOST"),
"AWS_ACCESS_KEY_ID=" + os.Getenv("AWS_ACCESS_KEY_ID"),
"AWS_DEFAULT_REGION=" + *ECS.Config.Region,
"AWS_SECRET_ACCESS_KEY=" + os.Getenv("AWS_SECRET_ACCESS_KEY"),
"ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION=" + os.Getenv("ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION"),
},
Cmd: strings.Split(os.Getenv("ECS_FTEST_AGENT_ARGS"), " "),
}
hostConfig := &docker.HostConfig{
Binds: []string{
"/var/run/docker.sock:/var/run/docker.sock",
logdir + ":/logs",
datadir + ":/data",
},
PortBindings: map[docker.Port][]docker.PortBinding{
"51678/tcp": []docker.PortBinding{docker.PortBinding{HostIP: "0.0.0.0"}},
},
Links: agent.Options.ContainerLinks,
}
if agent.Options != nil {
for key, value := range agent.Options.ExtraEnvironment {
dockerConfig.Env = append(dockerConfig.Env, key+"="+value)
}
}
agentContainer, err := agent.DockerClient.CreateContainer(docker.CreateContainerOptions{
Config: dockerConfig,
HostConfig: hostConfig,
})
if err != nil {
agent.t.Fatal("Could not create agent container", err)
}
agent.DockerID = agentContainer.ID
agent.t.Logf("Agent started as docker container: %s\n", agentContainer.ID)
err = agent.DockerClient.StartContainer(agentContainer.ID, nil)
if err != nil {
return errors.New("Could not start agent container " + err.Error())
}
containerMetadata, err := agent.DockerClient.InspectContainer(agentContainer.ID)
if err != nil {
return errors.New("Could not inspect agent container: " + err.Error())
}
agent.IntrospectionURL = "http://localhost:" + containerMetadata.NetworkSettings.Ports["51678/tcp"][0].HostPort
// Wait up to 10s for it to register
var localMetadata handlers.MetadataResponse
for i := 0; i < 10; i++ {
func() {
agentMetadataResp, err := http.Get(agent.IntrospectionURL + "/v1/metadata")
if err != nil {
return
}
metadata, err := ioutil.ReadAll(agentMetadataResp.Body)
if err != nil {
return
}
json.Unmarshal(metadata, &localMetadata)
}()
if localMetadata.ContainerInstanceArn != nil && *localMetadata.ContainerInstanceArn != "" {
break
}
time.Sleep(1 * time.Second)
}
if localMetadata.ContainerInstanceArn == nil {
agent.DockerClient.StopContainer(agent.DockerID, 1)
return errors.New("Could not get agent metadata after launching it")
}
agent.ContainerInstanceArn = *localMetadata.ContainerInstanceArn
agent.Cluster = localMetadata.Cluster
if localMetadata.Version != "" {
versionNumberRegex := regexp.MustCompile(` v(\d+\.\d+\.\d+) `)
versionNumberStr := versionNumberRegex.FindStringSubmatch(localMetadata.Version)
if len(versionNumberStr) == 2 {
agent.Version = string(versionNumberStr[1])
}
}
if agent.Version == "" {
agent.Version = "UNKNOWN"
}
agent.t.Logf("Found agent metadata: %+v", localMetadata)
return nil
}
func (agent *TestAgent) Cleanup() {
agent.StopAgent()
if agent.t.Failed() {
agent.t.Logf("Preserving test dir for failed test %s", agent.TestDir)
} else {
agent.t.Logf("Removing test dir for passed test %s", agent.TestDir)
os.RemoveAll(agent.TestDir)
}
ECS.DeregisterContainerInstance(&ecs.DeregisterContainerInstanceInput{
Cluster: &agent.Cluster,
ContainerInstance: &agent.ContainerInstanceArn,
Force: aws.Bool(true),
})
}
func (agent *TestAgent) StartMultipleTasks(t *testing.T, task string, num int) ([]*TestTask, error) {
td, err := GetTaskDefinition(task)
if err != nil {
return nil, err
}
t.Logf("Task definition: %s", td)
cis := make([]*string, num)
for i := 0; i < num; i++ {
cis[i] = &agent.ContainerInstanceArn
}
resp, err := ECS.StartTask(&ecs.StartTaskInput{
Cluster: &agent.Cluster,
ContainerInstances: cis,
TaskDefinition: &td,
})
if err != nil {
return nil, err
}
if len(resp.Failures) != 0 || len(resp.Tasks) == 0 {
return nil, errors.New("Failure starting task: " + *resp.Failures[0].Reason)
}
testTasks := make([]*TestTask, num)
for i, task := range resp.Tasks {
agent.t.Logf("Started task: %s\n", *task.TaskArn)
testTasks[i] = &TestTask{task}
}
return testTasks, nil
}
func (agent *TestAgent) StartTask(t *testing.T, task string) (*TestTask, error) |
func (agent *TestAgent) StartTaskWithOverrides(t *testing.T, task string, overrides []*ecs.ContainerOverride) (*TestTask, error) {
td, err := GetTaskDefinition(task)
if err != nil {
return nil, err
}
t.Logf("Task definition: %s", td)
resp, err := ECS.StartTask(&ecs.StartTaskInput{
Cluster: &agent.Cluster,
ContainerInstances: []*string{&agent.ContainerInstanceArn},
| {
tasks, err := agent.StartMultipleTasks(t, task, 1)
if err != nil {
return nil, err
}
return tasks[0], nil
} | identifier_body |
utils.go | = dockerClient
_, err = dockerClient.InspectImage(agentImage)
if err != nil {
err = dockerClient.PullImage(docker.PullImageOptions{Repository: agentImage}, docker.AuthConfiguration{})
if err != nil {
t.Fatal("Could not launch agent", err)
}
}
tmpdirOverride := os.Getenv("ECS_FTEST_TMP")
agentTempdir, err := ioutil.TempDir(tmpdirOverride, "ecs_integ_testdata")
if err != nil {
t.Fatal("Could not create temp dir for test")
}
logdir := filepath.Join(agentTempdir, "logs")
datadir := filepath.Join(agentTempdir, "data")
os.Mkdir(logdir, 0755)
os.Mkdir(datadir, 0755)
agent.TestDir = agentTempdir
agent.Options = options
if options == nil {
agent.Options = &AgentOptions{}
}
t.Logf("Created directory %s to store test data in", agentTempdir)
err = agent.StartAgent()
if err != nil {
t.Fatal(err)
}
return agent
}
func (agent *TestAgent) StopAgent() error {
return agent.DockerClient.StopContainer(agent.DockerID, 10)
}
func (agent *TestAgent) StartAgent() error {
agent.t.Logf("Launching agent with image: %s\n", agent.Image)
logdir := filepath.Join(agent.TestDir, "logs")
datadir := filepath.Join(agent.TestDir, "data")
agent.Logdir = logdir
dockerConfig := &docker.Config{
Image: agent.Image,
ExposedPorts: map[docker.Port]struct{}{
"51678/tcp": struct{}{},
},
Env: []string{
"ECS_CLUSTER=" + Cluster,
"ECS_DATADIR=/data",
"ECS_LOGLEVEL=debug",
"ECS_LOGFILE=/logs/integ_agent.log",
"ECS_BACKEND_HOST=" + os.Getenv("ECS_BACKEND_HOST"),
"AWS_ACCESS_KEY_ID=" + os.Getenv("AWS_ACCESS_KEY_ID"),
"AWS_DEFAULT_REGION=" + *ECS.Config.Region,
"AWS_SECRET_ACCESS_KEY=" + os.Getenv("AWS_SECRET_ACCESS_KEY"),
"ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION=" + os.Getenv("ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION"),
},
Cmd: strings.Split(os.Getenv("ECS_FTEST_AGENT_ARGS"), " "),
}
hostConfig := &docker.HostConfig{
Binds: []string{
"/var/run/docker.sock:/var/run/docker.sock",
logdir + ":/logs",
datadir + ":/data",
},
PortBindings: map[docker.Port][]docker.PortBinding{
"51678/tcp": []docker.PortBinding{docker.PortBinding{HostIP: "0.0.0.0"}},
},
Links: agent.Options.ContainerLinks,
}
if agent.Options != nil {
for key, value := range agent.Options.ExtraEnvironment {
dockerConfig.Env = append(dockerConfig.Env, key+"="+value)
}
}
agentContainer, err := agent.DockerClient.CreateContainer(docker.CreateContainerOptions{
Config: dockerConfig,
HostConfig: hostConfig,
})
if err != nil {
agent.t.Fatal("Could not create agent container", err)
}
agent.DockerID = agentContainer.ID
agent.t.Logf("Agent started as docker container: %s\n", agentContainer.ID)
err = agent.DockerClient.StartContainer(agentContainer.ID, nil)
if err != nil {
return errors.New("Could not start agent container " + err.Error())
}
containerMetadata, err := agent.DockerClient.InspectContainer(agentContainer.ID)
if err != nil {
return errors.New("Could not inspect agent container: " + err.Error())
}
agent.IntrospectionURL = "http://localhost:" + containerMetadata.NetworkSettings.Ports["51678/tcp"][0].HostPort
// Wait up to 10s for it to register
var localMetadata handlers.MetadataResponse
for i := 0; i < 10; i++ {
func() {
agentMetadataResp, err := http.Get(agent.IntrospectionURL + "/v1/metadata")
if err != nil {
return
}
metadata, err := ioutil.ReadAll(agentMetadataResp.Body)
if err != nil {
return
}
json.Unmarshal(metadata, &localMetadata)
}()
if localMetadata.ContainerInstanceArn != nil && *localMetadata.ContainerInstanceArn != "" {
break
}
time.Sleep(1 * time.Second)
}
if localMetadata.ContainerInstanceArn == nil {
agent.DockerClient.StopContainer(agent.DockerID, 1)
return errors.New("Could not get agent metadata after launching it")
}
agent.ContainerInstanceArn = *localMetadata.ContainerInstanceArn
agent.Cluster = localMetadata.Cluster
if localMetadata.Version != "" {
versionNumberRegex := regexp.MustCompile(` v(\d+\.\d+\.\d+) `)
versionNumberStr := versionNumberRegex.FindStringSubmatch(localMetadata.Version)
if len(versionNumberStr) == 2 {
agent.Version = string(versionNumberStr[1])
}
}
if agent.Version == "" {
agent.Version = "UNKNOWN"
}
agent.t.Logf("Found agent metadata: %+v", localMetadata)
return nil
}
func (agent *TestAgent) Cleanup() {
agent.StopAgent()
if agent.t.Failed() {
agent.t.Logf("Preserving test dir for failed test %s", agent.TestDir)
} else {
agent.t.Logf("Removing test dir for passed test %s", agent.TestDir)
os.RemoveAll(agent.TestDir)
}
ECS.DeregisterContainerInstance(&ecs.DeregisterContainerInstanceInput{
Cluster: &agent.Cluster,
ContainerInstance: &agent.ContainerInstanceArn,
Force: aws.Bool(true),
})
}
func (agent *TestAgent) StartMultipleTasks(t *testing.T, task string, num int) ([]*TestTask, error) {
td, err := GetTaskDefinition(task)
if err != nil {
return nil, err
}
t.Logf("Task definition: %s", td)
cis := make([]*string, num)
for i := 0; i < num; i++ {
cis[i] = &agent.ContainerInstanceArn
}
resp, err := ECS.StartTask(&ecs.StartTaskInput{
Cluster: &agent.Cluster,
ContainerInstances: cis,
TaskDefinition: &td,
})
if err != nil {
return nil, err
}
if len(resp.Failures) != 0 || len(resp.Tasks) == 0 {
return nil, errors.New("Failure starting task: " + *resp.Failures[0].Reason)
}
testTasks := make([]*TestTask, num)
for i, task := range resp.Tasks {
agent.t.Logf("Started task: %s\n", *task.TaskArn)
testTasks[i] = &TestTask{task}
}
return testTasks, nil
}
func (agent *TestAgent) StartTask(t *testing.T, task string) (*TestTask, error) {
tasks, err := agent.StartMultipleTasks(t, task, 1)
if err != nil {
return nil, err
}
return tasks[0], nil
}
func (agent *TestAgent) StartTaskWithOverrides(t *testing.T, task string, overrides []*ecs.ContainerOverride) (*TestTask, error) {
td, err := GetTaskDefinition(task)
if err != nil {
return nil, err
}
t.Logf("Task definition: %s", td)
resp, err := ECS.StartTask(&ecs.StartTaskInput{
Cluster: &agent.Cluster,
ContainerInstances: []*string{&agent.ContainerInstanceArn},
TaskDefinition: &td,
Overrides: &ecs.TaskOverride{
ContainerOverrides: overrides,
},
})
if err != nil {
return nil, err
}
if len(resp.Failures) != 0 || len(resp.Tasks) == 0 {
return nil, errors.New("Failure starting task: " + *resp.Failures[0].Reason)
}
agent.t.Logf("Started task: %s\n", *resp.Tasks[0].TaskArn)
return &TestTask{resp.Tasks[0]}, nil
}
// ResolveTaskDockerID determines the Docker ID for a container within a given
// task that has been run by the Agent.
func (agent *TestAgent) ResolveTaskDockerID(task *TestTask, containerName string) (string, error) {
var err error
var dockerId string
for i := 0; i < 5; i++ {
dockerId, err = agent.resolveTaskDockerID(task, containerName)
if err == nil {
break
}
time.Sleep(100 * time.Millisecond)
}
return dockerId, err
}
func (agent *TestAgent) resolveTaskDockerID(task *TestTask, containerName string) (string, error) {
bodyData, err := agent.callTaskIntrospectionApi(*task.TaskArn)
if err != nil {
return "", err
}
var taskResp handlers.TaskResponse | err = json.Unmarshal(*bodyData, &taskResp)
if err != nil {
return "", err
} | random_line_split |
|
script.js | {
if (x % 2 === 1) {
myList.removeChild(el);
}
});
});
// task 4
const button = document.createElement('button');
button.innerText = 'Click to remove';
button.addEventListener('click', (e) => {
e.target.remove();
});
document.body.appendChild(button);
// task 5
const rand = Math.floor(Math.random() * 20);
for (let i = 0; i < rand; i++) {
const randDiv = document.createElement('div');
randDiv.innerText = `to jest div numer ${i}`;
document.body.appendChild(randDiv);
}
// task 6
const myNewObjStr = {
div1: 'to jest div1',
span1: 'to jest span1',
div2: {
div3: 'to jest div3(2)',
},
span2: 'to jest span2'
}
const r00t = document.getElementById('root');
const firstDiv = document.createElement('div');
firstDiv.innerText = myNewObjStr.div1;
const firstSpan = document.createElement('span');
firstSpan.innerText = myNewObjStr.span1;
firstDiv.append(firstSpan);
document.body.append(firstDiv, r00t);
const secondDiv = document.createElement('div');
const thirdDiv = document.createElement('div');
thirdDiv.innerText = myNewObjStr.div2.div3;
const secondSpan = document.createElement('span');
secondSpan.innerText = myNewObjStr.span2;
secondDiv.append(thirdDiv);
secondDiv.append(secondSpan);
document.body.append(secondDiv, r00t);
// task 7
const favFruits = ['arbuz', 'granat', 'mango', 'banan', 'banan', 'pomarańcza', 'wiśnia'];
const ul1 = document.createElement("ul");
const ul2 = document.createElement("ul");
favFruits.forEach(v => {
const li = document.createElement("li");
li.innerText = v;
ul1.appendChild(li);
});
const lists = [ul1, ul2];
const buttons = [document.createElement("button"), document.createElement("button")];
function check |
lists.forEach((ul, i) => {
if (ul.childElementCount <= 1) {
buttons[i].disabled = true;
} else {
buttons[i].disabled = false;
}
})
}
lists.forEach((ul, i) => {
buttons[i].innerText = 'MOVE';
buttons[i].addEventListener('click', () => {
const listItems = ul.querySelectorAll('li');
const childToTransfer = listItems[listItems.length - 1];
if (i === 0) {
ul2.insertBefore(childToTransfer, buttons[1]);
} else {
ul1.insertBefore(childToTransfer, buttons[0]);
}
checkButtonDisabled();
});
ul.appendChild(buttons[i]);
document.body.appendChild(ul);
});
checkButtonDisabled();
// task 8
const fieldset = document.createElement('fieldset');
const inputs = [{
label: 'Element',
id: 'el',
type: 'text'
}, {
label: 'Tekst',
id: 'text',
type: 'text'
}, {
label: 'Kolor',
id: 'color',
type: 'color'
}, {
label: 'Ile razy',
id: 'count',
type: 'number'
}, {
label: 'Utwórz',
type: 'submit'
}];
inputs.forEach(v => {
const elInput = document.createElement('input');
let label = document.createElement('hr');
elInput.style.display = 'block';
elInput.type = v.type;
elInput.id = v.id || null;
if (v.type === 'submit') {
elInput.value = v.label;
elInput.addEventListener('click', (e) => {
createElement(e);
});
} else {
label = document.createElement('label');
label.innerText = v.label;
label.for = v.id;
}
fieldset.appendChild(label);
fieldset.appendChild(elInput);
});
function createElement(e) {
e.preventDefault();
let el = null;
inputs.forEach((v) => {
const value = document.getElementById(v.id)?.value;
switch (v.id) {
case 'el': el = document.createElement(value); break;
case 'text': el.innerText = value; break;
case 'color': el.style.color = value; break;
case 'count': for (let i = 1; i <= Number(value); i++) {
document.body.appendChild(el.cloneNode(true));
} break;
}
});
}
document.body.appendChild(fieldset);
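// For example (illustrative): filling in Element = "p", Tekst = "hello",
// Kolor = red and Ile razy = 3, then pressing "Utwórz", appends three red
// <p>hello</p> elements to the page via createElement() above.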
// task 9
var nextFormulage = document.createElement("form");
root.appendChild(nextFormulage);
var yourName = document.createElement("input");
yourName.setAttribute('type', 'text');
yourName.setAttribute('value', 'Name');
var lastname = document.createElement("input");
lastname.setAttribute('type', 'text');
lastname.setAttribute('value', 'Lastname');
var age = document.createElement("input");
age.setAttribute('type', 'text');
age.setAttribute('value', 'Age');
var howManyKids = document.createElement("input");
howManyKids.setAttribute('type', 'text');
howManyKids.setAttribute('value', 'HowManyKids');
nextFormulage.appendChild(yourName);
nextFormulage.appendChild(lastname);
nextFormulage.appendChild(age);
nextFormulage.appendChild(howManyKids);
var moreButton = document.createElement('button');
moreButton.id = "more"
moreButton.type = 'button'
moreButton.innerText = "Więcej";
nextFormulage.appendChild(moreButton);
var createButton = document.createElement('button');
createButton.id = "create"
createButton.type = 'button'
createButton.innerText = "Utwórz";
nextFormulage.appendChild(createButton);
var nameTab = [];
var lastnameTab = [];
var ageTab = [];
var kidsTab = [];
function moreFields(){
nameTab.push(yourName.value);
lastnameTab.push(lastname.value);
ageTab.push(age.value);
kidsTab.push(howManyKids.value);
yourName.value = '';
lastname.value = '';
age.value = '';
howManyKids.value = '';
}
document.querySelector('#more').addEventListener('click', moreFields);
function createTable(){
nameTab.push(yourName.value);
lastnameTab.push(lastname.value);
ageTab.push(age.value);
kidsTab.push(howManyKids.value);
yourName.value = '';
lastname.value = '';
age.value = '';
howManyKids.value = '';
var tab = document.createElement("table");
var header = document.createElement('tr');
tab.appendChild(header);
var name = document.createElement('th');
name.innerHTML = "Name";
var lastName = document.createElement('th');
lastName.innerHTML = "Lastname";
var age1 = document.createElement('th');
age1.innerHTML = "Age";
var kids = document.createElement('th');
kids.innerHTML = "HowManyKids";
root.appendChild(tab);
header.appendChild(name);
header.appendChild(lastName);
header.appendChild(age1);
header.appendChild(kids);
for (var i = 0; i < nameTab.length; i++) {
var item = document.createElement('tr');
tab.appendChild(item);
var del = document.createElement('button');
del.innerText = "Usuń";
item.appendChild(del);
var newName = document.createElement('td');
newName.innerText = nameTab[i];
var newLastname = document.createElement('td');
newLastname.innerText = lastnameTab[i];
var newAge = document.createElement('td');
newAge.innerText = ageTab[i];
var newKids = document.createElement('td');
newKids.innerText= kidsTab[i];
item.appendChild(newName);
item.appendChild(newLastname);
item.appendChild(newAge);
item.appendChild(newKids);
item.appendChild(del);
del.addEventListener('click', deleteA);
}
nameTab = [];
lastnameTab = [];
ageTab = [];
kidsTab = [];
}
function deleteA(e) {
e.target .parentElement.remove()
}
document.querySelector('#create').addEventListener('click', createTable);
// 10 zadanie
let changeButton = document.createElement('button');
changeButton.id = "change"
changeButton.type = 'button'
changeButton.innerText = "Użyj dużych liter!";
root.appendChild(changeButton);
function changeLetters() {
document.querySelectorAll('tr').forEach((row) => {
row.querySelectorAll('td').forEach((cell) => {
cell.innerText = cell.innerText[0].toUpperCase() + cell.innerText.slice(1);
})
})
}
document.querySelector('#change').addEventListener('click', changeLetters);
// 11 zadanie
function extarctNumbersAndMultiplyToDivs(str) {
const numbers = str.match(/[0-9]+/g);
if (numbers.length > 0) {
console.log(numbers.reduce((a, b) => Number(a) + Number(b)));
const mmultiplier = numbers.reduce((a, b) => Number(a) * Number(b));
for (let i = 0; i < mmultiplier; i++) {
const div = document.createElement('div');
div.innerText = `div${i}`;
document.body.appendChild(div);
}
}
}
extarctNumbersAndMultiplyToDivs('foo1bar2test10nice2');
// 12 zadanie
function createObj(str) {
return {
string: str
}
}
const alaStr = createObj('Ala ma k | ButtonDisabled() {
| identifier_name |
script.js | 1: 'to jest div1',
span1: 'to jest span1',
div2: {
div3: 'to jest div3(2)',
},
span2: 'to jest span2'
}
const r00t = document.getElementById('root');
const firstDiv = document.createElement('div');
firstDiv.innerText = myNewObjStr.div1;
const firstSpan = document.createElement('span');
firstSpan.innerText = myNewObjStr.span1;
firstDiv.append(firstSpan);
document.body.append(firstDiv, r00t);
const secondDiv = document.createElement('div');
const thirdDiv = document.createElement('div');
thirdDiv.innerText = myNewObjStr.div2.div3;
const secondSpan = document.createElement('span');
secondSpan.innerText = myNewObjStr.span2;
secondDiv.append(thirdDiv);
secondDiv.append(secondSpan);
document.body.append(secondDiv, r00t);
// 7 zadanie
const favFruits = ['arbuz', 'granat', 'mango', 'banan', 'banan', 'pomarańcza', 'wiśnia'];
const ul1 = document.createElement("ul");
const ul2 = document.createElement("ul");
favFruits.forEach(v => {
const li = document.createElement("li");
li.innerText = v;
ul1.appendChild(li);
});
const lists = [ul1, ul2];
const buttons = [document.createElement("button"), document.createElement("button")];
function checkButtonDisabled() {
lists.forEach((ul, i) => {
if (ul.childElementCount <= 1) {
buttons[i].disabled = true;
} else {
buttons[i].disabled = false;
}
})
}
lists.forEach((ul, i) => {
buttons[i].innerText = 'MOVE';
buttons[i].addEventListener('click', () => {
const listItems = ul.querySelectorAll('li');
const childToTransfer = listItems[listItems.length - 1];
if (i === 0) {
ul2.insertBefore(childToTransfer, buttons[1]);
} else {
ul1.insertBefore(childToTransfer, buttons[0]);
}
checkButtonDisabled();
});
ul.appendChild(buttons[i]);
document.body.appendChild(ul);
});
checkButtonDisabled();
// 8 zadanie
const fieldset = document.createElement('fieldset');
const inputs = [{
label: 'Element',
id: 'el',
type: 'text'
}, {
label: 'Tekst',
id: 'text',
type: 'text'
}, {
label: 'Kolor',
id: 'color',
type: 'color'
}, {
label: 'Ile razy',
id: 'count',
type: 'number'
}, {
label: 'Utwórz',
type: 'submit'
}];
inputs.forEach(v => {
const elInput = document.createElement('input');
let label = document.createElement('hr');
elInput.style.display = 'block';
elInput.type = v.type;
elInput.id = v.id || null;
if (v.type === 'submit') {
elInput.value = v.label;
elInput.addEventListener('click', (e) => {
createElement(e);
});
} else {
label = document.createElement('label');
label.innerText = v.label;
label.for = v.id;
}
fieldset.appendChild(label);
fieldset.appendChild(elInput);
});
function createElement(e) {
e.preventDefault();
let el = null;
inputs.forEach((v) => {
const value = document.getElementById(v.id)?.value;
switch (v.id) {
case 'el': el = document.createElement(value); break;
case 'text': el.innerText = value; break;
case 'color': el.style.color = value; break;
case 'count': for (let i = 1; i <= Number(value); i++) {
document.body.appendChild(el.cloneNode(true));
} break;
}
});
}
document.body.appendChild(fieldset);
// 9 zadanie
var nextFormulage = document.createElement("form");
root.appendChild(nextFormulage);
var yourName = document.createElement("input");
yourName.setAttribute('type', 'text');
yourName.setAttribute('value', 'Name');
var lastname = document.createElement("input");
lastname.setAttribute('type', 'text');
lastname.setAttribute('value', 'Lastname');
var age = document.createElement("input");
age.setAttribute('type', 'text');
age.setAttribute('value', 'Age');
var howManyKids = document.createElement("input");
howManyKids.setAttribute('type', 'text');
howManyKids.setAttribute('value', 'HowManyKids');
nextFormulage.appendChild(yourName);
nextFormulage.appendChild(lastname);
nextFormulage.appendChild(age);
nextFormulage.appendChild(howManyKids);
var moreButton = document.createElement('button');
moreButton.id = "more"
moreButton.type = 'button'
moreButton.innerText = "Więcej";
nextFormulage.appendChild(moreButton);
var createButton = document.createElement('button');
createButton.id = "create"
createButton.type = 'button'
createButton.innerText = "Utwórz";
nextFormulage.appendChild(createButton);
var nameTab = [];
var lastnameTab = [];
var ageTab = [];
var kidsTab = [];
function moreFields(){
nameTab.push(yourName.value);
lastnameTab.push(lastname.value);
ageTab.push(age.value);
kidsTab.push(howManyKids.value);
yourName.value = '';
lastname.value = '';
age.value = '';
howManyKids.value = '';
}
document.querySelector('#more').addEventListener('click', moreFields);
function createTable(){
nameTab.push(yourName.value);
lastnameTab.push(lastname.value);
ageTab.push(age.value);
kidsTab.push(howManyKids.value);
yourName.value = '';
lastname.value = '';
age.value = '';
howManyKids.value = '';
var tab = document.createElement("table");
var header = document.createElement('tr');
tab.appendChild(header);
var name = document.createElement('th');
name.innerHTML = "Name";
var lastName = document.createElement('th');
lastName.innerHTML = "Lastname";
var age1 = document.createElement('th');
age1.innerHTML = "Age";
var kids = document.createElement('th');
kids.innerHTML = "HowManyKids";
root.appendChild(tab);
header.appendChild(name);
header.appendChild(lastName);
header.appendChild(age1);
header.appendChild(kids);
for (var i = 0; i < nameTab.length; i++) {
var item = document.createElement('tr');
tab.appendChild(item);
var del = document.createElement('button');
del.innerText = "Usuń";
item.appendChild(del);
var newName = document.createElement('td');
newName.innerText = nameTab[i];
var newLastname = document.createElement('td');
newLastname.innerText = lastnameTab[i];
var newAge = document.createElement('td');
newAge.innerText = ageTab[i];
var newKids = document.createElement('td');
newKids.innerText= kidsTab[i];
item.appendChild(newName);
item.appendChild(newLastname);
item.appendChild(newAge);
item.appendChild(newKids);
item.appendChild(del);
del.addEventListener('click', deleteA);
}
nameTab = [];
lastnameTab = [];
ageTab = [];
kidsTab = [];
}
function deleteA(e) {
e.target .parentElement.remove()
}
document.querySelector('#create').addEventListener('click', createTable);
// 10 zadanie
let changeButton = document.createElement('button');
changeButton.id = "change"
changeButton.type = 'button'
changeButton.innerText = "Użyj dużych liter!";
root.appendChild(changeButton);
function changeLetters() {
document.querySelectorAll('tr').forEach((row) => {
row.querySelectorAll('td').forEach((cell) => {
cell.innerText = cell.innerText[0].toUpperCase() + cell.innerText.slice(1);
})
})
}
document.querySelector('#change').addEventListener('click', changeLetters);
// 11 zadanie
function extarctNumbersAndMultiplyToDivs(str) {
const numbers = str.match(/[0-9]+/g);
if (numbers.length > 0) {
console.log(numbers.reduce((a, b) => Number(a) + Number(b)));
const mmultiplier = numbers.reduce((a, b) => Number(a) * Number(b));
for (let i = 0; i < mmultiplier; i++) {
const div = document.createElement('div');
div.innerText = `div${i}`;
document.body.appendChild(div);
}
}
}
extarctNumbersAndMultiplyToDivs('foo1bar2test10nice2');
// 12 zadanie
function createObj(str) {
return {
string: str
}
}
const alaStr = createObj('Ala ma kota');
alaStr.alaToOla = function () {
if (this.string.includes('Ala')) {
this.string = this.string.replaceAll('Ala', 'Ola');
console.log(this.string);
} else {
const div = document.createElement('div');
div.innerText = 'Słowo Ala nie występuje w tekście.';
document.body.appendChild(div);
}
}
alaStr.alaToOla();
// 13 zadanie
function countForMe(stringArr){
var howManyLetters = [];
for(var i = 0; i < stringArr.length; i++){
how | ManyLetters[i] = stringArr[i].length;
}
return h | conditional_block |
|
script.js | {
if (x % 2 === 1) {
myList.removeChild(el);
}
});
});
// 4 zadanie
const button = document.createElement('button');
button.innerText = 'Click to remove';
button.addEventListener('click', (e) => {
e.target.remove();
});
document.body.appendChild(button);
// 5 zadanie
const rand = Math.floor(Math.random() * 20);
for (let i = 0; i < rand; i++) {
const randDiv = document.createElement('div');
randDiv.innerText = `to jest div numer ${i}`;
document.body.appendChild(randDiv);
}
// 6 zadanie
const myNewObjStr = {
div1: 'to jest div1',
span1: 'to jest span1',
div2: {
div3: 'to jest div3(2)',
},
span2: 'to jest span2'
}
const r00t = document.getElementById('root');
const firstDiv = document.createElement('div');
firstDiv.innerText = myNewObjStr.div1;
const firstSpan = document.createElement('span');
firstSpan.innerText = myNewObjStr.span1;
firstDiv.append(firstSpan);
document.body.append(firstDiv, r00t);
const secondDiv = document.createElement('div');
const thirdDiv = document.createElement('div');
thirdDiv.innerText = myNewObjStr.div2.div3;
const secondSpan = document.createElement('span');
secondSpan.innerText = myNewObjStr.span2;
secondDiv.append(thirdDiv);
secondDiv.append(secondSpan);
document.body.append(secondDiv, r00t);
// 7 zadanie
const favFruits = ['arbuz', 'granat', 'mango', 'banan', 'banan', 'pomarańcza', 'wiśnia'];
const ul1 = document.createElement("ul");
const ul2 = document.createElement("ul");
favFruits.forEach(v => {
const li = document.createElement("li");
li.innerText = v;
ul1.appendChild(li);
});
const lists = [ul1, ul2];
const buttons = [document.createElement("button"), document.createElement("button")];
function checkButtonDisabled() {
lists.forEach((ul, i) => {
if (ul.childElementCount <= 1) {
buttons[i].disabled = true;
} else {
buttons[i].disabled = false;
}
})
}
lists.forEach((ul, i) => {
buttons[i].innerText = 'MOVE';
buttons[i].addEventListener('click', () => {
const listItems = ul.querySelectorAll('li');
const childToTransfer = listItems[listItems.length - 1];
if (i === 0) {
ul2.insertBefore(childToTransfer, buttons[1]);
} else {
ul1.insertBefore(childToTransfer, buttons[0]);
}
checkButtonDisabled();
});
ul.appendChild(buttons[i]);
document.body.appendChild(ul);
});
checkButtonDisabled();
// 8 zadanie
const fieldset = document.createElement('fieldset');
const inputs = [{
label: 'Element',
id: 'el',
type: 'text'
}, {
label: 'Tekst',
id: 'text',
type: 'text'
}, {
label: 'Kolor',
id: 'color',
type: 'color'
}, {
label: 'Ile razy',
id: 'count',
type: 'number'
}, {
label: 'Utwórz',
type: 'submit'
}];
inputs.forEach(v => {
const elInput = document.createElement('input');
let label = document.createElement('hr');
elInput.style.display = 'block';
elInput.type = v.type;
elInput.id = v.id || null;
if (v.type === 'submit') {
elInput.value = v.label;
elInput.addEventListener('click', (e) => {
createElement(e);
});
} else {
label = document.createElement('label');
label.innerText = v.label;
label.for = v.id;
}
fieldset.appendChild(label);
fieldset.appendChild(elInput);
});
function createElement(e) {
e.preventDefault();
let el = null;
inputs.forEach((v) => {
const value = document.getElementById(v.id)?.value;
switch (v.id) {
case 'el': el = document.createElement(value); break;
case 'text': el.innerText = value; break;
case 'color': el.style.color = value; break;
case 'count': for (let i = 1; i <= Number(value); i++) {
document.body.appendChild(el.cloneNode(true));
} break;
}
});
}
document.body.appendChild(fieldset);
// 9 zadanie
var nextFormulage = document.createElement("form");
root.appendChild(nextFormulage);
var yourName = document.createElement("input");
yourName.setAttribute('type', 'text');
yourName.setAttribute('value', 'Name');
var lastname = document.createElement("input");
lastname.setAttribute('type', 'text');
lastname.setAttribute('value', 'Lastname');
var age = document.createElement("input");
age.setAttribute('type', 'text');
age.setAttribute('value', 'Age');
var howManyKids = document.createElement("input");
howManyKids.setAttribute('type', 'text');
howManyKids.setAttribute('value', 'HowManyKids');
nextFormulage.appendChild(yourName);
nextFormulage.appendChild(lastname);
nextFormulage.appendChild(age);
nextFormulage.appendChild(howManyKids);
var moreButton = document.createElement('button');
moreButton.id = "more"
moreButton.type = 'button'
moreButton.innerText = "Więcej";
nextFormulage.appendChild(moreButton);
var createButton = document.createElement('button');
createButton.id = "create"
createButton.type = 'button'
createButton.innerText = "Utwórz";
nextFormulage.appendChild(createButton);
var nameTab = [];
var lastnameTab = [];
var ageTab = [];
|
function moreFields(){
nameTab.push(yourName.value);
lastnameTab.push(lastname.value);
ageTab.push(age.value);
kidsTab.push(howManyKids.value);
yourName.value = '';
lastname.value = '';
age.value = '';
howManyKids.value = '';
}
document.querySelector('#more').addEventListener('click', moreFields);
function createTable(){
nameTab.push(yourName.value);
lastnameTab.push(lastname.value);
ageTab.push(age.value);
kidsTab.push(howManyKids.value);
yourName.value = '';
lastname.value = '';
age.value = '';
howManyKids.value = '';
var tab = document.createElement("table");
var header = document.createElement('tr');
tab.appendChild(header);
var name = document.createElement('th');
name.innerHTML = "Name";
var lastName = document.createElement('th');
lastName.innerHTML = "Lastname";
var age1 = document.createElement('th');
age1.innerHTML = "Age";
var kids = document.createElement('th');
kids.innerHTML = "HowManyKids";
root.appendChild(tab);
header.appendChild(name);
header.appendChild(lastName);
header.appendChild(age1);
header.appendChild(kids);
for (var i = 0; i < nameTab.length; i++) {
var item = document.createElement('tr');
tab.appendChild(item);
var del = document.createElement('button');
del.innerText = "Usuń";
item.appendChild(del);
var newName = document.createElement('td');
newName.innerText = nameTab[i];
var newLastname = document.createElement('td');
newLastname.innerText = lastnameTab[i];
var newAge = document.createElement('td');
newAge.innerText = ageTab[i];
var newKids = document.createElement('td');
newKids.innerText= kidsTab[i];
item.appendChild(newName);
item.appendChild(newLastname);
item.appendChild(newAge);
item.appendChild(newKids);
item.appendChild(del);
del.addEventListener('click', deleteA);
}
nameTab = [];
lastnameTab = [];
ageTab = [];
kidsTab = [];
}
function deleteA(e) {
e.target .parentElement.remove()
}
document.querySelector('#create').addEventListener('click', createTable);
// 10 zadanie
let changeButton = document.createElement('button');
changeButton.id = "change"
changeButton.type = 'button'
changeButton.innerText = "Użyj dużych liter!";
root.appendChild(changeButton);
function changeLetters() {
document.querySelectorAll('tr').forEach((row) => {
row.querySelectorAll('td').forEach((cell) => {
cell.innerText = cell.innerText[0].toUpperCase() + cell.innerText.slice(1);
})
})
}
document.querySelector('#change').addEventListener('click', changeLetters);
// 11 zadanie
function extarctNumbersAndMultiplyToDivs(str) {
const numbers = str.match(/[0-9]+/g);
if (numbers.length > 0) {
console.log(numbers.reduce((a, b) => Number(a) + Number(b)));
const mmultiplier = numbers.reduce((a, b) => Number(a) * Number(b));
for (let i = 0; i < mmultiplier; i++) {
const div = document.createElement('div');
div.innerText = `div${i}`;
document.body.appendChild(div);
}
}
}
extarctNumbersAndMultiplyToDivs('foo1bar2test10nice2');
// 12 zadanie
function createObj(str) {
return {
string: str
}
}
const alaStr = createObj('Ala ma k | var kidsTab = [];
| random_line_split |
script.js | if (x % 2 === 1) {
myList.removeChild(el);
}
});
});
// 4 zadanie
const button = document.createElement('button');
button.innerText = 'Click to remove';
button.addEventListener('click', (e) => {
e.target.remove();
});
document.body.appendChild(button);
// 5 zadanie
const rand = Math.floor(Math.random() * 20);
for (let i = 0; i < rand; i++) {
const randDiv = document.createElement('div');
randDiv.innerText = `to jest div numer ${i}`;
document.body.appendChild(randDiv);
}
// 6 zadanie
const myNewObjStr = {
div1: 'to jest div1',
span1: 'to jest span1',
div2: {
div3: 'to jest div3(2)',
},
span2: 'to jest span2'
}
const r00t = document.getElementById('root');
const firstDiv = document.createElement('div');
firstDiv.innerText = myNewObjStr.div1;
const firstSpan = document.createElement('span');
firstSpan.innerText = myNewObjStr.span1;
firstDiv.append(firstSpan);
document.body.append(firstDiv, r00t);
const secondDiv = document.createElement('div');
const thirdDiv = document.createElement('div');
thirdDiv.innerText = myNewObjStr.div2.div3;
const secondSpan = document.createElement('span');
secondSpan.innerText = myNewObjStr.span2;
secondDiv.append(thirdDiv);
secondDiv.append(secondSpan);
document.body.append(secondDiv, r00t);
// 7 zadanie
const favFruits = ['arbuz', 'granat', 'mango', 'banan', 'banan', 'pomarańcza', 'wiśnia'];
const ul1 = document.createElement("ul");
const ul2 = document.createElement("ul");
favFruits.forEach(v => {
const li = document.createElement("li");
li.innerText = v;
ul1.appendChild(li);
});
const lists = [ul1, ul2];
const buttons = [document.createElement("button"), document.createElement("button")];
function checkButtonDisabled() {
lists.forEach((ul, i) => {
if (ul.childElementCount <= 1) {
buttons[i].disabled = true;
} else {
buttons[i].disabled = false;
}
})
}
lists.forEach((ul, i) => {
buttons[i].innerText = 'MOVE';
buttons[i].addEventListener('click', () => {
const listItems = ul.querySelectorAll('li');
const childToTransfer = listItems[listItems.length - 1];
if (i === 0) {
ul2.insertBefore(childToTransfer, buttons[1]);
} else {
ul1.insertBefore(childToTransfer, buttons[0]);
}
checkButtonDisabled();
});
ul.appendChild(buttons[i]);
document.body.appendChild(ul);
});
checkButtonDisabled();
// 8 zadanie
const fieldset = document.createElement('fieldset');
const inputs = [{
label: 'Element',
id: 'el',
type: 'text'
}, {
label: 'Tekst',
id: 'text',
type: 'text'
}, {
label: 'Kolor',
id: 'color',
type: 'color'
}, {
label: 'Ile razy',
id: 'count',
type: 'number'
}, {
label: 'Utwórz',
type: 'submit'
}];
inputs.forEach(v => {
const elInput = document.createElement('input');
let label = document.createElement('hr');
elInput.style.display = 'block';
elInput.type = v.type;
elInput.id = v.id || null;
if (v.type === 'submit') {
elInput.value = v.label;
elInput.addEventListener('click', (e) => {
createElement(e);
});
} else {
label = document.createElement('label');
label.innerText = v.label;
label.for = v.id;
}
fieldset.appendChild(label);
fieldset.appendChild(elInput);
});
function createElement(e) {
e.preventDefault();
let el = null;
inputs.forEach((v) => {
const value = document.getElementById(v.id)?.value;
switch (v.id) {
case 'el': el = document.createElement(value); break;
case 'text': el.innerText = value; break;
case 'color': el.style.color = value; break;
case 'count': for (let i = 1; i <= Number(value); i++) {
document.body.appendChild(el.cloneNode(true));
} break;
}
});
}
document.body.appendChild(fieldset);
// 9 zadanie
var nextFormulage = document.createElement("form");
root.appendChild(nextFormulage);
var yourName = document.createElement("input");
yourName.setAttribute('type', 'text');
yourName.setAttribute('value', 'Name');
var lastname = document.createElement("input");
lastname.setAttribute('type', 'text');
lastname.setAttribute('value', 'Lastname');
var age = document.createElement("input");
age.setAttribute('type', 'text');
age.setAttribute('value', 'Age');
var howManyKids = document.createElement("input");
howManyKids.setAttribute('type', 'text');
howManyKids.setAttribute('value', 'HowManyKids');
nextFormulage.appendChild(yourName);
nextFormulage.appendChild(lastname);
nextFormulage.appendChild(age);
nextFormulage.appendChild(howManyKids);
var moreButton = document.createElement('button');
moreButton.id = "more"
moreButton.type = 'button'
moreButton.innerText = "Więcej";
nextFormulage.appendChild(moreButton);
var createButton = document.createElement('button');
createButton.id = "create"
createButton.type = 'button'
createButton.innerText = "Utwórz";
nextFormulage.appendChild(createButton);
var nameTab = [];
var lastnameTab = [];
var ageTab = [];
var kidsTab = [];
function moreFields(){
n | nt.querySelector('#more').addEventListener('click', moreFields);
function createTable(){
nameTab.push(yourName.value);
lastnameTab.push(lastname.value);
ageTab.push(age.value);
kidsTab.push(howManyKids.value);
yourName.value = '';
lastname.value = '';
age.value = '';
howManyKids.value = '';
var tab = document.createElement("table");
var header = document.createElement('tr');
tab.appendChild(header);
var name = document.createElement('th');
name.innerHTML = "Name";
var lastName = document.createElement('th');
lastName.innerHTML = "Lastname";
var age1 = document.createElement('th');
age1.innerHTML = "Age";
var kids = document.createElement('th');
kids.innerHTML = "HowManyKids";
root.appendChild(tab);
header.appendChild(name);
header.appendChild(lastName);
header.appendChild(age1);
header.appendChild(kids);
for (var i = 0; i < nameTab.length; i++) {
var item = document.createElement('tr');
tab.appendChild(item);
var del = document.createElement('button');
del.innerText = "Usuń";
item.appendChild(del);
var newName = document.createElement('td');
newName.innerText = nameTab[i];
var newLastname = document.createElement('td');
newLastname.innerText = lastnameTab[i];
var newAge = document.createElement('td');
newAge.innerText = ageTab[i];
var newKids = document.createElement('td');
newKids.innerText= kidsTab[i];
item.appendChild(newName);
item.appendChild(newLastname);
item.appendChild(newAge);
item.appendChild(newKids);
item.appendChild(del);
del.addEventListener('click', deleteA);
}
nameTab = [];
lastnameTab = [];
ageTab = [];
kidsTab = [];
}
function deleteA(e) {
e.target .parentElement.remove()
}
document.querySelector('#create').addEventListener('click', createTable);
// 10 zadanie
let changeButton = document.createElement('button');
changeButton.id = "change"
changeButton.type = 'button'
changeButton.innerText = "Użyj dużych liter!";
root.appendChild(changeButton);
function changeLetters() {
document.querySelectorAll('tr').forEach((row) => {
row.querySelectorAll('td').forEach((cell) => {
cell.innerText = cell.innerText[0].toUpperCase() + cell.innerText.slice(1);
})
})
}
document.querySelector('#change').addEventListener('click', changeLetters);
// 11 zadanie
function extarctNumbersAndMultiplyToDivs(str) {
const numbers = str.match(/[0-9]+/g);
if (numbers.length > 0) {
console.log(numbers.reduce((a, b) => Number(a) + Number(b)));
const mmultiplier = numbers.reduce((a, b) => Number(a) * Number(b));
for (let i = 0; i < mmultiplier; i++) {
const div = document.createElement('div');
div.innerText = `div${i}`;
document.body.appendChild(div);
}
}
}
extarctNumbersAndMultiplyToDivs('foo1bar2test10nice2');
// 12 zadanie
function createObj(str) {
return {
string: str
}
}
const alaStr = createObj('Ala ma k | ameTab.push(yourName.value);
lastnameTab.push(lastname.value);
ageTab.push(age.value);
kidsTab.push(howManyKids.value);
yourName.value = '';
lastname.value = '';
age.value = '';
howManyKids.value = '';
}
docume | identifier_body |
main.go | hub.Run()
}
func Start() {
go consume()
}
func consume() {
for {
select {
case msg, more := <-consumer.Messages():
if more {
fmt.Printf("kafka consumer msg: (topic:%s) (partition:%d) (offset:%d) (%s): (%s)\n", msg.Topic, msg.Partition, msg.Offset, msg.Key, msg.Value)
loghub.Input() <- msg
// consumer.MarkOffset(msg, "completed") // mark message as processed
// fmt.Println("kafka consumer HighWaterMarks", consumer.HighWaterMarks())
}
case err, more := <-consumer.Errors():
if more {
fmt.Printf("Kafka consumer error: %v\n", err.Error())
}
case ntf, more := <-consumer.Notifications():
if more {
fmt.Printf("Kafka consumer rebalance: %v\n", ntf)
}
case <-sig:
fmt.Errorf("Stop consumer server...")
consumer.Close()
return
}
}
}
func Stop(s os.Signal) {
fmt.Println("Recived kafka consumer stop signal...")
sig <- s
fmt.Println("kafka consumer stopped!!!")
}
func main() {
signals := make(chan os.Signal, 1)
signal.Notify(signals, os.Interrupt)
Start()
select {
case s := <-signals:
Stop(s)
}
}
type Config struct {
Name string
LogProject struct {
Name string
Endpoint string
AccessKeyID string
AccessKeySecret string
}
MessageChannelBufferSize int
LogsBufferSize int
Topics []string
LogsBufferSize4Logstore int
Logstores []string
}
type Loghub struct {
Name string
*Config
consumer *cluster.Consumer
logproject *sls.LogProject
logstores map[string]*sls.LogStore
messages chan *sarama.ConsumerMessage
messageChannelBufferSize int
m sync.RWMutex
stop chan int
mlogstoreLogsBuffer sync.RWMutex
logsBuffer4Logstore map[string](chan *topicLog) // by logstore
logsBufferSize4Logstore int
mlogsBuffer sync.RWMutex
// logsBuffer map[string](chan *topicLog) // by topic and logstore
logsBuffer map[string]map[string]chan *topicLog // by topic and logstore
logsBufferSize int
topics []string
}
type topicLog struct {
topic string
log *sls.Log
cmsg *sarama.ConsumerMessage
}
func NewLoghub(cfg *Config, consumer *cluster.Consumer) *Loghub {
logproject := &sls.LogProject{
Name: cfg.LogProject.Name,
Endpoint: cfg.LogProject.Endpoint,
AccessKeyID: cfg.LogProject.AccessKeyID,
AccessKeySecret: cfg.LogProject.AccessKeySecret,
}
lbls := map[string](chan *topicLog){}
lbr := map[string]map[string](chan *topicLog){}
// lbr := map[string](chan *topicLog){}
// for _, tp := range cfg.Topics {
// lbr[tp] = make(chan *topicLog, cfg.LogsBufferSize)
// }
lh := &Loghub{
Name: cfg.Name,
Config: cfg,
consumer: consumer,
logproject: logproject,
messages: make(chan *sarama.ConsumerMessage, cfg.MessageChannelBufferSize),
messageChannelBufferSize: cfg.MessageChannelBufferSize,
stop: make(chan int),
logsBuffer4Logstore: lbls,
logsBufferSize4Logstore: cfg.LogsBufferSize4Logstore,
logstores: map[string]*sls.LogStore{},
logsBuffer: lbr,
logsBufferSize: cfg.LogsBufferSize,
topics: cfg.Topics,
}
return lh
}
func (l *Loghub) Run() {
// lss, err := l.logproject.ListLogStore()
// if err != nil {
// panic(err)
// }
// 开启日志库
for _, lsn := range l.Logstores {
_, err := l.getLogstore(lsn)
if err != nil {
fmt.Printf("Loghub Start failed (logstoreName=%s). err: %v\n", lsn, err)
panic(err)
}
// 分配到topic
go l.dispatchToTopic(lsn)
for _, tp := range l.topics {
go l.processTopicMsg(lsn, tp)
}
}
// 分配消息
go l.dispatch()
}
func (l *Loghub) Input() chan<- *sarama.ConsumerMessage {
return l.messages
}
// dispatchToTopic
func (l *Loghub) dispatchToTopic(logstoreName string) {
// TODO: 处理消息, 分配到不同的topic
channelBuffer := l.getLogstoreLogsBufferChannel(logstoreName)
for {
select {
case log := <-channelBuffer:
l.mlogsBuffer.Lock()
logsCB, ok := l.logsBuffer[logstoreName]
if !ok || logsCB == nil {
logsCB = map[string]chan *topicLog{}
l.logsBuffer[logstoreName] = logsCB
}
logsCBTopic, ok := logsCB[log.topic]
if !ok || logsCBTopic == nil {
logsCBTopic = make(chan *topicLog, l.logsBufferSize)
logsCB[log.topic] = logsCBTopic
}
l.mlogsBuffer.Unlock()
logsCBTopic <- log
}
}
}
// dispatch
// 分配消息
func (l *Loghub) dispatch() error {
// TODO: logproject, logstore, topic
// 指定logproject和logstore进行分配
for {
select {
case msg := <-l.messages:
data, err := unserialize(msg)
if err != nil {
fmt.Println(err)
continue
}
logprojectName, ok1 := data["logproject"]
if !ok1 || logprojectName == "" {
fmt.Println("loghub.dispatch: data[\"logproject\"] was empty")
continue
}
logstoreName, ok2 := data["logstore"]
if !ok2 || logstoreName == "" {
fmt.Println("loghub.dispatch: data[\"logstore\"] was empty")
continue
}
topic, ok3 := data["topic"]
if !ok3 || topic == "" {
fmt.Println("loghub.dispatch: data[\"topic\"] was empty")
continue
}
log, err := generateLog(data)
if err != nil {
fmt.Println(err)
continue
}
// logstore, err := l.getLogstore(logstoreName)
// if err != nil {
// fmt.Println(err)
// continue
// }
log.cmsg = msg
lblsc := l.getLogstoreLogsBufferChannel(logstoreName)
select {
// TODO: 考虑优化处理, lblsc如果满了的情况
case lblsc <- log:
}
}
}
}
func (l *Loghub) Stop() {
l.stop <- 0
}
func (l *Loghub) processTopicMsg(topic string, logstoreName string) error {
cb := l.logsBuffer[logstoreName][topic]
for {
select {
case log := <-cb:
loggroup := generateLoggroupByTopicLog(log, "")
logstore, err := l.getLogstore(logstoreName)
if err != nil {
fmt.Println(err)
continue
}
err = putLogs(logstore, loggroup)
if err != nil {
fmt.Println(err)
continue
}
l.consumer.MarkOffset(log.cmsg, "loghub.processTopicMsg")
}
}
}
func (l *Loghub) getLogstore(logstoreName string) (*sls.LogStore, error) {
var logstore *sls.LogStore
l.m.RLock()
logstore = l.logstores[logstoreName]
l.m.RUnlock()
if logstore != nil {
return logstore, nil
}
var err error
for retry_times := 0; ; retry_times++ {
if retry_times > 5 {
return nil, errors.New("GetLogStore retry_times > 5")
}
logstore, err = l.logproject.GetLogStore(logstoreName)
if err != nil {
fmt.Printf("GetLogStore fail, retry:%d, err:%v\n", retry_times, err)
if strings.Contains(err.Error(), sls.PROJECT_NOT_EXIST) {
return nil, err
} else if strings.Contains(err.Error(), sls.LOGSTORE_NOT_EXIST) {
err = l.logproject.CreateLogStore(logstoreName, 1, 2)
if err != nil {
fmt.Printf("CreateLogStore fail, err: ", err.Error())
return nil, err
} else {
fmt.Println("CreateLogStore success")
l.m.Lock()
l.logstores[logstoreName] = logstore
l.m.Unlock()
return logstore, nil
}
}
} else { | fmt.Printf("GetLog | random_line_split |
|
main.go | .Run()
}
func Start() {
go consume()
}
func | () {
for {
select {
case msg, more := <-consumer.Messages():
if more {
fmt.Printf("kafka consumer msg: (topic:%s) (partition:%d) (offset:%d) (%s): (%s)\n", msg.Topic, msg.Partition, msg.Offset, msg.Key, msg.Value)
loghub.Input() <- msg
// consumer.MarkOffset(msg, "completed") // mark message as processed
// fmt.Println("kafka consumer HighWaterMarks", consumer.HighWaterMarks())
}
case err, more := <-consumer.Errors():
if more {
fmt.Printf("Kafka consumer error: %v\n", err.Error())
}
case ntf, more := <-consumer.Notifications():
if more {
fmt.Printf("Kafka consumer rebalance: %v\n", ntf)
}
case <-sig:
fmt.Errorf("Stop consumer server...")
consumer.Close()
return
}
}
}
func Stop(s os.Signal) {
fmt.Println("Recived kafka consumer stop signal...")
sig <- s
fmt.Println("kafka consumer stopped!!!")
}
func main() {
signals := make(chan os.Signal, 1)
signal.Notify(signals, os.Interrupt)
Start()
select {
case s := <-signals:
Stop(s)
}
}
type Config struct {
Name string
LogProject struct {
Name string
Endpoint string
AccessKeyID string
AccessKeySecret string
}
MessageChannelBufferSize int
LogsBufferSize int
Topics []string
LogsBufferSize4Logstore int
Logstores []string
}
type Loghub struct {
Name string
*Config
consumer *cluster.Consumer
logproject *sls.LogProject
logstores map[string]*sls.LogStore
messages chan *sarama.ConsumerMessage
messageChannelBufferSize int
m sync.RWMutex
stop chan int
mlogstoreLogsBuffer sync.RWMutex
logsBuffer4Logstore map[string](chan *topicLog) // by logstore
logsBufferSize4Logstore int
mlogsBuffer sync.RWMutex
// logsBuffer map[string](chan *topicLog) // by topic and logstore
logsBuffer map[string]map[string]chan *topicLog // by topic and logstore
logsBufferSize int
topics []string
}
type topicLog struct {
topic string
log *sls.Log
cmsg *sarama.ConsumerMessage
}
func NewLoghub(cfg *Config, consumer *cluster.Consumer) *Loghub {
logproject := &sls.LogProject{
Name: cfg.LogProject.Name,
Endpoint: cfg.LogProject.Endpoint,
AccessKeyID: cfg.LogProject.AccessKeyID,
AccessKeySecret: cfg.LogProject.AccessKeySecret,
}
lbls := map[string](chan *topicLog){}
lbr := map[string]map[string](chan *topicLog){}
// lbr := map[string](chan *topicLog){}
// for _, tp := range cfg.Topics {
// lbr[tp] = make(chan *topicLog, cfg.LogsBufferSize)
// }
lh := &Loghub{
Name: cfg.Name,
Config: cfg,
consumer: consumer,
logproject: logproject,
messages: make(chan *sarama.ConsumerMessage, cfg.MessageChannelBufferSize),
messageChannelBufferSize: cfg.MessageChannelBufferSize,
stop: make(chan int),
logsBuffer4Logstore: lbls,
logsBufferSize4Logstore: cfg.LogsBufferSize4Logstore,
logstores: map[string]*sls.LogStore{},
logsBuffer: lbr,
logsBufferSize: cfg.LogsBufferSize,
topics: cfg.Topics,
}
return lh
}
func (l *Loghub) Run() {
// lss, err := l.logproject.ListLogStore()
// if err != nil {
// panic(err)
// }
// 开启日志库
for _, lsn := range l.Logstores {
_, err := l.getLogstore(lsn)
if err != nil {
fmt.Printf("Loghub Start failed (logstoreName=%s). err: %v\n", lsn, err)
panic(err)
}
// 分配到topic
go l.dispatchToTopic(lsn)
for _, tp := range l.topics {
go l.processTopicMsg(lsn, tp)
}
}
// 分配消息
go l.dispatch()
}
func (l *Loghub) Input() chan<- *sarama.ConsumerMessage {
return l.messages
}
// dispatchToTopic
func (l *Loghub) dispatchToTopic(logstoreName string) {
// TODO: 处理消息, 分配到不同的topic
channelBuffer := l.getLogstoreLogsBufferChannel(logstoreName)
for {
select {
case log := <-channelBuffer:
l.mlogsBuffer.Lock()
logsCB, ok := l.logsBuffer[logstoreName]
if !ok || logsCB == nil {
logsCB = map[string]chan *topicLog{}
l.logsBuffer[logstoreName] = logsCB
}
logsCBTopic, ok := logsCB[log.topic]
if !ok || logsCBTopic == nil {
logsCBTopic = make(chan *topicLog, l.logsBufferSize)
logsCB[log.topic] = logsCBTopic
}
l.mlogsBuffer.Unlock()
logsCBTopic <- log
}
}
}
// dispatch
// 分配消息
func (l *Loghub) dispatch() error {
// TODO: logproject, logstore, topic
// 指定logproject和logstore进行分配
for {
select {
case msg := <-l.messages:
data, err := unserialize(msg)
if err != nil {
fmt.Println(err)
continue
}
logprojectName, ok1 := data["logproject"]
if !ok1 || logprojectName == "" {
fmt.Println("loghub.dispatch: data[\"logproject\"] was empty")
continue
}
logstoreName, ok2 := data["logstore"]
if !ok2 || logstoreName == "" {
fmt.Println("loghub.dispatch: data[\"logstore\"] was empty")
continue
}
topic, ok3 := data["topic"]
if !ok3 || topic == "" {
fmt.Println("loghub.dispatch: data[\"topic\"] was empty")
continue
}
log, err := generateLog(data)
if err != nil {
fmt.Println(err)
continue
}
// logstore, err := l.getLogstore(logstoreName)
// if err != nil {
// fmt.Println(err)
// continue
// }
log.cmsg = msg
lblsc := l.getLogstoreLogsBufferChannel(logstoreName)
select {
// TODO: 考虑优化处理, lblsc如果满了的情况
case lblsc <- log:
}
}
}
}
func (l *Loghub) Stop() {
l.stop <- 0
}
func (l *Loghub) processTopicMsg(topic string, logstoreName string) error {
cb := l.logsBuffer[logstoreName][topic]
for {
select {
case log := <-cb:
loggroup := generateLoggroupByTopicLog(log, "")
logstore, err := l.getLogstore(logstoreName)
if err != nil {
fmt.Println(err)
continue
}
err = putLogs(logstore, loggroup)
if err != nil {
fmt.Println(err)
continue
}
l.consumer.MarkOffset(log.cmsg, "loghub.processTopicMsg")
}
}
}
func (l *Loghub) getLogstore(logstoreName string) (*sls.LogStore, error) {
var logstore *sls.LogStore
l.m.RLock()
logstore = l.logstores[logstoreName]
l.m.RUnlock()
if logstore != nil {
return logstore, nil
}
var err error
for retry_times := 0; ; retry_times++ {
if retry_times > 5 {
return nil, errors.New("GetLogStore retry_times > 5")
}
logstore, err = l.logproject.GetLogStore(logstoreName)
if err != nil {
fmt.Printf("GetLogStore fail, retry:%d, err:%v\n", retry_times, err)
if strings.Contains(err.Error(), sls.PROJECT_NOT_EXIST) {
return nil, err
} else if strings.Contains(err.Error(), sls.LOGSTORE_NOT_EXIST) {
err = l.logproject.CreateLogStore(logstoreName, 1, 2)
if err != nil {
fmt.Printf("CreateLogStore fail, err: ", err.Error())
return nil, err
} else {
fmt.Println("CreateLogStore success")
l.m.Lock()
l.logstores[logstoreName] = logstore
l.m.Unlock()
return logstore, nil
}
}
} else {
fmt.Printf("GetLog | consume | identifier_name |
main.go | .Run()
}
func Start() {
go consume()
}
func consume() {
for {
select {
case msg, more := <-consumer.Messages():
if more {
fmt.Printf("kafka consumer msg: (topic:%s) (partition:%d) (offset:%d) (%s): (%s)\n", msg.Topic, msg.Partition, msg.Offset, msg.Key, msg.Value)
loghub.Input() <- msg
// consumer.MarkOffset(msg, "completed") // mark message as processed
// fmt.Println("kafka consumer HighWaterMarks", consumer.HighWaterMarks())
}
case err, more := <-consumer.Errors():
if more {
fmt.Printf("Kafka consumer error: %v\n", err.Error())
}
case ntf, more := <-consumer.Notifications():
if more {
fmt.Printf("Kafka consumer rebalance: %v\n", ntf)
}
case <-sig:
fmt.Errorf("Stop consumer server...")
consumer.Close()
return
}
}
}
func Stop(s os.Signal) |
func main() {
signals := make(chan os.Signal, 1)
signal.Notify(signals, os.Interrupt)
Start()
select {
case s := <-signals:
Stop(s)
}
}
type Config struct {
Name string
LogProject struct {
Name string
Endpoint string
AccessKeyID string
AccessKeySecret string
}
MessageChannelBufferSize int
LogsBufferSize int
Topics []string
LogsBufferSize4Logstore int
Logstores []string
}
type Loghub struct {
Name string
*Config
consumer *cluster.Consumer
logproject *sls.LogProject
logstores map[string]*sls.LogStore
messages chan *sarama.ConsumerMessage
messageChannelBufferSize int
m sync.RWMutex
stop chan int
mlogstoreLogsBuffer sync.RWMutex
logsBuffer4Logstore map[string](chan *topicLog) // by logstore
logsBufferSize4Logstore int
mlogsBuffer sync.RWMutex
// logsBuffer map[string](chan *topicLog) // by topic and logstore
logsBuffer map[string]map[string]chan *topicLog // by topic and logstore
logsBufferSize int
topics []string
}
type topicLog struct {
topic string
log *sls.Log
cmsg *sarama.ConsumerMessage
}
func NewLoghub(cfg *Config, consumer *cluster.Consumer) *Loghub {
logproject := &sls.LogProject{
Name: cfg.LogProject.Name,
Endpoint: cfg.LogProject.Endpoint,
AccessKeyID: cfg.LogProject.AccessKeyID,
AccessKeySecret: cfg.LogProject.AccessKeySecret,
}
lbls := map[string](chan *topicLog){}
lbr := map[string]map[string](chan *topicLog){}
// lbr := map[string](chan *topicLog){}
// for _, tp := range cfg.Topics {
// lbr[tp] = make(chan *topicLog, cfg.LogsBufferSize)
// }
lh := &Loghub{
Name: cfg.Name,
Config: cfg,
consumer: consumer,
logproject: logproject,
messages: make(chan *sarama.ConsumerMessage, cfg.MessageChannelBufferSize),
messageChannelBufferSize: cfg.MessageChannelBufferSize,
stop: make(chan int),
logsBuffer4Logstore: lbls,
logsBufferSize4Logstore: cfg.LogsBufferSize4Logstore,
logstores: map[string]*sls.LogStore{},
logsBuffer: lbr,
logsBufferSize: cfg.LogsBufferSize,
topics: cfg.Topics,
}
return lh
}
func (l *Loghub) Run() {
// lss, err := l.logproject.ListLogStore()
// if err != nil {
// panic(err)
// }
// 开启日志库
for _, lsn := range l.Logstores {
_, err := l.getLogstore(lsn)
if err != nil {
fmt.Printf("Loghub Start failed (logstoreName=%s). err: %v\n", lsn, err)
panic(err)
}
// 分配到topic
go l.dispatchToTopic(lsn)
for _, tp := range l.topics {
go l.processTopicMsg(lsn, tp)
}
}
// 分配消息
go l.dispatch()
}
func (l *Loghub) Input() chan<- *sarama.ConsumerMessage {
return l.messages
}
// dispatchToTopic
func (l *Loghub) dispatchToTopic(logstoreName string) {
// TODO: 处理消息, 分配到不同的topic
channelBuffer := l.getLogstoreLogsBufferChannel(logstoreName)
for {
select {
case log := <-channelBuffer:
l.mlogsBuffer.Lock()
logsCB, ok := l.logsBuffer[logstoreName]
if !ok || logsCB == nil {
logsCB = map[string]chan *topicLog{}
l.logsBuffer[logstoreName] = logsCB
}
logsCBTopic, ok := logsCB[log.topic]
if !ok || logsCBTopic == nil {
logsCBTopic = make(chan *topicLog, l.logsBufferSize)
logsCB[log.topic] = logsCBTopic
}
l.mlogsBuffer.Unlock()
logsCBTopic <- log
}
}
}
// dispatch
// 分配消息
func (l *Loghub) dispatch() error {
// TODO: logproject, logstore, topic
// 指定logproject和logstore进行分配
for {
select {
case msg := <-l.messages:
data, err := unserialize(msg)
if err != nil {
fmt.Println(err)
continue
}
logprojectName, ok1 := data["logproject"]
if !ok1 || logprojectName == "" {
fmt.Println("loghub.dispatch: data[\"logproject\"] was empty")
continue
}
logstoreName, ok2 := data["logstore"]
if !ok2 || logstoreName == "" {
fmt.Println("loghub.dispatch: data[\"logstore\"] was empty")
continue
}
topic, ok3 := data["topic"]
if !ok3 || topic == "" {
fmt.Println("loghub.dispatch: data[\"topic\"] was empty")
continue
}
log, err := generateLog(data)
if err != nil {
fmt.Println(err)
continue
}
// logstore, err := l.getLogstore(logstoreName)
// if err != nil {
// fmt.Println(err)
// continue
// }
log.cmsg = msg
lblsc := l.getLogstoreLogsBufferChannel(logstoreName)
select {
// TODO: 考虑优化处理, lblsc如果满了的情况
case lblsc <- log:
}
}
}
}
func (l *Loghub) Stop() {
l.stop <- 0
}
func (l *Loghub) processTopicMsg(topic string, logstoreName string) error {
cb := l.logsBuffer[logstoreName][topic]
for {
select {
case log := <-cb:
loggroup := generateLoggroupByTopicLog(log, "")
logstore, err := l.getLogstore(logstoreName)
if err != nil {
fmt.Println(err)
continue
}
err = putLogs(logstore, loggroup)
if err != nil {
fmt.Println(err)
continue
}
l.consumer.MarkOffset(log.cmsg, "loghub.processTopicMsg")
}
}
}
func (l *Loghub) getLogstore(logstoreName string) (*sls.LogStore, error) {
var logstore *sls.LogStore
l.m.RLock()
logstore = l.logstores[logstoreName]
l.m.RUnlock()
if logstore != nil {
return logstore, nil
}
var err error
for retry_times := 0; ; retry_times++ {
if retry_times > 5 {
return nil, errors.New("GetLogStore retry_times > 5")
}
logstore, err = l.logproject.GetLogStore(logstoreName)
if err != nil {
fmt.Printf("GetLogStore fail, retry:%d, err:%v\n", retry_times, err)
if strings.Contains(err.Error(), sls.PROJECT_NOT_EXIST) {
return nil, err
} else if strings.Contains(err.Error(), sls.LOGSTORE_NOT_EXIST) {
err = l.logproject.CreateLogStore(logstoreName, 1, 2)
if err != nil {
fmt.Printf("CreateLogStore fail, err: ", err.Error())
return nil, err
} else {
fmt.Println("CreateLogStore success")
l.m.Lock()
l.logstores[logstoreName] = logstore
l.m.Unlock()
return logstore, nil
}
}
} else {
fmt.Printf("Get | {
fmt.Println("Recived kafka consumer stop signal...")
sig <- s
fmt.Println("kafka consumer stopped!!!")
} | identifier_body |
main.go | hub.Run()
}
func Start() {
go consume()
}
func consume() {
for {
select {
case msg, more := <-consumer.Messages():
if more {
fmt.Printf("kafka consumer msg: (topic:%s) (partition:%d) (offset:%d) (%s): (%s)\n", msg.Topic, msg.Partition, msg.Offset, msg.Key, msg.Value)
loghub.Input() <- msg
// consumer.MarkOffset(msg, "completed") // mark message as processed
// fmt.Println("kafka consumer HighWaterMarks", consumer.HighWaterMarks())
}
case err, more := <-consumer.Errors():
if more {
fmt.Printf("Kafka consumer error: %v\n", err.Error())
}
case ntf, more := <-consumer.Notifications():
if more {
fmt.Printf("Kafka consumer rebalance: %v\n", ntf)
}
case <-sig:
fmt.Errorf("Stop consumer server...")
consumer.Close()
return
}
}
}
func Stop(s os.Signal) {
fmt.Println("Recived kafka consumer stop signal...")
sig <- s
fmt.Println("kafka consumer stopped!!!")
}
func main() {
signals := make(chan os.Signal, 1)
signal.Notify(signals, os.Interrupt)
Start()
select {
case s := <-signals:
Stop(s)
}
}
type Config struct {
Name string
LogProject struct {
Name string
Endpoint string
AccessKeyID string
AccessKeySecret string
}
MessageChannelBufferSize int
LogsBufferSize int
Topics []string
LogsBufferSize4Logstore int
Logstores []string
}
type Loghub struct {
Name string
*Config
consumer *cluster.Consumer
logproject *sls.LogProject
logstores map[string]*sls.LogStore
messages chan *sarama.ConsumerMessage
messageChannelBufferSize int
m sync.RWMutex
stop chan int
mlogstoreLogsBuffer sync.RWMutex
logsBuffer4Logstore map[string](chan *topicLog) // by logstore
logsBufferSize4Logstore int
mlogsBuffer sync.RWMutex
// logsBuffer map[string](chan *topicLog) // by topic and logstore
logsBuffer map[string]map[string]chan *topicLog // by topic and logstore
logsBufferSize int
topics []string
}
type topicLog struct {
topic string
log *sls.Log
cmsg *sarama.ConsumerMessage
}
func NewLoghub(cfg *Config, consumer *cluster.Consumer) *Loghub {
logproject := &sls.LogProject{
Name: cfg.LogProject.Name,
Endpoint: cfg.LogProject.Endpoint,
AccessKeyID: cfg.LogProject.AccessKeyID,
AccessKeySecret: cfg.LogProject.AccessKeySecret,
}
lbls := map[string](chan *topicLog){}
lbr := map[string]map[string](chan *topicLog){}
// lbr := map[string](chan *topicLog){}
// for _, tp := range cfg.Topics {
// lbr[tp] = make(chan *topicLog, cfg.LogsBufferSize)
// }
lh := &Loghub{
Name: cfg.Name,
Config: cfg,
consumer: consumer,
logproject: logproject,
messages: make(chan *sarama.ConsumerMessage, cfg.MessageChannelBufferSize),
messageChannelBufferSize: cfg.MessageChannelBufferSize,
stop: make(chan int),
logsBuffer4Logstore: lbls,
logsBufferSize4Logstore: cfg.LogsBufferSize4Logstore,
logstores: map[string]*sls.LogStore{},
logsBuffer: lbr,
logsBufferSize: cfg.LogsBufferSize,
topics: cfg.Topics,
}
return lh
}
func (l *Loghub) Run() {
// lss, err := l.logproject.ListLogStore()
// if err != nil {
// panic(err)
// }
// 开启日志库
for _, lsn := range l.Logstores {
_, err | .dispatch()
}
func (l *Loghub) Input() chan<- *sarama.ConsumerMessage {
return l.messages
}
// dispatchToTopic
func (l *Loghub) dispatchToTopic(logstoreName string) {
// TODO: 处理消息, 分配到不同的topic
channelBuffer := l.getLogstoreLogsBufferChannel(logstoreName)
for {
select {
case log := <-channelBuffer:
l.mlogsBuffer.Lock()
logsCB, ok := l.logsBuffer[logstoreName]
if !ok || logsCB == nil {
logsCB = map[string]chan *topicLog{}
l.logsBuffer[logstoreName] = logsCB
}
logsCBTopic, ok := logsCB[log.topic]
if !ok || logsCBTopic == nil {
logsCBTopic = make(chan *topicLog, l.logsBufferSize)
logsCB[log.topic] = logsCBTopic
}
l.mlogsBuffer.Unlock()
logsCBTopic <- log
}
}
}
// dispatch
// 分配消息
func (l *Loghub) dispatch() error {
// TODO: logproject, logstore, topic
// 指定logproject和logstore进行分配
for {
select {
case msg := <-l.messages:
data, err := unserialize(msg)
if err != nil {
fmt.Println(err)
continue
}
logprojectName, ok1 := data["logproject"]
if !ok1 || logprojectName == "" {
fmt.Println("loghub.dispatch: data[\"logproject\"] was empty")
continue
}
logstoreName, ok2 := data["logstore"]
if !ok2 || logstoreName == "" {
fmt.Println("loghub.dispatch: data[\"logstore\"] was empty")
continue
}
topic, ok3 := data["topic"]
if !ok3 || topic == "" {
fmt.Println("loghub.dispatch: data[\"topic\"] was empty")
continue
}
log, err := generateLog(data)
if err != nil {
fmt.Println(err)
continue
}
// logstore, err := l.getLogstore(logstoreName)
// if err != nil {
// fmt.Println(err)
// continue
// }
log.cmsg = msg
lblsc := l.getLogstoreLogsBufferChannel(logstoreName)
select {
// TODO: 考虑优化处理, lblsc如果满了的情况
case lblsc <- log:
}
}
}
}
func (l *Loghub) Stop() {
l.stop <- 0
}
func (l *Loghub) processTopicMsg(topic string, logstoreName string) error {
cb := l.logsBuffer[logstoreName][topic]
for {
select {
case log := <-cb:
loggroup := generateLoggroupByTopicLog(log, "")
logstore, err := l.getLogstore(logstoreName)
if err != nil {
fmt.Println(err)
continue
}
err = putLogs(logstore, loggroup)
if err != nil {
fmt.Println(err)
continue
}
l.consumer.MarkOffset(log.cmsg, "loghub.processTopicMsg")
}
}
}
func (l *Loghub) getLogstore(logstoreName string) (*sls.LogStore, error) {
var logstore *sls.LogStore
l.m.RLock()
logstore = l.logstores[logstoreName]
l.m.RUnlock()
if logstore != nil {
return logstore, nil
}
var err error
for retry_times := 0; ; retry_times++ {
if retry_times > 5 {
return nil, errors.New("GetLogStore retry_times > 5")
}
logstore, err = l.logproject.GetLogStore(logstoreName)
if err != nil {
fmt.Printf("GetLogStore fail, retry:%d, err:%v\n", retry_times, err)
if strings.Contains(err.Error(), sls.PROJECT_NOT_EXIST) {
return nil, err
} else if strings.Contains(err.Error(), sls.LOGSTORE_NOT_EXIST) {
err = l.logproject.CreateLogStore(logstoreName, 1, 2)
if err != nil {
fmt.Printf("CreateLogStore fail, err: ", err.Error())
return nil, err
} else {
fmt.Println("CreateLogStore success")
l.m.Lock()
l.logstores[logstoreName] = logstore
l.m.Unlock()
return logstore, nil
}
}
} else {
fmt.Printf("GetLog | := l.getLogstore(lsn)
if err != nil {
fmt.Printf("Loghub Start failed (logstoreName=%s). err: %v\n", lsn, err)
panic(err)
}
// 分配到topic
go l.dispatchToTopic(lsn)
for _, tp := range l.topics {
go l.processTopicMsg(lsn, tp)
}
}
// 分配消息
go l | conditional_block |
projectstate.rs | the project.
pub public_keys: HashMap<String, bool>,
/// The project's slug if available.
pub slug: Option<String>,
/// The project's current config
pub config: ProjectConfig,
/// The project state's revision id.
pub rev: Option<Uuid>,
}
/// A helper enum indicating the public key state.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum PublicKeyStatus {
/// The state of the public key is not known.
///
/// This can indicate that the key is not yet known or that the
/// key just does not exist. We can not tell these two cases
/// apart as there is always a lag since the last update from the
/// upstream server. As such the project state uses a heuristic
/// to decide if it should treat a key as not existing or just
/// not yet known.
Unknown,
/// This key is known but was disabled.
Disabled,
/// This key is known and is enabled.
Enabled,
}
/// Indicates what should happen to events based on the public key
/// of a project.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum PublicKeyEventAction {
/// Indicates that this event should be queued.
Queue,
/// Indicates that this event should be discarded.
Discard,
/// Indicates that this event should be sent now.
Send,
}
/// An event that has not been sent yet.
#[derive(Debug)]
struct PendingStore {
added_at: Instant,
changeset: StoreChangeset,
}
/// Gives access to the project's remote state.
///
/// This wrapper is sync and can be updated concurrently. Because the type is
/// sync, all of the methods can be used on a shared instance. The type
/// locks internally as needed.
#[derive(Debug)]
pub struct ProjectState {
config: Arc<AortaConfig>,
project_id: ProjectId,
current_snapshot: RwLock<Option<Arc<ProjectStateSnapshot>>>,
pending_stores: RwLock<Vec<PendingStore>>,
requested_new_snapshot: AtomicBool,
request_manager: Arc<RequestManager>,
last_event: RwLock<Option<DateTime<Utc>>>,
}
impl ProjectStateSnapshot {
/// Returns the current status of a key.
pub fn get_public_key_status(&self, public_key: &str) -> PublicKeyStatus {
match self.public_keys.get(public_key) {
Some(&true) => PublicKeyStatus::Enabled,
Some(&false) => PublicKeyStatus::Disabled,
None => PublicKeyStatus::Unknown,
}
}
/// Checks if a public key is enabled.
pub fn public_key_is_enabled(&self, public_key: &str) -> bool {
self.get_public_key_status(public_key) == PublicKeyStatus::Enabled
}
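// Illustrative sketch, not part of the original file: a hypothetical helper listing
// all currently enabled public keys, derived purely from the `public_keys` map above.
// The name is an assumption made for this example.
#[allow(dead_code)]
fn enabled_public_keys(&self) -> Vec<&str> {
self.public_keys
.iter()
.filter(|&(_, &enabled)| enabled)
.map(|(key, _)| key.as_str())
.collect()
}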
/// Returns `true` if the entire project should be considered
/// disabled (blackholed, deleted etc.).
pub fn disabled(&self) -> bool {
self.disabled
}
/// Returns true if the snapshot is outdated.
pub fn outdated(&self, config: &AortaConfig) -> bool {
// TODO(armin): change this to a value from the config
self.last_fetch < Utc::now() - config.snapshot_expiry
}
/// Returns the project config.
pub fn config(&self) -> &ProjectConfig {
&self.config
}
}
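// Illustrative sketch, not part of the original file: a minimal test showing how
// `get_public_key_status` and `public_key_is_enabled` behave for enabled, disabled
// and unknown keys. The field values are made up, and it assumes `ProjectConfig`
// only has the `allowed_domains` field shown elsewhere in this file.
#[cfg(test)]
mod public_key_status_example {
use super::*;
use std::collections::HashMap;
use chrono::Utc;
#[test]
fn status_lookup() {
let mut public_keys = HashMap::new();
public_keys.insert("enabled_key".to_string(), true);
public_keys.insert("disabled_key".to_string(), false);
let snapshot = ProjectStateSnapshot {
last_fetch: Utc::now(),
last_change: None,
disabled: false,
public_keys: public_keys,
slug: Some("example".to_string()),
config: ProjectConfig {
allowed_domains: vec!["*".to_string()],
},
rev: None,
};
assert_eq!(snapshot.get_public_key_status("enabled_key"), PublicKeyStatus::Enabled);
assert_eq!(snapshot.get_public_key_status("disabled_key"), PublicKeyStatus::Disabled);
assert_eq!(snapshot.get_public_key_status("missing_key"), PublicKeyStatus::Unknown);
assert!(snapshot.public_key_is_enabled("enabled_key"));
assert!(!snapshot.public_key_is_enabled("missing_key"));
}
}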
impl ProjectState {
/// Creates a new project state.
///
/// The project state is created without storing a snapshot. This means
/// that accessing the snapshot will panic until the data becomes available.
///
/// The config is taken as `Arc` so we can share it effectively across
/// multiple project states and troves.
pub fn new(
project_id: ProjectId,
config: Arc<AortaConfig>,
request_manager: Arc<RequestManager>,
) -> ProjectState {
ProjectState {
project_id: project_id,
config: config,
current_snapshot: RwLock::new(None),
pending_stores: RwLock::new(Vec::new()),
requested_new_snapshot: AtomicBool::new(false),
request_manager: request_manager,
last_event: RwLock::new(None),
}
}
/// Adds a query that should be issued with the next heartbeat.
pub fn add_query<Q, R, F, E>(&self, query: Q, callback: F) -> Uuid
where
Q: AortaQuery<Response = R>,
R: DeserializeOwned + 'static,
F: FnMut(&ProjectState, Result<R, QueryError>) -> Result<(), E> + Sync + Send + 'static,
E: fmt::Debug,
{
self.request_manager
.add_query(self.project_id(), query, callback)
}
/// The project ID of this project.
pub fn project_id(&self) -> ProjectId {
self.project_id
}
/// The direct upstream that reported the snapshot.
///
/// Currently a relay only ever has one trove, and that trove can only have
/// one upstream descriptor. As a result, this descriptor will always
/// match the one of the trove which holds the project state.
pub fn upstream(&self) -> &UpstreamDescriptor {
&self.config.upstream
}
/// Returns the time of the last event received (but not forwarded).
///
/// This timestamp is used to indicate to the trove that the project has
/// activity. Because the trove needs to expire inactive items, it uses this
/// timestamp to decide when to do so.
pub fn last_event(&self) -> Option<DateTime<Utc>> |
/// Returns the time of the last config fetch.
pub fn last_config_fetch(&self) -> Option<DateTime<Utc>> {
self.current_snapshot
.read()
.as_ref()
.map(|x| x.last_fetch.clone())
}
/// Returns the time of the last config change.
pub fn last_config_change(&self) -> Option<DateTime<Utc>> {
self.current_snapshot
.read()
.as_ref()
.and_then(|x| x.last_change.clone())
}
/// Requests that an updated project config be fetched.
pub fn request_updated_project_config(&self) {
if self.requested_new_snapshot
.compare_and_swap(false, true, Ordering::Relaxed)
{
return;
}
debug!("requesting updated project config for {}", self.project_id);
self.add_query(GetProjectConfigQuery, move |ps, rv| -> Result<(), ()> {
if let Ok(snapshot_opt) = rv {
ps.requested_new_snapshot.store(false, Ordering::Relaxed);
match snapshot_opt {
Some(snapshot) => ps.set_snapshot(snapshot),
None => ps.set_missing_snapshot(),
}
} else {
// TODO: error handling
rv.unwrap();
}
Ok(())
});
}
/// Checks if events should be buffered for a public key.
///
/// Events should be buffered until a key becomes available or we know for
/// certain that the key does not exist. The internal logic here indicates
/// different behaviors based on the age of the snapshot and some global
/// disabled settings.
pub fn get_public_key_event_action(&self, public_key: &str) -> PublicKeyEventAction {
match *self.current_snapshot.read() {
Some(ref snapshot) => {
// in case the entire project is disabled we always discard
// events unless the snapshot is outdated. In case the
// snapshot is outdated we might fall back to sending or
// discarding as well based on the key status.
if !snapshot.outdated(&self.config) && snapshot.disabled() {
return PublicKeyEventAction::Discard;
}
match snapshot.get_public_key_status(public_key) {
PublicKeyStatus::Enabled => PublicKeyEventAction::Send,
PublicKeyStatus::Disabled => PublicKeyEventAction::Discard,
PublicKeyStatus::Unknown => {
// we don't know the key yet, ensure we fetch it on the next
// heartbeat.
self.request_updated_project_config();
                        // if the snapshot is outdated we just accept the event
                        // because at this point the dsn might have become
                        // available upstream.
                        if snapshot.outdated(&self.config) {
                            PublicKeyEventAction::Queue
                        } else {
                            // we just assume the config did not change since the
                            // last fetch and the dsn is indeed not seen yet.
                            PublicKeyEventAction::Discard
                        }
}
}
}
// in the absence of a snapshot we generally queue
None => {
self.request_updated_project_config();
PublicKeyEventAction::Queue
}
}
}
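    // Added summary (not part of the original source) of the decision logic above,
    // with rows evaluated top to bottom:
    //
    //   snapshot           | key status | action
    //   -------------------+------------+------------------------------------
    //   missing            | n/a        | Queue (and request a config fetch)
    //   fresh + disabled   | any        | Discard
    //   any                | Enabled    | Send
    //   any                | Disabled   | Discard
    //   outdated           | Unknown    | Queue (and request a config fetch)
    //   fresh              | Unknown    | Discard (and request a config fetch)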
/// Validates the origin.
pub fn is_valid_origin(&self, origin: &Url) -> bool {
self.snapshot_opt().map_or(true, |snapshot| {
let allowed = &snapshot.config().allowed_domains;
!allowed.is_empty()
&& allowed
.iter()
.any(|x| x.as_str() == "*" || Some(x.as_str()) == origin.host_str())
})
}
/// Given a public key and an event this handles an event.
///
/// It either puts it into an internal queue, sends it or discards it. If the item
/// was discarded `false` is returned.
pub fn store_changeset<'a>(&self, changeset: StoreChangeset) -> bool {
if let Some(ref origin) = changes | {
*self.last_event.read()
} | identifier_body |
projectstate.rs | {
/// URLs that are permitted for cross original JavaScript requests.
pub allowed_domains: Vec<String>,
}
/// The project state snapshot represents a known server state of
/// a project.
///
/// This is generally used by an indirection of `ProjectState` which
/// manages a view over it which supports concurrent updates in the
/// background.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ProjectStateSnapshot {
/// The timestamp of when the snapshot was received.
pub last_fetch: DateTime<Utc>,
/// The timestamp of when the last snapshot was changed.
///
/// This might be `None` in some rare cases like where snapshots
/// are faked locally.
pub last_change: Option<DateTime<Utc>>,
/// Indicates that the project is disabled.
pub disabled: bool,
/// A container of known public keys in the project.
pub public_keys: HashMap<String, bool>,
/// The project's slug if available.
pub slug: Option<String>,
/// The project's current config
pub config: ProjectConfig,
/// The project state's revision id.
pub rev: Option<Uuid>,
}
/// A helper enum indicating the public key state.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum PublicKeyStatus {
/// The state of the public key is not known.
///
/// This can indicate that the key is not yet known or that the
/// key just does not exist. We can not tell these two cases
/// apart as there is always a lag since the last update from the
/// upstream server. As such the project state uses a heuristic
/// to decide if it should treat a key as not existing or just
/// not yet known.
Unknown,
/// This key is known but was disabled.
Disabled,
/// This key is known and is enabled.
Enabled,
}
/// Indicates what should happen to events based on the public key
/// of a project.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum PublicKeyEventAction {
/// Indicates that this event should be queued.
Queue,
/// Indicates that this event should be discarded.
Discard,
/// Indicates that this event should be sent now.
Send,
}
/// An event that was not sent yet.
#[derive(Debug)]
struct PendingStore {
added_at: Instant,
changeset: StoreChangeset,
}
/// Gives access to the project's remote state.
///
/// This wrapper is sync and can be updated concurrently. As the type is
/// sync all of the methods can be used on a shared instance. The type
/// internally locks automatically.
#[derive(Debug)]
pub struct ProjectState {
config: Arc<AortaConfig>,
project_id: ProjectId,
current_snapshot: RwLock<Option<Arc<ProjectStateSnapshot>>>,
pending_stores: RwLock<Vec<PendingStore>>,
requested_new_snapshot: AtomicBool,
request_manager: Arc<RequestManager>,
last_event: RwLock<Option<DateTime<Utc>>>,
}
impl ProjectStateSnapshot {
/// Returns the current status of a key.
pub fn get_public_key_status(&self, public_key: &str) -> PublicKeyStatus {
match self.public_keys.get(public_key) {
Some(&true) => PublicKeyStatus::Enabled,
Some(&false) => PublicKeyStatus::Disabled,
None => PublicKeyStatus::Unknown,
}
}
/// Checks if a public key is enabled.
pub fn public_key_is_enabled(&self, public_key: &str) -> bool {
self.get_public_key_status(public_key) == PublicKeyStatus::Enabled
}
/// Returns `true` if the entire project should be considered
/// disabled (blackholed, deleted etc.).
pub fn disabled(&self) -> bool {
self.disabled
}
/// Returns true if the snapshot is outdated.
pub fn outdated(&self, config: &AortaConfig) -> bool {
// TODO(armin): change this to a value from the config
self.last_fetch < Utc::now() - config.snapshot_expiry
}
/// Returns the project config.
pub fn config(&self) -> &ProjectConfig {
&self.config
}
}
impl ProjectState {
/// Creates a new project state.
///
/// The project state is created without storing a snapshot. This means
/// that accessing the snapshot will panic until the data becomes available.
///
/// The config is taken as `Arc` so we can share it effectively across
/// multiple project states and troves.
pub fn new(
project_id: ProjectId,
config: Arc<AortaConfig>,
request_manager: Arc<RequestManager>,
) -> ProjectState {
ProjectState {
project_id: project_id,
config: config,
current_snapshot: RwLock::new(None),
pending_stores: RwLock::new(Vec::new()),
requested_new_snapshot: AtomicBool::new(false),
request_manager: request_manager,
last_event: RwLock::new(None),
}
}
/// Adds a query that should be issued with the next heartbeat.
pub fn add_query<Q, R, F, E>(&self, query: Q, callback: F) -> Uuid
where
Q: AortaQuery<Response = R>,
R: DeserializeOwned + 'static,
F: FnMut(&ProjectState, Result<R, QueryError>) -> Result<(), E> + Sync + Send + 'static,
E: fmt::Debug,
{
self.request_manager
.add_query(self.project_id(), query, callback)
}
/// The project ID of this project.
pub fn project_id(&self) -> ProjectId {
self.project_id
}
/// The direct upstream that reported the snapshot.
///
    /// Currently a relay only ever has one trove, and that trove can only have
    /// one upstream descriptor. As a result, this descriptor will always
/// match the one of the trove which holds the project state.
pub fn upstream(&self) -> &UpstreamDescriptor {
&self.config.upstream
}
/// Returns the time of the last event received (but not forwarded).
///
/// This timestamp is used to indicate that the project has activity
    /// for the trove. As the trove needs to expire items, it uses this
    /// timestamp to decide when to do so.
pub fn last_event(&self) -> Option<DateTime<Utc>> {
*self.last_event.read()
}
/// Returns the time of the last config fetch.
pub fn last_config_fetch(&self) -> Option<DateTime<Utc>> {
self.current_snapshot
.read()
.as_ref()
.map(|x| x.last_fetch.clone())
}
/// Returns the time of the last config change.
pub fn last_config_change(&self) -> Option<DateTime<Utc>> {
self.current_snapshot
.read()
.as_ref()
.and_then(|x| x.last_change.clone())
}
/// Requests an update to the project config to be fetched.
pub fn request_updated_project_config(&self) {
if self.requested_new_snapshot
.compare_and_swap(false, true, Ordering::Relaxed)
{
return;
}
debug!("requesting updated project config for {}", self.project_id);
self.add_query(GetProjectConfigQuery, move |ps, rv| -> Result<(), ()> {
if let Ok(snapshot_opt) = rv {
ps.requested_new_snapshot.store(false, Ordering::Relaxed);
match snapshot_opt {
Some(snapshot) => ps.set_snapshot(snapshot),
None => ps.set_missing_snapshot(),
}
} else {
// TODO: error handling
rv.unwrap();
}
Ok(())
});
}
/// Checks if events should be buffered for a public key.
///
    /// Events should be buffered until a key becomes available or we
    /// know for certain that the key does not exist. There is some
    /// internal logic here that, based on the age of the snapshot and
    /// the global disabled setting, leads to different behaviors.
pub fn get_public_key_event_action(&self, public_key: &str) -> PublicKeyEventAction {
match *self.current_snapshot.read() {
Some(ref snapshot) => {
// in case the entire project is disabled we always discard
// events unless the snapshot is outdated. In case the
// snapshot is outdated we might fall back to sending or
// discarding as well based on the key status.
if !snapshot.outdated(&self.config) && snapshot.disabled() {
return PublicKeyEventAction::Discard;
}
match snapshot.get_public_key_status(public_key) {
PublicKeyStatus::Enabled => PublicKeyEventAction::Send,
PublicKeyStatus::Disabled => PublicKeyEventAction::Discard,
PublicKeyStatus::Unknown => {
// we don't know the key yet, ensure we fetch it on the next
// heartbeat.
self.request_updated_project_config();
                        // if the snapshot is outdated we just accept the event
                        // because at this point the dsn might have become
                        // available upstream.
                        if snapshot.outdated(&self.config) {
                            PublicKeyEventAction::Queue
                        } else {
                            // we just assume the config did not change since the
                            // last fetch and the dsn is indeed not seen yet.
                            PublicKeyEventAction::Discard
                        }
}
}
}
// in the absence of a snapshot we generally queue
None => {
| ProjectConfig | identifier_name |
|
projectstate.rs | in the project.
pub public_keys: HashMap<String, bool>,
/// The project's slug if available.
pub slug: Option<String>,
/// The project's current config
pub config: ProjectConfig,
/// The project state's revision id.
pub rev: Option<Uuid>,
}
/// A helper enum indicating the public key state.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum PublicKeyStatus {
/// The state of the public key is not known.
///
/// This can indicate that the key is not yet known or that the
/// key just does not exist. We can not tell these two cases
/// apart as there is always a lag since the last update from the
/// upstream server. As such the project state uses a heuristic
/// to decide if it should treat a key as not existing or just
/// not yet known.
Unknown,
/// This key is known but was disabled.
Disabled,
/// This key is known and is enabled.
Enabled,
}
/// Indicates what should happen to events based on the public key
/// of a project.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum PublicKeyEventAction {
/// Indicates that this event should be queued.
Queue,
/// Indicates that this event should be discarded.
Discard,
/// Indicates that this event should be sent now.
Send,
}
/// An event that was not sent yet.
#[derive(Debug)]
struct PendingStore {
added_at: Instant,
changeset: StoreChangeset,
}
/// Gives access to the project's remote state.
///
/// This wrapper is sync and can be updated concurrently. As the type is
/// sync all of the methods can be used on a shared instance. The type
/// internally locks automatically.
#[derive(Debug)]
pub struct ProjectState {
config: Arc<AortaConfig>,
project_id: ProjectId,
current_snapshot: RwLock<Option<Arc<ProjectStateSnapshot>>>,
pending_stores: RwLock<Vec<PendingStore>>,
requested_new_snapshot: AtomicBool,
request_manager: Arc<RequestManager>,
last_event: RwLock<Option<DateTime<Utc>>>,
}
impl ProjectStateSnapshot {
/// Returns the current status of a key.
pub fn get_public_key_status(&self, public_key: &str) -> PublicKeyStatus {
match self.public_keys.get(public_key) {
Some(&true) => PublicKeyStatus::Enabled,
Some(&false) => PublicKeyStatus::Disabled,
None => PublicKeyStatus::Unknown,
}
}
/// Checks if a public key is enabled.
pub fn public_key_is_enabled(&self, public_key: &str) -> bool {
self.get_public_key_status(public_key) == PublicKeyStatus::Enabled
}
/// Returns `true` if the entire project should be considered
/// disabled (blackholed, deleted etc.).
pub fn disabled(&self) -> bool {
self.disabled
}
/// Returns true if the snapshot is outdated.
pub fn outdated(&self, config: &AortaConfig) -> bool {
// TODO(armin): change this to a value from the config
self.last_fetch < Utc::now() - config.snapshot_expiry
}
/// Returns the project config.
pub fn config(&self) -> &ProjectConfig {
&self.config
}
}
impl ProjectState {
/// Creates a new project state.
///
/// The project state is created without storing a snapshot. This means
/// that accessing the snapshot will panic until the data becomes available.
///
/// The config is taken as `Arc` so we can share it effectively across
/// multiple project states and troves.
pub fn new(
project_id: ProjectId,
config: Arc<AortaConfig>,
request_manager: Arc<RequestManager>,
) -> ProjectState {
ProjectState {
project_id: project_id,
config: config,
current_snapshot: RwLock::new(None),
pending_stores: RwLock::new(Vec::new()),
requested_new_snapshot: AtomicBool::new(false),
request_manager: request_manager,
last_event: RwLock::new(None),
}
}
/// Adds a query that should be issued with the next heartbeat.
pub fn add_query<Q, R, F, E>(&self, query: Q, callback: F) -> Uuid
where
Q: AortaQuery<Response = R>,
R: DeserializeOwned + 'static,
F: FnMut(&ProjectState, Result<R, QueryError>) -> Result<(), E> + Sync + Send + 'static,
E: fmt::Debug,
{
self.request_manager
.add_query(self.project_id(), query, callback)
}
/// The project ID of this project.
pub fn project_id(&self) -> ProjectId {
self.project_id
}
/// The direct upstream that reported the snapshot.
///
    /// Currently a relay only ever has one trove, and that trove can only have
    /// one upstream descriptor. As a result, this descriptor will always
/// match the one of the trove which holds the project state.
pub fn upstream(&self) -> &UpstreamDescriptor {
&self.config.upstream
}
/// Returns the time of the last event received (but not forwarded).
///
/// This timestamp is used to indicate that the project has activity
    /// for the trove. As the trove needs to expire items, it uses this
    /// timestamp to decide when to do so.
pub fn last_event(&self) -> Option<DateTime<Utc>> {
*self.last_event.read()
}
/// Returns the time of the last config fetch.
pub fn last_config_fetch(&self) -> Option<DateTime<Utc>> {
self.current_snapshot
.read()
.as_ref()
.map(|x| x.last_fetch.clone())
}
/// Returns the time of the last config change.
pub fn last_config_change(&self) -> Option<DateTime<Utc>> {
self.current_snapshot
.read()
.as_ref() | .and_then(|x| x.last_change.clone())
}
/// Requests an update to the project config to be fetched.
pub fn request_updated_project_config(&self) {
if self.requested_new_snapshot
.compare_and_swap(false, true, Ordering::Relaxed)
{
return;
}
debug!("requesting updated project config for {}", self.project_id);
self.add_query(GetProjectConfigQuery, move |ps, rv| -> Result<(), ()> {
if let Ok(snapshot_opt) = rv {
ps.requested_new_snapshot.store(false, Ordering::Relaxed);
match snapshot_opt {
Some(snapshot) => ps.set_snapshot(snapshot),
None => ps.set_missing_snapshot(),
}
} else {
// TODO: error handling
rv.unwrap();
}
Ok(())
});
}
/// Checks if events should be buffered for a public key.
///
    /// Events should be buffered until a key becomes available or we
    /// know for certain that the key does not exist. There is some
    /// internal logic here that, based on the age of the snapshot and
    /// the global disabled setting, leads to different behaviors.
pub fn get_public_key_event_action(&self, public_key: &str) -> PublicKeyEventAction {
match *self.current_snapshot.read() {
Some(ref snapshot) => {
// in case the entire project is disabled we always discard
// events unless the snapshot is outdated. In case the
// snapshot is outdated we might fall back to sending or
// discarding as well based on the key status.
if !snapshot.outdated(&self.config) && snapshot.disabled() {
return PublicKeyEventAction::Discard;
}
match snapshot.get_public_key_status(public_key) {
PublicKeyStatus::Enabled => PublicKeyEventAction::Send,
PublicKeyStatus::Disabled => PublicKeyEventAction::Discard,
PublicKeyStatus::Unknown => {
// we don't know the key yet, ensure we fetch it on the next
// heartbeat.
self.request_updated_project_config();
                        // if the snapshot is outdated we just accept the event
                        // because at this point the dsn might have become
                        // available upstream.
                        if snapshot.outdated(&self.config) {
                            PublicKeyEventAction::Queue
                        } else {
                            // we just assume the config did not change since the
                            // last fetch and the dsn is indeed not seen yet.
                            PublicKeyEventAction::Discard
                        }
}
}
}
// in the absence of a snapshot we generally queue
None => {
self.request_updated_project_config();
PublicKeyEventAction::Queue
}
}
}
/// Validates the origin.
pub fn is_valid_origin(&self, origin: &Url) -> bool {
self.snapshot_opt().map_or(true, |snapshot| {
let allowed = &snapshot.config().allowed_domains;
!allowed.is_empty()
&& allowed
.iter()
.any(|x| x.as_str() == "*" || Some(x.as_str()) == origin.host_str())
})
}
/// Given a public key and an event this handles an event.
///
/// It either puts it into an internal queue, sends it or discards it. If the item
/// was discarded `false` is returned.
pub fn store_changeset<'a>(&self, changeset: StoreChangeset) -> bool {
if let Some(ref origin) = changeset | random_line_split |
|
exportAnswersToXLSX.py | Id
):
# Delete the WorkloadId
try:
response=waclient.delete_workload(
WorkloadId=workloadId
)
except botocore.exceptions.ParamValidationError as e:
logger.error("ERROR - Parameter validation error: %s" % e)
except botocore.exceptions.ClientError as e:
logger.error("ERROR - Unexpected error: %s" % e)
def GetWorkload(
waclient,
workloadId
):
# Get the WorkloadId
try:
response=waclient.get_workload(
WorkloadId=workloadId
)
except botocore.exceptions.ParamValidationError as e:
logger.error("ERROR - Parameter validation error: %s" % e)
except botocore.exceptions.ClientError as e:
logger.error("ERROR - Unexpected error: %s" % e)
exit()
# print("Full JSON:",json.dumps(response['Workload'], cls=DateTimeEncoder))
workload = response['Workload']
# print("WorkloadId",workloadId)
return workload
def listLens(
waclient
):
# List all lenses currently available
try:
response=waclient.list_lenses()
except botocore.exceptions.ParamValidationError as e:
logger.error("ERROR - Parameter validation error: %s" % e)
except botocore.exceptions.ClientError as e:
logger.error("ERROR - Unexpected error: %s" % e)
# print(json.dumps(response))
lenses = jmespath.search("LensSummaries[*].LensAlias", response)
return lenses
def getCurrentLensVersion(
waclient,
lensAlias
):
# List all lenses currently available
try:
response=waclient.list_lenses()
except botocore.exceptions.ParamValidationError as e:
logger.error("ERROR - Parameter validation error: %s" % e)
except botocore.exceptions.ClientError as e:
logger.error("ERROR - Unexpected error: %s" % e)
# print(json.dumps(response))
searchString = "LensSummaries[?LensAlias==`"+lensAlias+"`].LensVersion"
lenses = jmespath.search(searchString, response)
return lenses[0]
def | (
waclient,
workloadId,
lensAlias
):
answers = []
# Due to a bug in some lenses, I have to iterate over each pillar in order to
# retrieve the correct results.
for pillar in PILLAR_PARSE_MAP:
logger.debug("Grabbing answers for %s %s" % (lensAlias, pillar))
        # List the answers for this lens and pillar
try:
response=waclient.list_answers(
WorkloadId=workloadId,
LensAlias=lensAlias,
PillarId=pillar
)
except botocore.exceptions.ParamValidationError as e:
logger.error("ERROR - Parameter validation error: %s" % e)
except botocore.exceptions.ClientError as e:
logger.error("ERROR - Unexpected error: %s" % e)
answers.extend(response["AnswerSummaries"])
while "NextToken" in response:
try:
response = waclient.list_answers(WorkloadId=workloadId,LensAlias=lensAlias,PillarId=pillar,NextToken=response["NextToken"])
except botocore.exceptions.ParamValidationError as e:
logger.error("ERROR - Parameter validation error: %s" % e)
except botocore.exceptions.ClientError as e:
logger.error("ERROR - Unexpected error: %s" % e)
answers.extend(response["AnswerSummaries"])
return answers
def getQuestionDetails(
waclient,
workloadId,
lensAlias,
questionId
):
    # Find an answer for a questionId
try:
response=waclient.get_answer(
WorkloadId=workloadId,
LensAlias=lensAlias,
QuestionId=questionId
)
except botocore.exceptions.ParamValidationError as e:
logger.error("ERROR - Parameter validation error: %s" % e)
except botocore.exceptions.ClientError as e:
logger.error("ERROR - Unexpected error: %s" % e)
qDescription = jmespath.search("Answer.QuestionDescription", response)
qImprovementPlanUrl = jmespath.search("Answer.ImprovementPlanUrl", response)
qHelpfulResourceUrl = jmespath.search("Answer.HelpfulResourceUrl", response)
qNotes = jmespath.search("Answer.Notes", response)
return qDescription, qImprovementPlanUrl, qHelpfulResourceUrl, qNotes
def updateAnswersForQuestion(
waclient,
workloadId,
lensAlias,
questionId,
selectedChoices,
notes
):
    # Update an answer to a question
try:
response=waclient.update_answer(
WorkloadId=workloadId,
LensAlias=lensAlias,
QuestionId=questionId,
SelectedChoices=selectedChoices,
Notes=notes
)
except botocore.exceptions.ParamValidationError as e:
logger.error("ERROR - Parameter validation error: %s" % e)
except botocore.exceptions.ClientError as e:
logger.error("ERROR - Unexpected error: %s" % e)
# print(json.dumps(response))
jmesquery = "Answer.SelectedChoices"
answers = jmespath.search(jmesquery, response)
return answers
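# Illustrative usage sketch (added; the question and choice IDs below are placeholders,
# not values taken from this script):
#
# selected = updateAnswersForQuestion(
#     WACLIENT, workloadId, "wellarchitected", "example-question-id",
#     ["example-choice-id-1", "example-choice-id-2"], "Answered by automation",
# )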
def getImprovementPlanItems(
waclient,
workloadId,
lensAlias,
QuestionId,
PillarId,
ImprovementPlanUrl,
ChoiceList
):
# This will parse the IP Items to gather the links we need
response = {}
htmlString = ""
# unanswered = getUnansweredForQuestion(waclient,workloadId,'wellarchitected',QuestionId)
urlresponse = urllib.request.urlopen(ImprovementPlanUrl)
htmlBytes = urlresponse.read()
htmlStr = htmlBytes.decode("utf8")
htmlSplit = htmlStr.split('\n')
ipHTMLList = {}
for line in htmlSplit:
for uq in ChoiceList:
if uq in line:
parsed = BeautifulSoup(line,features="html.parser")
ipHTMLList.update({uq: str(parsed.a['href'])})
return ipHTMLList
def getImprovementPlanHTMLDescription(
ImprovementPlanUrl,
PillarId
):
logger.debug("ImprovementPlanUrl: %s for pillar %s " % (ImprovementPlanUrl,PILLAR_PARSE_MAP[PillarId]))
stepRaw = ImprovementPlanUrl.rsplit('#')[1]
# Grab the number of the step we are referencing
    # This will work as long as there are fewer than 99 steps.
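    # (Added example: a fragment of "step3" yields stepNumber "3"; "step12" yields "12".)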
if len(stepRaw) <= 5:
stepNumber = stepRaw[-1]
else:
        stepNumber = stepRaw[-2:]
#Generate the string for the step number
firstItem = "step"+stepNumber
secondItem = ("step"+str((int(stepNumber)+1)))
logger.debug ("Going from %s to %s" % (firstItem, secondItem))
urlresponse = urllib.request.urlopen(ImprovementPlanUrl)
htmlBytes = urlresponse.read()
htmlStr = htmlBytes.decode("utf8")
htmlSplit = htmlStr.split('\n')
foundit = 0
ipString = ""
questionIdText = ""
for i in htmlSplit:
if PILLAR_PARSE_MAP[PillarId] in i:
bsparse = BeautifulSoup(i,features="html.parser")
questionIdText = str(bsparse.text).split(':')[0].strip()
if (secondItem in i) or ("</div>" in i):
foundit = 0
if firstItem in i:
foundit = 1
ipString+=i
elif foundit:
ipString+=i
prettyHTML = BeautifulSoup(ipString,features="html.parser")
# Need to remove all of the "local glossary links" since they point to relative paths
for a in prettyHTML.findAll('a', 'glossref'):
a.replaceWithChildren()
return prettyHTML, questionIdText
def lensTabCreation(
WACLIENT,
workloadId,
lens,
workbook,
allQuestionsForLens,
workloadName="",
AWSAccountId="",
workloadDescription=""
):
# Setup some formatting for the workbook
bold = workbook.add_format({'bold': True})
bold_border = workbook.add_format({
'border': 1,
'border_color': 'black',
'text_wrap': True
})
bold_border_bold = workbook.add_format({
'border': 1,
'border_color': 'black',
'text_wrap': True,
'font_size': 20,
'bold': True
})
heading = workbook.add_format({
'font_size': 24,
'bold': True
})
lineA = workbook.add_format({
'border': 1,
'border_color': 'black',
'bg_color': '#E0EBF6',
'align': 'top',
'text_wrap': True
})
lineB = workbook.add_format({
'border': 1,
'border_color': 'black',
'bg_color': '#E4EFDC',
'align': 'top',
'text_wrap': True
})
lineAnoborder = workbook | findAllQuestionId | identifier_name |
exportAnswersToXLSX.py | and parse for the HTML content
if qImprovementPlanUrl:
jmesquery = "[?QuestionId=='"+answers['QuestionId']+"'].Choices[].ChoiceId"
choiceList = jmespath.search(jmesquery, allQuestionsForLens)
ipList = getImprovementPlanItems(WACLIENT,workloadId,lens,answers['QuestionId'],answers['PillarId'],qImprovementPlanUrl,choiceList)
else:
ipList = []
startingCellID=cellID
# If its the first time through this particular pillar question:
# I want to only write the name once, but I need to fill in
# each cell with the same data so the autosort works properly
# (else it will only show the first best practice)
firstTimePillar=True
for choices in answers['Choices']:
# Write the pillar name and question in every cell for autosort, but only show the first one
cell = 'A'+str(cellID)
if firstTimePillar:
worksheet.write(cell, PILLAR_PROPER_NAME_MAP[pillar], myCellnoborder)
cell = 'B'+str(cellID)
worksheet.write(cell, questionTitle, myCellnoborder)
firstTimePillar=False
else:
worksheet.write(cell, PILLAR_PROPER_NAME_MAP[pillar], myCellhidden)
cell = 'B'+str(cellID)
worksheet.write(cell, questionTitle, myCellhidden)
# Start writing each of the BP's, details, etc
cell = 'D'+str(cellID)
Title = choices['Title'].replace(' ','').replace('\t', '').replace('\n', '')
if any(choices['ChoiceId'] in d for d in ipList):
worksheet.write_url(cell, ipList[choices['ChoiceId']], myCell, string=Title)
#ipItemHTML, questionIdText = getImprovementPlanHTMLDescription(ipList[choices['ChoiceId']],answers['PillarId'])
#htmlString = ipItemHTML.text
htmlString = ""
htmlString = htmlString.replace('\n ','').replace(' ','').replace('\t', '').strip().rstrip()
# print(htmlString)
worksheet.write_comment(cell, htmlString, {'author': 'Improvement Plan'})
else:
worksheet.write(cell,Title,myCell)
# Add all Details for each best practice/choice
cell = 'E'+str(cellID)
# Remove all of the extra spaces in the description field
Description = choices['Description'].replace('\n ','')
Description = Description.replace('\n ','')
Description = Description.replace(' ','').replace('\t', '').replace('\n', '')
Description = Description.rstrip()
Description = Description.strip()
worksheet.write(cell, Description ,myCell)
# If this is an existing workload, we will show SELECTED if the have it checked
# I would love to use a XLSX checkbox, but this library doesn't support it
cell = 'F'+str(cellID)
responseText = ""
if choices['ChoiceId'] in answers['SelectedChoices']:
responseText = "SELECTED"
else:
responseText = ""
worksheet.write(cell, responseText ,myCell)
cellID+=1
# We are out of the choice/detail/response loop, so know how many rows were consumed
# and we can create the explanation and notes field to span all of them
        # Explanation field
cellMerge = 'C'+str(startingCellID)+':C'+str(cellID-1)
worksheet.merge_range(cellMerge, qDescription,myCell)
# Notes field
cellMerge = 'G'+str(startingCellID)+':G'+str(cellID-1)
if WORKLOADID:
worksheet.merge_range(cellMerge, qNotes, myCell)
else:
worksheet.merge_range(cellMerge, "", myCell)
cellID-=1
# Increase the question number
qNum += 1
# Reset the starting cellPosition to the last cellID
cellPosition = cellID
# Reset the cell formatting to alternate between the two colors
if myCell == lineA:
myCell = lineB
myCellhidden = lineBhidden
myCellnoborder = lineBnoborder
else:
myCell = lineA
myCellhidden = lineAhidden
myCellnoborder = lineAnoborder
def main():
boto3_min_version = "1.16.38"
# Verify if the version of Boto3 we are running has the wellarchitected APIs included
if (packaging.version.parse(boto3.__version__) < packaging.version.parse(boto3_min_version)):
logger.error("Your Boto3 version (%s) is less than %s. You must ugprade to run this script (pip3 upgrade boto3)" % (boto3.__version__, boto3_min_version))
exit()
logger.info("Script version %s" % __version__)
logger.info("Starting Boto %s Session" % boto3.__version__)
# Create a new boto3 session
SESSION1 = boto3.session.Session(profile_name=PROFILE)
# Initiate the well-architected session using the region defined above
WACLIENT = SESSION1.client(
service_name='wellarchitected',
region_name=REGION_NAME,
)
# If this is an existing workload, we need to query for the various workload properties
if WORKLOADID:
logger.info("User specified workload id of %s" % WORKLOADID)
workloadJson = GetWorkload(WACLIENT,WORKLOADID)
LENSES = workloadJson['Lenses']
logger.info("Lenses for %s: %s" % (WORKLOADID, json.dumps(LENSES)))
WORKLOADNAME = workloadJson['WorkloadName']
DESCRIPTION = workloadJson['Description']
REVIEWOWNER = workloadJson['ReviewOwner']
ENVIRONMENT= workloadJson['Environment']
AWSREGIONS = workloadJson['AwsRegions']
workloadId = WORKLOADID
workloadARN = workloadJson['WorkloadArn']
else:
# In order to gather all of the questions, you must create a TEMP Workload
logger.info("No workload ID specified, we will create a TEMP workload")
# Grab all lenses that are currently available
LENSES = listLens(WACLIENT)
logger.info("Lenses available: "+json.dumps(LENSES))
# Set the needed workload variables before we create it
WORKLOADNAME = 'TEMP DO NOT USE WORKLOAD'
DESCRIPTION = 'TEMP DO NOT USE WORKLOAD'
REVIEWOWNER = 'WA Python Script'
ENVIRONMENT= 'PRODUCTION'
AWSREGIONS = [REGION_NAME]
# Creating the TEMP workload
logger.info("Creating a new workload to gather questions and answers")
workloadId, workloadARN = CreateNewWorkload(WACLIENT,WORKLOADNAME,DESCRIPTION,REVIEWOWNER,ENVIRONMENT,AWSREGIONS,LENSES,"[]","[]")
# Create an new xlsx file and add a worksheet.
logger.info("Creating xlsx file '"+FILENAME+"'")
workbook = xlsxwriter.Workbook(FILENAME)
workbook.set_size(2800, 1600)
# Simple hack to get Wellarchitected base framework first (reverse sort)
# This will no longer work if we ever have a lens that starts with WB*, X, Y, or Z :)
LENSES.sort(reverse=True)
# Iterate over each lens that we either have added or is in the workload
for lens in LENSES:
# Grab all questions for a particular lens
allQuestions = findAllQuestionId(WACLIENT,workloadId,lens)
if WORKLOADID:
# If this is an existing workload, just go ahead and create the Tab and cells
logger.debug("Not answering questions for existing workload")
lensTabCreation(WACLIENT,workloadId,lens,workbook,allQuestions,WORKLOADNAME,workloadARN,DESCRIPTION)
else:
# If this is the TEMP workload, we need to first gather all of the questionIDs possible
jmesquery = "[*].{QuestionId: QuestionId, PillarId: PillarId, Choices: Choices[].ChoiceId}"
allQuestionIds = jmespath.search(jmesquery, allQuestions)
# Next we answer all of the questions across all lenses in the TEMP workload
for question in allQuestionIds:
logger.debug("Answering question %s in the %s lens" % (question['QuestionId'], lens))
updateAnswersForQuestion(WACLIENT,workloadId,lens,question['QuestionId'],question['Choices'],'TEMP WORKLOAD - Added by export script')
# Once the questions have been answered, we go ahead and create the tab for each
lensTabCreation(WACLIENT,workloadId,lens,workbook,allQuestions)
# Close out the workbook file
logger.info("Closing Workbook File")
workbook.close()
# If this is TEMP workload, we may remove it if it has not been set to keep
if not WORKLOADID:
if not KEEPTEMP:
logger.info("Removing TEMP Workload")
DeleteWorkload(WACLIENT, workloadId)
logger.info("Done")
if __name__ == "__main__":
| main() | conditional_block |
|
exportAnswersToXLSX.py | :",json.dumps(response['Workload'], cls=DateTimeEncoder))
workload = response['Workload']
# print("WorkloadId",workloadId)
return workload
def listLens(
waclient
):
# List all lenses currently available
try:
response=waclient.list_lenses()
except botocore.exceptions.ParamValidationError as e:
logger.error("ERROR - Parameter validation error: %s" % e)
except botocore.exceptions.ClientError as e:
logger.error("ERROR - Unexpected error: %s" % e)
# print(json.dumps(response))
lenses = jmespath.search("LensSummaries[*].LensAlias", response)
return lenses
def getCurrentLensVersion(
waclient,
lensAlias
):
# List all lenses currently available
try:
response=waclient.list_lenses()
except botocore.exceptions.ParamValidationError as e:
logger.error("ERROR - Parameter validation error: %s" % e)
except botocore.exceptions.ClientError as e:
logger.error("ERROR - Unexpected error: %s" % e)
# print(json.dumps(response))
searchString = "LensSummaries[?LensAlias==`"+lensAlias+"`].LensVersion"
lenses = jmespath.search(searchString, response)
return lenses[0]
def findAllQuestionId(
waclient,
workloadId,
lensAlias
):
answers = []
# Due to a bug in some lenses, I have to iterate over each pillar in order to
# retrieve the correct results.
for pillar in PILLAR_PARSE_MAP:
logger.debug("Grabbing answers for %s %s" % (lensAlias, pillar))
        # List the answers for this lens and pillar
try:
response=waclient.list_answers(
WorkloadId=workloadId,
LensAlias=lensAlias,
PillarId=pillar
)
except botocore.exceptions.ParamValidationError as e:
logger.error("ERROR - Parameter validation error: %s" % e)
except botocore.exceptions.ClientError as e:
logger.error("ERROR - Unexpected error: %s" % e)
answers.extend(response["AnswerSummaries"])
while "NextToken" in response:
try:
response = waclient.list_answers(WorkloadId=workloadId,LensAlias=lensAlias,PillarId=pillar,NextToken=response["NextToken"])
except botocore.exceptions.ParamValidationError as e:
logger.error("ERROR - Parameter validation error: %s" % e)
except botocore.exceptions.ClientError as e:
logger.error("ERROR - Unexpected error: %s" % e)
answers.extend(response["AnswerSummaries"])
return answers
def getQuestionDetails(
waclient,
workloadId,
lensAlias,
questionId
):
    # Find an answer for a questionId
try:
response=waclient.get_answer(
WorkloadId=workloadId,
LensAlias=lensAlias,
QuestionId=questionId
)
except botocore.exceptions.ParamValidationError as e:
logger.error("ERROR - Parameter validation error: %s" % e)
except botocore.exceptions.ClientError as e:
logger.error("ERROR - Unexpected error: %s" % e)
qDescription = jmespath.search("Answer.QuestionDescription", response)
qImprovementPlanUrl = jmespath.search("Answer.ImprovementPlanUrl", response)
qHelpfulResourceUrl = jmespath.search("Answer.HelpfulResourceUrl", response)
qNotes = jmespath.search("Answer.Notes", response)
return qDescription, qImprovementPlanUrl, qHelpfulResourceUrl, qNotes
def updateAnswersForQuestion(
waclient,
workloadId,
lensAlias,
questionId,
selectedChoices,
notes
):
    # Update an answer to a question
try:
response=waclient.update_answer(
WorkloadId=workloadId,
LensAlias=lensAlias,
QuestionId=questionId,
SelectedChoices=selectedChoices,
Notes=notes
)
except botocore.exceptions.ParamValidationError as e:
logger.error("ERROR - Parameter validation error: %s" % e)
except botocore.exceptions.ClientError as e:
logger.error("ERROR - Unexpected error: %s" % e)
# print(json.dumps(response))
jmesquery = "Answer.SelectedChoices"
answers = jmespath.search(jmesquery, response)
return answers
def getImprovementPlanItems(
waclient,
workloadId,
lensAlias,
QuestionId,
PillarId,
ImprovementPlanUrl,
ChoiceList
):
# This will parse the IP Items to gather the links we need
response = {}
htmlString = ""
# unanswered = getUnansweredForQuestion(waclient,workloadId,'wellarchitected',QuestionId)
urlresponse = urllib.request.urlopen(ImprovementPlanUrl)
htmlBytes = urlresponse.read()
htmlStr = htmlBytes.decode("utf8")
htmlSplit = htmlStr.split('\n')
ipHTMLList = {}
for line in htmlSplit:
for uq in ChoiceList:
if uq in line:
parsed = BeautifulSoup(line,features="html.parser")
ipHTMLList.update({uq: str(parsed.a['href'])})
return ipHTMLList
def getImprovementPlanHTMLDescription(
ImprovementPlanUrl,
PillarId
):
logger.debug("ImprovementPlanUrl: %s for pillar %s " % (ImprovementPlanUrl,PILLAR_PARSE_MAP[PillarId]))
stepRaw = ImprovementPlanUrl.rsplit('#')[1]
# Grab the number of the step we are referencing
    # This will work as long as there are fewer than 99 steps.
if len(stepRaw) <= 5:
stepNumber = stepRaw[-1]
else:
        stepNumber = stepRaw[-2:]
#Generate the string for the step number
firstItem = "step"+stepNumber
secondItem = ("step"+str((int(stepNumber)+1)))
logger.debug ("Going from %s to %s" % (firstItem, secondItem))
urlresponse = urllib.request.urlopen(ImprovementPlanUrl)
htmlBytes = urlresponse.read()
htmlStr = htmlBytes.decode("utf8")
htmlSplit = htmlStr.split('\n')
foundit = 0
ipString = ""
questionIdText = ""
for i in htmlSplit:
if PILLAR_PARSE_MAP[PillarId] in i:
bsparse = BeautifulSoup(i,features="html.parser")
questionIdText = str(bsparse.text).split(':')[0].strip()
if (secondItem in i) or ("</div>" in i):
foundit = 0
if firstItem in i:
foundit = 1
ipString+=i
elif foundit:
ipString+=i
prettyHTML = BeautifulSoup(ipString,features="html.parser")
# Need to remove all of the "local glossary links" since they point to relative paths
for a in prettyHTML.findAll('a', 'glossref'):
a.replaceWithChildren()
return prettyHTML, questionIdText
def lensTabCreation(
WACLIENT,
workloadId,
lens,
workbook,
allQuestionsForLens,
workloadName="",
AWSAccountId="",
workloadDescription=""
):
# Setup some formatting for the workbook
bold = workbook.add_format({'bold': True})
bold_border = workbook.add_format({
'border': 1,
'border_color': 'black',
'text_wrap': True
})
bold_border_bold = workbook.add_format({
'border': 1,
'border_color': 'black',
'text_wrap': True,
'font_size': 20,
'bold': True
})
heading = workbook.add_format({
'font_size': 24,
'bold': True
})
lineA = workbook.add_format({
'border': 1,
'border_color': 'black',
'bg_color': '#E0EBF6',
'align': 'top',
'text_wrap': True
})
lineB = workbook.add_format({
'border': 1,
'border_color': 'black',
'bg_color': '#E4EFDC',
'align': 'top',
'text_wrap': True
})
lineAnoborder = workbook.add_format({
'border': 0,
'top': 1,
'left': 1,
'right': 1,
'border_color': 'black',
'bg_color': '#E0EBF6',
'align': 'top',
'text_wrap': True
})
lineBnoborder = workbook.add_format({
'border': 0,
'top': 1,
'left': 1,
'right': 1,
'border_color': 'black',
'bg_color': '#E4EFDC',
'align': 'top',
'text_wrap': True
})
lineAhidden = workbook.add_format({
'border': 0,
'left': 1,
'right': 1,
'border_color': 'black', | 'bg_color': '#E0EBF6',
'align': 'top', | random_line_split |
|
exportAnswersToXLSX.py | logger.error("ERROR - Parameter validation error: %s" % e)
except botocore.exceptions.ClientError as e:
logger.error("ERROR - Unexpected error: %s" % e)
workloadId = response['WorkloadId']
workloadARN = response['WorkloadArn']
return workloadId, workloadARN
def FindWorkload(
waclient,
workloadName
):
# Finding your WorkloadId
try:
response=waclient.list_workloads(
WorkloadNamePrefix=workloadName
)
except botocore.exceptions.ParamValidationError as e:
logger.error("ERROR - Parameter validation error: %s" % e)
except botocore.exceptions.ClientError as e:
logger.error("ERROR - Unexpected error: %s" % e)
# print("Full JSON:",json.dumps(response['WorkloadSummaries'], cls=DateTimeEncoder))
workloadId = response['WorkloadSummaries'][0]['WorkloadId']
workloadArn = response['WorkloadSummaries'][0]['WorkloadArn']
# print("WorkloadId",workloadId)
return workloadId, workloadArn
def DeleteWorkload(
waclient,
workloadId
):
# Delete the WorkloadId
try:
response=waclient.delete_workload(
WorkloadId=workloadId
)
except botocore.exceptions.ParamValidationError as e:
logger.error("ERROR - Parameter validation error: %s" % e)
except botocore.exceptions.ClientError as e:
logger.error("ERROR - Unexpected error: %s" % e)
def GetWorkload(
waclient,
workloadId
):
# Get the WorkloadId
try:
response=waclient.get_workload(
WorkloadId=workloadId
)
except botocore.exceptions.ParamValidationError as e:
logger.error("ERROR - Parameter validation error: %s" % e)
except botocore.exceptions.ClientError as e:
logger.error("ERROR - Unexpected error: %s" % e)
exit()
# print("Full JSON:",json.dumps(response['Workload'], cls=DateTimeEncoder))
workload = response['Workload']
# print("WorkloadId",workloadId)
return workload
def listLens(
waclient
):
# List all lenses currently available
try:
response=waclient.list_lenses()
except botocore.exceptions.ParamValidationError as e:
logger.error("ERROR - Parameter validation error: %s" % e)
except botocore.exceptions.ClientError as e:
logger.error("ERROR - Unexpected error: %s" % e)
# print(json.dumps(response))
lenses = jmespath.search("LensSummaries[*].LensAlias", response)
return lenses
def getCurrentLensVersion(
waclient,
lensAlias
):
# List all lenses currently available
try:
response=waclient.list_lenses()
except botocore.exceptions.ParamValidationError as e:
logger.error("ERROR - Parameter validation error: %s" % e)
except botocore.exceptions.ClientError as e:
logger.error("ERROR - Unexpected error: %s" % e)
# print(json.dumps(response))
searchString = "LensSummaries[?LensAlias==`"+lensAlias+"`].LensVersion"
lenses = jmespath.search(searchString, response)
return lenses[0]
def findAllQuestionId(
waclient,
workloadId,
lensAlias
):
answers = []
# Due to a bug in some lenses, I have to iterate over each pillar in order to
# retrieve the correct results.
for pillar in PILLAR_PARSE_MAP:
logger.debug("Grabbing answers for %s %s" % (lensAlias, pillar))
        # List the answers for this lens and pillar
try:
response=waclient.list_answers(
WorkloadId=workloadId,
LensAlias=lensAlias,
PillarId=pillar
)
except botocore.exceptions.ParamValidationError as e:
logger.error("ERROR - Parameter validation error: %s" % e)
except botocore.exceptions.ClientError as e:
logger.error("ERROR - Unexpected error: %s" % e)
answers.extend(response["AnswerSummaries"])
while "NextToken" in response:
try:
response = waclient.list_answers(WorkloadId=workloadId,LensAlias=lensAlias,PillarId=pillar,NextToken=response["NextToken"])
except botocore.exceptions.ParamValidationError as e:
logger.error("ERROR - Parameter validation error: %s" % e)
except botocore.exceptions.ClientError as e:
logger.error("ERROR - Unexpected error: %s" % e)
answers.extend(response["AnswerSummaries"])
return answers
def getQuestionDetails(
waclient,
workloadId,
lensAlias,
questionId
):
    # Find an answer for a questionId
try:
response=waclient.get_answer(
WorkloadId=workloadId,
LensAlias=lensAlias,
QuestionId=questionId
)
except botocore.exceptions.ParamValidationError as e:
logger.error("ERROR - Parameter validation error: %s" % e)
except botocore.exceptions.ClientError as e:
logger.error("ERROR - Unexpected error: %s" % e)
qDescription = jmespath.search("Answer.QuestionDescription", response)
qImprovementPlanUrl = jmespath.search("Answer.ImprovementPlanUrl", response)
qHelpfulResourceUrl = jmespath.search("Answer.HelpfulResourceUrl", response)
qNotes = jmespath.search("Answer.Notes", response)
return qDescription, qImprovementPlanUrl, qHelpfulResourceUrl, qNotes
def updateAnswersForQuestion(
waclient,
workloadId,
lensAlias,
questionId,
selectedChoices,
notes
):
    # Update an answer to a question
try:
response=waclient.update_answer(
WorkloadId=workloadId,
LensAlias=lensAlias,
QuestionId=questionId,
SelectedChoices=selectedChoices,
Notes=notes
)
except botocore.exceptions.ParamValidationError as e:
logger.error("ERROR - Parameter validation error: %s" % e)
except botocore.exceptions.ClientError as e:
logger.error("ERROR - Unexpected error: %s" % e)
# print(json.dumps(response))
jmesquery = "Answer.SelectedChoices"
answers = jmespath.search(jmesquery, response)
return answers
def getImprovementPlanItems(
waclient,
workloadId,
lensAlias,
QuestionId,
PillarId,
ImprovementPlanUrl,
ChoiceList
):
# This will parse the IP Items to gather the links we need
response = {}
htmlString = ""
# unanswered = getUnansweredForQuestion(waclient,workloadId,'wellarchitected',QuestionId)
urlresponse = urllib.request.urlopen(ImprovementPlanUrl)
htmlBytes = urlresponse.read()
htmlStr = htmlBytes.decode("utf8")
htmlSplit = htmlStr.split('\n')
ipHTMLList = {}
for line in htmlSplit:
for uq in ChoiceList:
if uq in line:
parsed = BeautifulSoup(line,features="html.parser")
ipHTMLList.update({uq: str(parsed.a['href'])})
return ipHTMLList
def getImprovementPlanHTMLDescription(
ImprovementPlanUrl,
PillarId
):
logger.debug("ImprovementPlanUrl: %s for pillar %s " % (ImprovementPlanUrl,PILLAR_PARSE_MAP[PillarId]))
stepRaw = ImprovementPlanUrl.rsplit('#')[1]
# Grab the number of the step we are referencing
    # This will work as long as there are fewer than 99 steps.
if len(stepRaw) <= 5:
stepNumber = stepRaw[-1]
else:
        stepNumber = stepRaw[-2:]
#Generate the string for the step number
firstItem = "step"+stepNumber
secondItem = ("step"+str((int(stepNumber)+1)))
logger.debug ("Going from %s to %s" % (firstItem, secondItem))
urlresponse = urllib.request.urlopen(ImprovementPlanUrl)
htmlBytes = urlresponse.read()
htmlStr = htmlBytes.decode("utf8")
htmlSplit = htmlStr.split('\n')
foundit = 0
ipString = ""
questionIdText = ""
for i in htmlSplit:
if PILLAR_PARSE_MAP | try:
response=waclient.create_workload(
WorkloadName=workloadName,
Description=description,
ReviewOwner=reviewOwner,
Environment=environment,
AwsRegions=awsRegions,
Lenses=lenses,
NonAwsRegions=nonAwsRegions,
ArchitecturalDesign=architecturalDesign,
IndustryType=industryType,
Industry=industry,
Notes=notes,
AccountIds=accountIds
)
except waclient.exceptions.ConflictException as e:
workloadId,workloadARN = FindWorkload(waclient,workloadName)
logger.error("ERROR - The workload name %s already exists as workloadId %s" % (workloadName, workloadId))
return workloadId, workloadARN
except botocore.exceptions.ParamValidationError as e: | identifier_body |
|
network_context.rs | String>>,
semaphore: Arc<Semaphore>,
}
impl<T> RaceBatch<T>
where
T: Send + 'static,
{
pub fn new(max_concurrent_jobs: usize) -> Self {
RaceBatch {
tasks: JoinSet::new(),
semaphore: Arc::new(Semaphore::new(max_concurrent_jobs)),
}
}
pub fn add(&mut self, future: impl Future<Output = Result<T, String>> + Send + 'static) {
let sem = self.semaphore.clone();
self.tasks.spawn(async move {
let permit = sem
.acquire_owned()
.await
.map_err(|_| "Semaphore unexpectedly closed")?;
let result = future.await;
drop(permit);
result
});
}
    /// Returns the value of the first future that finishes with `Ok`, or `None` if all jobs failed
pub async fn get_ok(mut self) -> Option<T> {
while let Some(result) = self.tasks.join_next().await {
if let Ok(Ok(value)) = result {
return Some(value);
}
}
        // So far every task has failed
None
}
}
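// Illustrative usage sketch (added; not part of the original source): race a few
// fallible jobs with bounded concurrency and keep the first `Ok` value.
//
// async fn example() {
//     let mut batch: RaceBatch<u32> = RaceBatch::new(2);
//     batch.add(async { Err("first peer failed".to_string()) });
//     batch.add(async { Ok(42) });
//     assert_eq!(batch.get_ok().await, Some(42));
// }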
impl<DB> SyncNetworkContext<DB>
where
DB: Blockstore,
{
pub fn new(
network_send: flume::Sender<NetworkMessage>,
peer_manager: Arc<PeerManager>,
db: Arc<DB>,
) -> Self {
Self {
network_send,
peer_manager,
db,
}
}
/// Returns a reference to the peer manager of the network context.
pub fn peer_manager(&self) -> &PeerManager {
self.peer_manager.as_ref()
}
/// Send a `chain_exchange` request for only block headers (ignore
/// messages). If `peer_id` is `None`, requests will be sent to a set of
/// shuffled peers.
pub async fn chain_exchange_headers(
&self,
peer_id: Option<PeerId>,
tsk: &TipsetKeys,
count: u64,
) -> Result<Vec<Arc<Tipset>>, String> {
self.handle_chain_exchange_request(peer_id, tsk, count, HEADERS)
.await
}
/// Send a `chain_exchange` request for only messages (ignore block
/// headers). If `peer_id` is `None`, requests will be sent to a set of
/// shuffled peers.
pub async fn chain_exchange_messages(
&self,
peer_id: Option<PeerId>,
tsk: &TipsetKeys,
count: u64,
) -> Result<Vec<CompactedMessages>, String> {
self.handle_chain_exchange_request(peer_id, tsk, count, MESSAGES)
.await
}
/// Send a `chain_exchange` request for a single full tipset (includes
/// messages) If `peer_id` is `None`, requests will be sent to a set of
/// shuffled peers.
pub async fn chain_exchange_fts(
&self,
peer_id: Option<PeerId>,
tsk: &TipsetKeys,
) -> Result<FullTipset, String> {
let mut fts = self
.handle_chain_exchange_request(peer_id, tsk, 1, HEADERS | MESSAGES)
.await?;
if fts.len() != 1 {
return Err(format!(
"Full tipset request returned {} tipsets",
fts.len()
));
}
Ok(fts.remove(0))
}
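    // Added note: the three helpers above differ only in the `options` bitflags
    // passed to `handle_chain_exchange_request` (HEADERS, MESSAGES, or
    // HEADERS | MESSAGES) and in how many tipsets they request.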
/// Requests that some content with a particular `Cid` get fetched over
/// `Bitswap` if it doesn't exist in the `BlockStore`.
pub async fn bitswap_get<TMessage: DeserializeOwned>(
&self,
content: Cid,
) -> Result<TMessage, String> | .await
.is_ok();
match self.db.get_cbor(&content) {
Ok(Some(b)) => Ok(b),
Ok(None) => Err(format!(
"Not found in db, bitswap. success: {success} cid, {content:?}"
)),
Err(e) => Err(format!(
"Error retrieving from db. success: {success} cid, {content:?}, {e}"
)),
}
}
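    // Illustrative usage sketch (added): the caller chooses the deserialized type
    // via the annotation; `ctx` and `MyMessage` below are placeholders, not names
    // from this crate.
    //
    // let value: MyMessage = ctx.bitswap_get(cid).await?;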
    /// Helper function to handle the peer retrieval if no peer is supplied, as well
    /// as the logging and updating of the peer info in the `PeerManager`.
async fn handle_chain_exchange_request<T>(
&self,
peer_id: Option<PeerId>,
tsk: &TipsetKeys,
request_len: u64,
options: u64,
) -> Result<Vec<T>, String>
where
T: TryFrom<TipsetBundle, Error = String> + Send + Sync + 'static,
{
let request = ChainExchangeRequest {
start: tsk.cids().to_vec(),
request_len,
options,
};
let global_pre_time = SystemTime::now();
let network_failures = Arc::new(AtomicU64::new(0));
let lookup_failures = Arc::new(AtomicU64::new(0));
let chain_exchange_result = match peer_id {
// Specific peer is given to send request, send specifically to that peer.
Some(id) => Self::chain_exchange_request(
self.peer_manager.clone(),
self.network_send.clone(),
id,
request,
)
.await?
.into_result()?,
None => {
// No specific peer set, send requests to a shuffled set of top peers until
// a request succeeds.
let peers = self.peer_manager.top_peers_shuffled().await;
let mut batch = RaceBatch::new(MAX_CONCURRENT_CHAIN_EXCHANGE_REQUESTS);
for peer_id in peers.into_iter() {
let peer_manager = self.peer_manager.clone();
let network_send = self.network_send.clone();
let request = request.clone();
let network_failures = network_failures.clone();
let lookup_failures = lookup_failures.clone();
batch.add(async move {
match Self::chain_exchange_request(
peer_manager,
network_send,
peer_id,
request,
)
.await
{
Ok(chain_exchange_result) => {
match chain_exchange_result.into_result::<T>() {
Ok(r) => Ok(r),
Err(e) => {
lookup_failures.fetch_add(1, Ordering::Relaxed);
debug!("Failed chain_exchange response: {e}");
Err(e)
}
}
}
Err(e) => {
network_failures.fetch_add(1, Ordering::Relaxed);
debug!("Failed chain_exchange request to peer {peer_id:?}: {e}");
Err(e)
}
}
});
}
let make_failure_message = || {
let mut message = String::new();
message.push_str("ChainExchange request failed for all top peers. ");
message.push_str(&format!(
"{} network failures, ",
network_failures.load(Ordering::Relaxed)
));
message.push_str(&format!(
"{} lookup failures, ",
lookup_failures.load(Ordering::Relaxed)
));
message.push_str(&format!("request:\n{request:?}",));
message
};
let v = batch.get_ok().await.ok_or_else(make_failure_message)?;
debug!("Succeed: handle_chain_exchange_request");
v
}
};
// Log success for the global request with the latency from before sending.
match SystemTime::now().duration_since(global_pre_time) {
Ok(t) => self.peer_manager.log_global_success(t).await,
Err(e) => {
warn!("logged time less than before request: {}", e);
}
}
Ok(chain_exchange_result)
}
/// Send a `chain_exchange` request to the network and await response.
async fn chain_exchange_request(
peer_manager: Arc<PeerManager>,
network_send: flume::Sender<NetworkMessage>,
peer_id: PeerId,
request: ChainExchangeRequest,
) -> Result<ChainExchangeResponse, String> {
debug!("Sending ChainExchange Request to {peer_id}");
let req_pre_time = SystemTime::now();
let (tx, rx) = flume::bounded(1);
if network_send
.send_async(NetworkMessage::ChainExchangeRequest {
peer_id,
request,
response_channel: tx,
})
.await
.is_err()
{
return Err("Failed to send chain exchange request to network".to_string());
};
// Add timeout to receiving response from p2p service to avoid stalling.
        // There is also a timeout inside the request-response calls, but this adds
        // an extra safeguard at this level.
let res =
tokio::task::spawn_blocking(move | {
// Check if what we are fetching over Bitswap already exists in the
// database. If it does, return it, else fetch over the network.
if let Some(b) = self.db.get_cbor(&content).map_err(|e| e.to_string())? {
return Ok(b);
}
let (tx, rx) = flume::bounded(1);
self.network_send
.send_async(NetworkMessage::BitswapRequest {
cid: content,
response_channel: tx,
})
.await
.map_err(|_| "failed to send bitswap request, network receiver dropped")?;
let success = tokio::task::spawn_blocking(move || {
rx.recv_timeout(BITSWAP_TIMEOUT).unwrap_or_default()
}) | identifier_body |
network_context.rs | String>>,
semaphore: Arc<Semaphore>,
}
impl<T> RaceBatch<T>
where
T: Send + 'static,
{
pub fn new(max_concurrent_jobs: usize) -> Self {
RaceBatch {
tasks: JoinSet::new(),
semaphore: Arc::new(Semaphore::new(max_concurrent_jobs)),
}
}
pub fn add(&mut self, future: impl Future<Output = Result<T, String>> + Send + 'static) {
let sem = self.semaphore.clone();
self.tasks.spawn(async move {
let permit = sem
.acquire_owned()
.await
.map_err(|_| "Semaphore unexpectedly closed")?;
let result = future.await;
drop(permit);
result
});
}
    /// Returns the value of the first future that finishes with `Ok`, or `None` if all jobs failed
pub async fn get_ok(mut self) -> Option<T> {
while let Some(result) = self.tasks.join_next().await {
if let Ok(Ok(value)) = result {
return Some(value);
}
}
        // So far every task has failed
None
}
}
impl<DB> SyncNetworkContext<DB>
where
DB: Blockstore,
{
pub fn | (
network_send: flume::Sender<NetworkMessage>,
peer_manager: Arc<PeerManager>,
db: Arc<DB>,
) -> Self {
Self {
network_send,
peer_manager,
db,
}
}
/// Returns a reference to the peer manager of the network context.
pub fn peer_manager(&self) -> &PeerManager {
self.peer_manager.as_ref()
}
/// Send a `chain_exchange` request for only block headers (ignore
/// messages). If `peer_id` is `None`, requests will be sent to a set of
/// shuffled peers.
pub async fn chain_exchange_headers(
&self,
peer_id: Option<PeerId>,
tsk: &TipsetKeys,
count: u64,
) -> Result<Vec<Arc<Tipset>>, String> {
self.handle_chain_exchange_request(peer_id, tsk, count, HEADERS)
.await
}
/// Send a `chain_exchange` request for only messages (ignore block
/// headers). If `peer_id` is `None`, requests will be sent to a set of
/// shuffled peers.
pub async fn chain_exchange_messages(
&self,
peer_id: Option<PeerId>,
tsk: &TipsetKeys,
count: u64,
) -> Result<Vec<CompactedMessages>, String> {
self.handle_chain_exchange_request(peer_id, tsk, count, MESSAGES)
.await
}
/// Send a `chain_exchange` request for a single full tipset (includes
/// messages) If `peer_id` is `None`, requests will be sent to a set of
/// shuffled peers.
pub async fn chain_exchange_fts(
&self,
peer_id: Option<PeerId>,
tsk: &TipsetKeys,
) -> Result<FullTipset, String> {
let mut fts = self
.handle_chain_exchange_request(peer_id, tsk, 1, HEADERS | MESSAGES)
.await?;
if fts.len() != 1 {
return Err(format!(
"Full tipset request returned {} tipsets",
fts.len()
));
}
Ok(fts.remove(0))
}
/// Requests that some content with a particular `Cid` get fetched over
/// `Bitswap` if it doesn't exist in the `BlockStore`.
pub async fn bitswap_get<TMessage: DeserializeOwned>(
&self,
content: Cid,
) -> Result<TMessage, String> {
// Check if what we are fetching over Bitswap already exists in the
// database. If it does, return it, else fetch over the network.
if let Some(b) = self.db.get_cbor(&content).map_err(|e| e.to_string())? {
return Ok(b);
}
let (tx, rx) = flume::bounded(1);
self.network_send
.send_async(NetworkMessage::BitswapRequest {
cid: content,
response_channel: tx,
})
.await
.map_err(|_| "failed to send bitswap request, network receiver dropped")?;
let success = tokio::task::spawn_blocking(move || {
rx.recv_timeout(BITSWAP_TIMEOUT).unwrap_or_default()
})
.await
.is_ok();
match self.db.get_cbor(&content) {
Ok(Some(b)) => Ok(b),
Ok(None) => Err(format!(
"Not found in db, bitswap. success: {success} cid, {content:?}"
)),
Err(e) => Err(format!(
"Error retrieving from db. success: {success} cid, {content:?}, {e}"
)),
}
}
    /// Helper function to handle the peer retrieval if no peer is supplied, as well
    /// as the logging and updating of the peer info in the `PeerManager`.
async fn handle_chain_exchange_request<T>(
&self,
peer_id: Option<PeerId>,
tsk: &TipsetKeys,
request_len: u64,
options: u64,
) -> Result<Vec<T>, String>
where
T: TryFrom<TipsetBundle, Error = String> + Send + Sync + 'static,
{
let request = ChainExchangeRequest {
start: tsk.cids().to_vec(),
request_len,
options,
};
let global_pre_time = SystemTime::now();
let network_failures = Arc::new(AtomicU64::new(0));
let lookup_failures = Arc::new(AtomicU64::new(0));
let chain_exchange_result = match peer_id {
// Specific peer is given to send request, send specifically to that peer.
Some(id) => Self::chain_exchange_request(
self.peer_manager.clone(),
self.network_send.clone(),
id,
request,
)
.await?
.into_result()?,
None => {
// No specific peer set, send requests to a shuffled set of top peers until
// a request succeeds.
let peers = self.peer_manager.top_peers_shuffled().await;
let mut batch = RaceBatch::new(MAX_CONCURRENT_CHAIN_EXCHANGE_REQUESTS);
for peer_id in peers.into_iter() {
let peer_manager = self.peer_manager.clone();
let network_send = self.network_send.clone();
let request = request.clone();
let network_failures = network_failures.clone();
let lookup_failures = lookup_failures.clone();
batch.add(async move {
match Self::chain_exchange_request(
peer_manager,
network_send,
peer_id,
request,
)
.await
{
Ok(chain_exchange_result) => {
match chain_exchange_result.into_result::<T>() {
Ok(r) => Ok(r),
Err(e) => {
lookup_failures.fetch_add(1, Ordering::Relaxed);
debug!("Failed chain_exchange response: {e}");
Err(e)
}
}
}
Err(e) => {
network_failures.fetch_add(1, Ordering::Relaxed);
debug!("Failed chain_exchange request to peer {peer_id:?}: {e}");
Err(e)
}
}
});
}
let make_failure_message = || {
let mut message = String::new();
message.push_str("ChainExchange request failed for all top peers. ");
message.push_str(&format!(
"{} network failures, ",
network_failures.load(Ordering::Relaxed)
));
message.push_str(&format!(
"{} lookup failures, ",
lookup_failures.load(Ordering::Relaxed)
));
message.push_str(&format!("request:\n{request:?}",));
message
};
let v = batch.get_ok().await.ok_or_else(make_failure_message)?;
debug!("Succeed: handle_chain_exchange_request");
v
}
};
// Log success for the global request with the latency from before sending.
match SystemTime::now().duration_since(global_pre_time) {
Ok(t) => self.peer_manager.log_global_success(t).await,
Err(e) => {
warn!("logged time less than before request: {}", e);
}
}
Ok(chain_exchange_result)
}
/// Send a `chain_exchange` request to the network and await response.
async fn chain_exchange_request(
peer_manager: Arc<PeerManager>,
network_send: flume::Sender<NetworkMessage>,
peer_id: PeerId,
request: ChainExchangeRequest,
) -> Result<ChainExchangeResponse, String> {
debug!("Sending ChainExchange Request to {peer_id}");
let req_pre_time = SystemTime::now();
let (tx, rx) = flume::bounded(1);
if network_send
.send_async(NetworkMessage::ChainExchangeRequest {
peer_id,
request,
response_channel: tx,
})
.await
.is_err()
{
return Err("Failed to send chain exchange request to network".to_string());
};
// Add timeout to receiving response from p2p service to avoid stalling.
// There is also a timeout inside the request-response calls, but this ensures
// this.
let res =
tokio::task::spawn_blocking(move | new | identifier_name |
network_context.rs | String>>,
semaphore: Arc<Semaphore>,
}
impl<T> RaceBatch<T>
where
T: Send + 'static,
{
pub fn new(max_concurrent_jobs: usize) -> Self {
RaceBatch {
tasks: JoinSet::new(),
semaphore: Arc::new(Semaphore::new(max_concurrent_jobs)),
}
}
pub fn add(&mut self, future: impl Future<Output = Result<T, String>> + Send + 'static) {
let sem = self.semaphore.clone();
self.tasks.spawn(async move {
let permit = sem
.acquire_owned()
.await
.map_err(|_| "Semaphore unexpectedly closed")?;
let result = future.await;
drop(permit);
result
});
}
/// Return first finishing `Ok` future else return `None` if all jobs failed
pub async fn get_ok(mut self) -> Option<T> {
while let Some(result) = self.tasks.join_next().await {
if let Ok(Ok(value)) = result {
return Some(value);
}
}
// So far every task have failed
None
}
}
impl<DB> SyncNetworkContext<DB>
where
DB: Blockstore,
{
pub fn new(
network_send: flume::Sender<NetworkMessage>,
peer_manager: Arc<PeerManager>,
db: Arc<DB>,
) -> Self {
Self {
network_send,
peer_manager,
db,
}
}
/// Returns a reference to the peer manager of the network context.
pub fn peer_manager(&self) -> &PeerManager {
self.peer_manager.as_ref()
}
/// Send a `chain_exchange` request for only block headers (ignore
/// messages). If `peer_id` is `None`, requests will be sent to a set of
/// shuffled peers.
pub async fn chain_exchange_headers(
&self,
peer_id: Option<PeerId>,
tsk: &TipsetKeys,
count: u64,
) -> Result<Vec<Arc<Tipset>>, String> {
self.handle_chain_exchange_request(peer_id, tsk, count, HEADERS)
.await
}
/// Send a `chain_exchange` request for only messages (ignore block
/// headers). If `peer_id` is `None`, requests will be sent to a set of
/// shuffled peers.
pub async fn chain_exchange_messages(
&self,
peer_id: Option<PeerId>,
tsk: &TipsetKeys,
count: u64,
) -> Result<Vec<CompactedMessages>, String> {
self.handle_chain_exchange_request(peer_id, tsk, count, MESSAGES)
.await
}
/// Send a `chain_exchange` request for a single full tipset (includes
/// messages) If `peer_id` is `None`, requests will be sent to a set of
/// shuffled peers.
pub async fn chain_exchange_fts(
&self,
peer_id: Option<PeerId>,
tsk: &TipsetKeys,
) -> Result<FullTipset, String> {
let mut fts = self
.handle_chain_exchange_request(peer_id, tsk, 1, HEADERS | MESSAGES)
.await?;
if fts.len() != 1 {
return Err(format!(
"Full tipset request returned {} tipsets",
fts.len()
));
}
Ok(fts.remove(0))
}
/// Requests that some content with a particular `Cid` get fetched over
/// `Bitswap` if it doesn't exist in the `BlockStore`.
pub async fn bitswap_get<TMessage: DeserializeOwned>(
&self,
content: Cid,
) -> Result<TMessage, String> {
// Check if what we are fetching over Bitswap already exists in the
// database. If it does, return it, else fetch over the network.
if let Some(b) = self.db.get_cbor(&content).map_err(|e| e.to_string())? {
return Ok(b);
}
let (tx, rx) = flume::bounded(1);
self.network_send
.send_async(NetworkMessage::BitswapRequest {
cid: content,
response_channel: tx,
})
.await
.map_err(|_| "failed to send bitswap request, network receiver dropped")?;
let success = tokio::task::spawn_blocking(move || {
rx.recv_timeout(BITSWAP_TIMEOUT).unwrap_or_default()
})
.await
.is_ok();
match self.db.get_cbor(&content) {
Ok(Some(b)) => Ok(b),
Ok(None) => Err(format!(
"Not found in db, bitswap. success: {success} cid, {content:?}"
)),
Err(e) => Err(format!(
"Error retrieving from db. success: {success} cid, {content:?}, {e}"
)),
}
}
/// Helper function to handle the peer retrieval if no peer supplied as well
/// as the logging and updating of the peer info in the `PeerManager`.
async fn handle_chain_exchange_request<T>(
&self,
peer_id: Option<PeerId>,
tsk: &TipsetKeys,
request_len: u64,
options: u64,
) -> Result<Vec<T>, String>
where
T: TryFrom<TipsetBundle, Error = String> + Send + Sync + 'static,
{
let request = ChainExchangeRequest {
start: tsk.cids().to_vec(),
request_len,
options,
};
let global_pre_time = SystemTime::now();
let network_failures = Arc::new(AtomicU64::new(0));
let lookup_failures = Arc::new(AtomicU64::new(0));
let chain_exchange_result = match peer_id {
// Specific peer is given to send request, send specifically to that peer.
Some(id) => Self::chain_exchange_request(
self.peer_manager.clone(),
self.network_send.clone(),
id,
request,
)
.await?
.into_result()?,
None => {
// No specific peer set, send requests to a shuffled set of top peers until
// a request succeeds.
let peers = self.peer_manager.top_peers_shuffled().await;
let mut batch = RaceBatch::new(MAX_CONCURRENT_CHAIN_EXCHANGE_REQUESTS);
for peer_id in peers.into_iter() {
let peer_manager = self.peer_manager.clone();
let network_send = self.network_send.clone();
let request = request.clone();
let network_failures = network_failures.clone();
let lookup_failures = lookup_failures.clone();
batch.add(async move {
match Self::chain_exchange_request(
peer_manager,
network_send,
peer_id,
request,
)
.await
{
Ok(chain_exchange_result) => {
match chain_exchange_result.into_result::<T>() {
Ok(r) => Ok(r),
Err(e) => |
}
}
Err(e) => {
network_failures.fetch_add(1, Ordering::Relaxed);
debug!("Failed chain_exchange request to peer {peer_id:?}: {e}");
Err(e)
}
}
});
}
let make_failure_message = || {
let mut message = String::new();
message.push_str("ChainExchange request failed for all top peers. ");
message.push_str(&format!(
"{} network failures, ",
network_failures.load(Ordering::Relaxed)
));
message.push_str(&format!(
"{} lookup failures, ",
lookup_failures.load(Ordering::Relaxed)
));
message.push_str(&format!("request:\n{request:?}",));
message
};
let v = batch.get_ok().await.ok_or_else(make_failure_message)?;
debug!("Succeed: handle_chain_exchange_request");
v
}
};
// Log success for the global request with the latency from before sending.
match SystemTime::now().duration_since(global_pre_time) {
Ok(t) => self.peer_manager.log_global_success(t).await,
Err(e) => {
warn!("logged time less than before request: {}", e);
}
}
Ok(chain_exchange_result)
}
/// Send a `chain_exchange` request to the network and await response.
async fn chain_exchange_request(
peer_manager: Arc<PeerManager>,
network_send: flume::Sender<NetworkMessage>,
peer_id: PeerId,
request: ChainExchangeRequest,
) -> Result<ChainExchangeResponse, String> {
debug!("Sending ChainExchange Request to {peer_id}");
let req_pre_time = SystemTime::now();
let (tx, rx) = flume::bounded(1);
if network_send
.send_async(NetworkMessage::ChainExchangeRequest {
peer_id,
request,
response_channel: tx,
})
.await
.is_err()
{
return Err("Failed to send chain exchange request to network".to_string());
};
// Add timeout to receiving response from p2p service to avoid stalling.
// There is also a timeout inside the request-response calls, but this ensures
// this.
let res =
tokio::task::spawn_blocking | {
lookup_failures.fetch_add(1, Ordering::Relaxed);
debug!("Failed chain_exchange response: {e}");
Err(e)
} | conditional_block |
network_context.rs | use crate::blocks::{FullTipset, Tipset, TipsetKeys};
use crate::libp2p::{
chain_exchange::{
ChainExchangeRequest, ChainExchangeResponse, CompactedMessages, TipsetBundle, HEADERS,
MESSAGES,
},
hello::{HelloRequest, HelloResponse},
rpc::RequestResponseError,
NetworkMessage, PeerId, PeerManager, BITSWAP_TIMEOUT,
};
use anyhow::Context;
use cid::Cid;
use fvm_ipld_blockstore::Blockstore;
use fvm_ipld_encoding::CborStore;
use serde::de::DeserializeOwned;
use std::future::Future;
use tokio::sync::Semaphore;
use tokio::task::JoinSet;
use tracing::{debug, trace, warn};
/// Timeout for response from an RPC request
// TODO this value can be tweaked, this is just set pretty low to avoid peers
// timing out requests from slowing the node down. If increase, should create a
// countermeasure for this.
const CHAIN_EXCHANGE_TIMEOUT: Duration = Duration::from_secs(5);
/// Maximum number of concurrent chain exchange request being sent to the
/// network.
const MAX_CONCURRENT_CHAIN_EXCHANGE_REQUESTS: usize = 2;
/// Context used in chain sync to handle network requests.
/// This contains the peer manager, P2P service interface, and [`Blockstore`]
/// required to make network requests.
pub(in crate::chain_sync) struct SyncNetworkContext<DB> {
/// Channel to send network messages through P2P service
network_send: flume::Sender<NetworkMessage>,
/// Manages peers to send requests to and updates request stats for the
/// respective peers.
peer_manager: Arc<PeerManager>,
db: Arc<DB>,
}
impl<DB> Clone for SyncNetworkContext<DB> {
fn clone(&self) -> Self {
Self {
network_send: self.network_send.clone(),
peer_manager: self.peer_manager.clone(),
db: self.db.clone(),
}
}
}
/// Race tasks to completion while limiting the number of tasks that may execute concurrently.
/// Once a task finishes without error, the rest of the tasks are canceled.
struct RaceBatch<T> {
tasks: JoinSet<Result<T, String>>,
semaphore: Arc<Semaphore>,
}
impl<T> RaceBatch<T>
where
T: Send + 'static,
{
pub fn new(max_concurrent_jobs: usize) -> Self {
RaceBatch {
tasks: JoinSet::new(),
semaphore: Arc::new(Semaphore::new(max_concurrent_jobs)),
}
}
pub fn add(&mut self, future: impl Future<Output = Result<T, String>> + Send + 'static) {
let sem = self.semaphore.clone();
self.tasks.spawn(async move {
let permit = sem
.acquire_owned()
.await
.map_err(|_| "Semaphore unexpectedly closed")?;
let result = future.await;
drop(permit);
result
});
}
/// Return first finishing `Ok` future else return `None` if all jobs failed
pub async fn get_ok(mut self) -> Option<T> {
while let Some(result) = self.tasks.join_next().await {
if let Ok(Ok(value)) = result {
return Some(value);
}
}
// So far every task have failed
None
}
}
impl<DB> SyncNetworkContext<DB>
where
DB: Blockstore,
{
pub fn new(
network_send: flume::Sender<NetworkMessage>,
peer_manager: Arc<PeerManager>,
db: Arc<DB>,
) -> Self {
Self {
network_send,
peer_manager,
db,
}
}
/// Returns a reference to the peer manager of the network context.
pub fn peer_manager(&self) -> &PeerManager {
self.peer_manager.as_ref()
}
/// Send a `chain_exchange` request for only block headers (ignore
/// messages). If `peer_id` is `None`, requests will be sent to a set of
/// shuffled peers.
pub async fn chain_exchange_headers(
&self,
peer_id: Option<PeerId>,
tsk: &TipsetKeys,
count: u64,
) -> Result<Vec<Arc<Tipset>>, String> {
self.handle_chain_exchange_request(peer_id, tsk, count, HEADERS)
.await
}
/// Send a `chain_exchange` request for only messages (ignore block
/// headers). If `peer_id` is `None`, requests will be sent to a set of
/// shuffled peers.
pub async fn chain_exchange_messages(
&self,
peer_id: Option<PeerId>,
tsk: &TipsetKeys,
count: u64,
) -> Result<Vec<CompactedMessages>, String> {
self.handle_chain_exchange_request(peer_id, tsk, count, MESSAGES)
.await
}
/// Send a `chain_exchange` request for a single full tipset (includes
/// messages) If `peer_id` is `None`, requests will be sent to a set of
/// shuffled peers.
pub async fn chain_exchange_fts(
&self,
peer_id: Option<PeerId>,
tsk: &TipsetKeys,
) -> Result<FullTipset, String> {
let mut fts = self
.handle_chain_exchange_request(peer_id, tsk, 1, HEADERS | MESSAGES)
.await?;
if fts.len() != 1 {
return Err(format!(
"Full tipset request returned {} tipsets",
fts.len()
));
}
Ok(fts.remove(0))
}
/// Requests that some content with a particular `Cid` get fetched over
/// `Bitswap` if it doesn't exist in the `BlockStore`.
pub async fn bitswap_get<TMessage: DeserializeOwned>(
&self,
content: Cid,
) -> Result<TMessage, String> {
// Check if what we are fetching over Bitswap already exists in the
// database. If it does, return it, else fetch over the network.
if let Some(b) = self.db.get_cbor(&content).map_err(|e| e.to_string())? {
return Ok(b);
}
let (tx, rx) = flume::bounded(1);
self.network_send
.send_async(NetworkMessage::BitswapRequest {
cid: content,
response_channel: tx,
})
.await
.map_err(|_| "failed to send bitswap request, network receiver dropped")?;
let success = tokio::task::spawn_blocking(move || {
rx.recv_timeout(BITSWAP_TIMEOUT).unwrap_or_default()
})
.await
.is_ok();
match self.db.get_cbor(&content) {
Ok(Some(b)) => Ok(b),
Ok(None) => Err(format!(
"Not found in db, bitswap. success: {success} cid, {content:?}"
)),
Err(e) => Err(format!(
"Error retrieving from db. success: {success} cid, {content:?}, {e}"
)),
}
}
/// Helper function to handle the peer retrieval if no peer supplied as well
/// as the logging and updating of the peer info in the `PeerManager`.
async fn handle_chain_exchange_request<T>(
&self,
peer_id: Option<PeerId>,
tsk: &TipsetKeys,
request_len: u64,
options: u64,
) -> Result<Vec<T>, String>
where
T: TryFrom<TipsetBundle, Error = String> + Send + Sync + 'static,
{
let request = ChainExchangeRequest {
start: tsk.cids().to_vec(),
request_len,
options,
};
let global_pre_time = SystemTime::now();
let network_failures = Arc::new(AtomicU64::new(0));
let lookup_failures = Arc::new(AtomicU64::new(0));
let chain_exchange_result = match peer_id {
// Specific peer is given to send request, send specifically to that peer.
Some(id) => Self::chain_exchange_request(
self.peer_manager.clone(),
self.network_send.clone(),
id,
request,
)
.await?
.into_result()?,
None => {
// No specific peer set, send requests to a shuffled set of top peers until
// a request succeeds.
let peers = self.peer_manager.top_peers_shuffled().await;
let mut batch = RaceBatch::new(MAX_CONCURRENT_CHAIN_EXCHANGE_REQUESTS);
for peer_id in peers.into_iter() {
let peer_manager = self.peer_manager.clone();
let network_send = self.network_send.clone();
let request = request.clone();
let network_failures = network_failures.clone();
let lookup_failures = lookup_failures.clone();
batch.add(async move {
match Self::chain_exchange_request(
peer_manager,
network_send,
peer_id,
request,
)
.await
{
Ok(chain_exchange_result) => {
match chain_exchange_result.into_result::<T>() {
Ok(r) => Ok(r),
Err(e) => {
lookup_failures.fetch_add(1, Ordering::Relaxed);
debug!("Failed chain_exchange response: {e}");
Err(e)
}
}
}
Err(e) => | };
| random_line_split |
|
model.py |
# images from 200 different birds. We will feed the images without applying
# the provided bounding boxes from the dataset. The data will only be resized
# and normalized. Keras ImageDataGenerator will be used for loading the dataset
def load_dataset(aws_access, aws_secret, dataset_path, batch_size, image_shape):
#s3 = boto3.client('s3',
# aws_access_key_id = aws_access,
# aws_secret_access_key = aws_secret)
#s3.download_file('crystals-gdg', 'raw.zip', 'raw.zip')
#zip_ref = ZipFile('raw.zip', 'r')
#zip_ref.extractall()
#zip_ref.close()
#os.remove('raw.zip')
dataset_generator = ImageDataGenerator()
dataset_generator = dataset_generator.flow_from_directory(
dataset_path, target_size=(image_shape[0], image_shape[1]),
batch_size=batch_size,
class_mode=None)
return dataset_generator
# Creates the discriminator model. This model tries to classify images as real
# or fake.
def construct_discriminator(image_shape):
discriminator = Sequential()
discriminator.add(Conv2D(filters=64, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform',
input_shape=(image_shape)))
discriminator.add(LeakyReLU(0.2))
discriminator.add(Conv2D(filters=128, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform'))
discriminator.add(BatchNormalization(momentum=0.5))
discriminator.add(LeakyReLU(0.2))
discriminator.add(Conv2D(filters=256, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform'))
discriminator.add(BatchNormalization(momentum=0.5))
discriminator.add(LeakyReLU(0.2))
discriminator.add(Conv2D(filters=512, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform'))
discriminator.add(BatchNormalization(momentum=0.5))
discriminator.add(LeakyReLU(0.2))
discriminator.add(Flatten())
discriminator.add(Dense(1))
discriminator.add(Activation('sigmoid'))
optimizer = Adam(lr=0.0002, beta_1=0.5)
discriminator.compile(loss='binary_crossentropy',
optimizer=optimizer,
metrics=None)
return discriminator
# Creates the generator model. This model has an input of random noise and
# generates an image that will try mislead the discriminator.
def construct_generator():
generator = Sequential()
generator.add(Dense(units=8 * 8 * 512,
kernel_initializer='glorot_uniform',
input_shape=(1, 1, 100)))
generator.add(Reshape(target_shape=(8, 8, 512)))
generator.add(BatchNormalization(momentum=0.5))
generator.add(Activation('relu'))
generator.add(Conv2DTranspose(filters=256, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform'))
generator.add(BatchNormalization(momentum=0.5))
generator.add(Activation('relu'))
generator.add(Conv2DTranspose(filters=128, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform'))
generator.add(BatchNormalization(momentum=0.5))
generator.add(Activation('relu'))
generator.add(Conv2DTranspose(filters=64, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform'))
generator.add(BatchNormalization(momentum=0.5))
generator.add(Activation('relu'))
generator.add(Conv2DTranspose(filters=3, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform'))
generator.add(Activation('tanh'))
optimizer = Adam(lr=0.00015, beta_1=0.5)
generator.compile(loss='binary_crossentropy',
optimizer=optimizer,
metrics=None)
return generator
# Displays a figure of the generated images and saves them in as .png image
def | (generated_images, epoch, batch_number):
plt.figure(figsize=(8, 8), num=2)
gs1 = gridspec.GridSpec(8, 8)
gs1.update(wspace=0, hspace=0)
for i in range(64):
ax1 = plt.subplot(gs1[i])
ax1.set_aspect('equal')
image = generated_images[i, :, :, :]
image += 1
image *= 127.5
fig = plt.imshow(image.astype(np.uint8))
plt.axis('off')
fig.axes.get_xaxis().set_visible(False)
fig.axes.get_yaxis().set_visible(False)
plt.tight_layout()
save_name = 'generated/generatedSamples_epoch' + str(
epoch + 1) + '_batch' + str(batch_number + 1) + '.png'
plt.savefig(save_name, bbox_inches='tight', pad_inches=0)
plt.pause(0.0000000001)
plt.show()
# Main train function
def train_dcgan(aws_access, aws_secret, batch_size, epochs, image_shape, dataset_path):
# Build the adversarial model that consists in the generator output
# connected to the discriminator
generator = construct_generator()
discriminator = construct_discriminator(image_shape)
gan = Sequential()
# Only false for the adversarial model
discriminator.trainable = False
gan.add(generator)
gan.add(discriminator)
optimizer = Adam(lr=0.00015, beta_1=0.5)
gan.compile(loss='binary_crossentropy', optimizer=optimizer,
metrics=None)
# Create a dataset Generator with help of keras
dataset_generator = load_dataset(aws_access, aws_secret, dataset_path, batch_size, image_shape)
# 11788 is the total number of images on the bird dataset
number_of_batches = int(11788 / batch_size)
# Variables that will be used to plot the losses from the discriminator and
# the adversarial models
adversarial_loss = np.empty(shape=1)
discriminator_loss = np.empty(shape=1)
batches = np.empty(shape=1)
# Allo plot updates inside for loop
plt.ion()
current_batch = 0
# Let's train the DCGAN for n epochs
for epoch in range(epochs):
print("Epoch " + str(epoch+1) + "/" + str(epochs) + " :")
for batch_number in range(number_of_batches):
start_time = time.time()
# Get the current batch and normalize the images between -1 and 1
real_images = dataset_generator.next()
real_images /= 127.5
real_images -= 1
# The last batch is smaller than the other ones, so we need to
# take that into account
current_batch_size = real_images.shape[0]
# Generate noise
noise = np.random.normal(0, 1,
size=(current_batch_size,) + (1, 1, 100))
# Generate images
generated_images = generator.predict(noise)
# Add some noise to the labels that will be
# fed to the discriminator
real_y = (np.ones(current_batch_size) -
np.random.random_sample(current_batch_size) * 0.2)
fake_y = np.random.random_sample(current_batch_size) * 0.2
# Let's train the discriminator
discriminator.trainable = True
d_loss = discriminator.train_on_batch(real_images, real_y)
d_loss += discriminator.train_on_batch(generated_images, fake_y)
discriminator_loss = np.append(discriminator_loss, d_loss)
# Now it's time to train the generator
discriminator.trainable = False
noise = np.random.normal(0, 1,
size=(current_batch_size * 2,) +
(1, 1, 100))
# We try to mislead the discriminator by giving the opposite labels
fake_y = (np.ones(current_batch_size * 2) -
np.random.random_sample(current_batch_size * 2) * 0.2)
g_loss = gan.train_on_batch(noise, fake_y)
adversarial_loss = np.append(adversarial_loss, g_loss)
batches = np.append(batches, current_batch)
# Each 50 batches show and save images
if((batch_number + 1) % 50 == 0 and
current_batch_size == batch_size):
save_generated_images(generated_images, epoch, batch_number)
time_elapsed = time.time() | save_generated_images | identifier_name |
model.py | 8
# images from 200 different birds. We will feed the images without applying
# the provided bounding boxes from the dataset. The data will only be resized
# and normalized. Keras ImageDataGenerator will be used for loading the dataset
def load_dataset(aws_access, aws_secret, dataset_path, batch_size, image_shape):
#s3 = boto3.client('s3',
# aws_access_key_id = aws_access,
# aws_secret_access_key = aws_secret)
#s3.download_file('crystals-gdg', 'raw.zip', 'raw.zip')
#zip_ref = ZipFile('raw.zip', 'r')
#zip_ref.extractall()
#zip_ref.close()
#os.remove('raw.zip')
dataset_generator = ImageDataGenerator()
dataset_generator = dataset_generator.flow_from_directory(
dataset_path, target_size=(image_shape[0], image_shape[1]),
batch_size=batch_size,
class_mode=None)
return dataset_generator
# Creates the discriminator model. This model tries to classify images as real
# or fake.
def construct_discriminator(image_shape):
discriminator = Sequential()
discriminator.add(Conv2D(filters=64, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform',
input_shape=(image_shape)))
discriminator.add(LeakyReLU(0.2))
discriminator.add(Conv2D(filters=128, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform'))
discriminator.add(BatchNormalization(momentum=0.5))
discriminator.add(LeakyReLU(0.2))
discriminator.add(Conv2D(filters=256, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform'))
discriminator.add(BatchNormalization(momentum=0.5))
discriminator.add(LeakyReLU(0.2))
discriminator.add(Conv2D(filters=512, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform'))
discriminator.add(BatchNormalization(momentum=0.5))
discriminator.add(LeakyReLU(0.2))
discriminator.add(Flatten())
discriminator.add(Dense(1))
discriminator.add(Activation('sigmoid'))
optimizer = Adam(lr=0.0002, beta_1=0.5)
discriminator.compile(loss='binary_crossentropy',
optimizer=optimizer,
metrics=None)
return discriminator
# Creates the generator model. This model has an input of random noise and
# generates an image that will try mislead the discriminator.
def construct_generator():
generator = Sequential()
generator.add(Dense(units=8 * 8 * 512,
kernel_initializer='glorot_uniform',
input_shape=(1, 1, 100)))
generator.add(Reshape(target_shape=(8, 8, 512)))
generator.add(BatchNormalization(momentum=0.5))
generator.add(Activation('relu'))
generator.add(Conv2DTranspose(filters=256, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform'))
generator.add(BatchNormalization(momentum=0.5))
generator.add(Activation('relu'))
generator.add(Conv2DTranspose(filters=128, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform'))
generator.add(BatchNormalization(momentum=0.5))
generator.add(Activation('relu'))
generator.add(Conv2DTranspose(filters=64, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform'))
generator.add(BatchNormalization(momentum=0.5))
generator.add(Activation('relu'))
generator.add(Conv2DTranspose(filters=3, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform'))
generator.add(Activation('tanh'))
optimizer = Adam(lr=0.00015, beta_1=0.5)
generator.compile(loss='binary_crossentropy',
optimizer=optimizer,
metrics=None)
return generator
# Displays a figure of the generated images and saves them in as .png image
def save_generated_images(generated_images, epoch, batch_number):
plt.figure(figsize=(8, 8), num=2)
gs1 = gridspec.GridSpec(8, 8)
gs1.update(wspace=0, hspace=0)
for i in range(64):
ax1 = plt.subplot(gs1[i])
ax1.set_aspect('equal')
image = generated_images[i, :, :, :]
image += 1
image *= 127.5
fig = plt.imshow(image.astype(np.uint8))
plt.axis('off')
fig.axes.get_xaxis().set_visible(False)
fig.axes.get_yaxis().set_visible(False)
plt.tight_layout()
save_name = 'generated/generatedSamples_epoch' + str(
epoch + 1) + '_batch' + str(batch_number + 1) + '.png'
plt.savefig(save_name, bbox_inches='tight', pad_inches=0)
plt.pause(0.0000000001)
plt.show()
# Main train function
def train_dcgan(aws_access, aws_secret, batch_size, epochs, image_shape, dataset_path):
# Build the adversarial model that consists in the generator output
# connected to the discriminator
| # the adversarial models
adversarial_loss = np.empty(shape=1)
discriminator_loss = np.empty(shape=1)
batches = np.empty(shape=1)
# Allo plot updates inside for loop
plt.ion()
current_batch = 0
# Let's train the DCGAN for n epochs
for epoch in range(epochs):
print("Epoch " + str(epoch+1) + "/" + str(epochs) + " :")
for batch_number in range(number_of_batches):
start_time = time.time()
# Get the current batch and normalize the images between -1 and 1
real_images = dataset_generator.next()
real_images /= 127.5
real_images -= 1
# The last batch is smaller than the other ones, so we need to
# take that into account
current_batch_size = real_images.shape[0]
# Generate noise
noise = np.random.normal(0, 1,
size=(current_batch_size,) + (1, 1, 100))
# Generate images
generated_images = generator.predict(noise)
# Add some noise to the labels that will be
# fed to the discriminator
real_y = (np.ones(current_batch_size) -
np.random.random_sample(current_batch_size) * 0.2)
fake_y = np.random.random_sample(current_batch_size) * 0.2
# Let's train the discriminator
discriminator.trainable = True
d_loss = discriminator.train_on_batch(real_images, real_y)
d_loss += discriminator.train_on_batch(generated_images, fake_y)
discriminator_loss = np.append(discriminator_loss, d_loss)
# Now it's time to train the generator
discriminator.trainable = False
noise = np.random.normal(0, 1,
size=(current_batch_size * 2,) +
(1, 1, 100))
# We try to mislead the discriminator by giving the opposite labels
fake_y = (np.ones(current_batch_size * 2) -
np.random.random_sample(current_batch_size * 2) * 0.2)
g_loss = gan.train_on_batch(noise, fake_y)
adversarial_loss = np.append(adversarial_loss, g_loss)
batches = np.append(batches, current_batch)
# Each 50 batches show and save images
if((batch_number + 1) % 50 == 0 and
current_batch_size == batch_size):
save_generated_images(generated_images, epoch, batch_number)
time_elapsed = time.time() - | generator = construct_generator()
discriminator = construct_discriminator(image_shape)
gan = Sequential()
# Only false for the adversarial model
discriminator.trainable = False
gan.add(generator)
gan.add(discriminator)
optimizer = Adam(lr=0.00015, beta_1=0.5)
gan.compile(loss='binary_crossentropy', optimizer=optimizer,
metrics=None)
# Create a dataset Generator with help of keras
dataset_generator = load_dataset(aws_access, aws_secret, dataset_path, batch_size, image_shape)
# 11788 is the total number of images on the bird dataset
number_of_batches = int(11788 / batch_size)
# Variables that will be used to plot the losses from the discriminator and | identifier_body |
model.py |
# images from 200 different birds. We will feed the images without applying
# the provided bounding boxes from the dataset. The data will only be resized
# and normalized. Keras ImageDataGenerator will be used for loading the dataset
def load_dataset(aws_access, aws_secret, dataset_path, batch_size, image_shape):
#s3 = boto3.client('s3',
# aws_access_key_id = aws_access,
# aws_secret_access_key = aws_secret)
#s3.download_file('crystals-gdg', 'raw.zip', 'raw.zip')
#zip_ref = ZipFile('raw.zip', 'r')
#zip_ref.extractall()
#zip_ref.close()
#os.remove('raw.zip')
dataset_generator = ImageDataGenerator()
dataset_generator = dataset_generator.flow_from_directory(
dataset_path, target_size=(image_shape[0], image_shape[1]),
batch_size=batch_size,
class_mode=None)
return dataset_generator
# Creates the discriminator model. This model tries to classify images as real
# or fake.
def construct_discriminator(image_shape):
discriminator = Sequential()
discriminator.add(Conv2D(filters=64, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform',
input_shape=(image_shape)))
discriminator.add(LeakyReLU(0.2))
discriminator.add(Conv2D(filters=128, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform'))
discriminator.add(BatchNormalization(momentum=0.5))
discriminator.add(LeakyReLU(0.2))
discriminator.add(Conv2D(filters=256, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform'))
discriminator.add(BatchNormalization(momentum=0.5))
discriminator.add(LeakyReLU(0.2))
discriminator.add(Conv2D(filters=512, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform'))
discriminator.add(BatchNormalization(momentum=0.5))
discriminator.add(LeakyReLU(0.2))
discriminator.add(Flatten())
discriminator.add(Dense(1))
discriminator.add(Activation('sigmoid'))
optimizer = Adam(lr=0.0002, beta_1=0.5)
discriminator.compile(loss='binary_crossentropy',
optimizer=optimizer,
metrics=None)
return discriminator
# Creates the generator model. This model has an input of random noise and
# generates an image that will try mislead the discriminator.
def construct_generator():
generator = Sequential()
generator.add(Dense(units=8 * 8 * 512,
kernel_initializer='glorot_uniform',
input_shape=(1, 1, 100)))
generator.add(Reshape(target_shape=(8, 8, 512)))
generator.add(BatchNormalization(momentum=0.5))
generator.add(Activation('relu'))
generator.add(Conv2DTranspose(filters=256, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform'))
generator.add(BatchNormalization(momentum=0.5))
generator.add(Activation('relu'))
generator.add(Conv2DTranspose(filters=128, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform'))
generator.add(BatchNormalization(momentum=0.5))
generator.add(Activation('relu'))
generator.add(Conv2DTranspose(filters=64, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform'))
generator.add(BatchNormalization(momentum=0.5))
generator.add(Activation('relu'))
generator.add(Conv2DTranspose(filters=3, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform'))
generator.add(Activation('tanh'))
optimizer = Adam(lr=0.00015, beta_1=0.5)
generator.compile(loss='binary_crossentropy',
optimizer=optimizer,
metrics=None)
return generator
# Displays a figure of the generated images and saves them in as .png image
def save_generated_images(generated_images, epoch, batch_number):
plt.figure(figsize=(8, 8), num=2)
gs1 = gridspec.GridSpec(8, 8)
gs1.update(wspace=0, hspace=0)
for i in range(64):
ax1 = plt.subplot(gs1[i])
ax1.set_aspect('equal')
image = generated_images[i, :, :, :]
image += 1
image *= 127.5
fig = plt.imshow(image.astype(np.uint8))
plt.axis('off')
fig.axes.get_xaxis().set_visible(False)
fig.axes.get_yaxis().set_visible(False)
plt.tight_layout()
save_name = 'generated/generatedSamples_epoch' + str(
epoch + 1) + '_batch' + str(batch_number + 1) + '.png'
plt.savefig(save_name, bbox_inches='tight', pad_inches=0)
plt.pause(0.0000000001)
plt.show()
# Main train function
def train_dcgan(aws_access, aws_secret, batch_size, epochs, image_shape, dataset_path):
# Build the adversarial model that consists in the generator output
# connected to the discriminator
generator = construct_generator()
discriminator = construct_discriminator(image_shape)
gan = Sequential()
# Only false for the adversarial model
discriminator.trainable = False
gan.add(generator)
gan.add(discriminator)
optimizer = Adam(lr=0.00015, beta_1=0.5)
gan.compile(loss='binary_crossentropy', optimizer=optimizer,
metrics=None)
# Create a dataset Generator with help of keras
dataset_generator = load_dataset(aws_access, aws_secret, dataset_path, batch_size, image_shape)
# 11788 is the total number of images on the bird dataset
number_of_batches = int(11788 / batch_size)
# Variables that will be used to plot the losses from the discriminator and
# the adversarial models
adversarial_loss = np.empty(shape=1)
discriminator_loss = np.empty(shape=1)
batches = np.empty(shape=1)
# Allo plot updates inside for loop
plt.ion()
current_batch = 0
# Let's train the DCGAN for n epochs
for epoch in range(epochs):
print("Epoch " + str(epoch+1) + "/" + str(epochs) + " :")
for batch_number in range(number_of_batches):
start_time = time.time()
# Get the current batch and normalize the images between -1 and 1
real_images = dataset_generator.next()
real_images /= 127.5
real_images -= 1
# The last batch is smaller than the other ones, so we need to
# take that into account
current_batch_size = real_images.shape[0]
# Generate noise
noise = np.random.normal(0, 1,
size=(current_batch_size,) + (1, 1, 100))
# Generate images
generated_images = generator.predict(noise)
# Add some noise to the labels that will be
# fed to the discriminator
real_y = (np.ones(current_batch_size) -
np.random.random_sample(current_batch_size) * 0.2)
fake_y = np.random.random_sample(current_batch_size) * 0.2
# Let's train the discriminator
discriminator.trainable = True
d_loss = discriminator.train_on_batch(real_images, real_y)
d_loss += discriminator.train_on_batch(generated_images, fake_y)
discriminator_loss = np.append(discriminator_loss, d_loss)
# Now it's time to train the generator
discriminator.trainable = False
noise = np.random.normal(0, 1,
size=(current_batch_size * 2,) +
(1, 1, 100))
# We try to mislead the discriminator by giving the opposite labels
fake_y = (np.ones(current_batch_size * 2) -
np.random.random_sample(current_batch_size * 2) * 0.2)
g_loss = gan.train_on_batch(noise, fake_y)
adversarial_loss = np.append(adversarial_loss, g_loss)
batches = np.append(batches, current_batch)
# Each 50 batches show and save images
if((batch_number + 1) % 50 == 0 and
current_batch_size == batch_size):
|
time_elapsed = time.time() | save_generated_images(generated_images, epoch, batch_number) | conditional_block |
model.py | import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import numpy as np
from keras.models import Sequential
from keras.layers import Conv2D, Conv2DTranspose, Reshape
from keras.layers import Flatten, BatchNormalization, Dense, Activation
from keras.layers.advanced_activations import LeakyReLU
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Here is where we will load the dataset stored in dataset_path. In this script
# we will use the Caltech-UCSD Birds-200-2011 dataset which includes 11788
# images from 200 different birds. We will feed the images without applying
# the provided bounding boxes from the dataset. The data will only be resized
# and normalized. Keras ImageDataGenerator will be used for loading the dataset
def load_dataset(aws_access, aws_secret, dataset_path, batch_size, image_shape):
#s3 = boto3.client('s3',
# aws_access_key_id = aws_access,
# aws_secret_access_key = aws_secret)
#s3.download_file('crystals-gdg', 'raw.zip', 'raw.zip')
#zip_ref = ZipFile('raw.zip', 'r')
#zip_ref.extractall()
#zip_ref.close()
#os.remove('raw.zip')
dataset_generator = ImageDataGenerator()
dataset_generator = dataset_generator.flow_from_directory(
dataset_path, target_size=(image_shape[0], image_shape[1]),
batch_size=batch_size,
class_mode=None)
return dataset_generator
# Creates the discriminator model. This model tries to classify images as real
# or fake.
def construct_discriminator(image_shape):
discriminator = Sequential()
discriminator.add(Conv2D(filters=64, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform',
input_shape=(image_shape)))
discriminator.add(LeakyReLU(0.2))
discriminator.add(Conv2D(filters=128, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform'))
discriminator.add(BatchNormalization(momentum=0.5))
discriminator.add(LeakyReLU(0.2))
discriminator.add(Conv2D(filters=256, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform'))
discriminator.add(BatchNormalization(momentum=0.5))
discriminator.add(LeakyReLU(0.2))
discriminator.add(Conv2D(filters=512, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform'))
discriminator.add(BatchNormalization(momentum=0.5))
discriminator.add(LeakyReLU(0.2))
discriminator.add(Flatten())
discriminator.add(Dense(1))
discriminator.add(Activation('sigmoid'))
optimizer = Adam(lr=0.0002, beta_1=0.5)
discriminator.compile(loss='binary_crossentropy',
optimizer=optimizer,
metrics=None)
return discriminator
# Creates the generator model. This model has an input of random noise and
# generates an image that will try mislead the discriminator.
def construct_generator():
generator = Sequential()
generator.add(Dense(units=8 * 8 * 512,
kernel_initializer='glorot_uniform',
input_shape=(1, 1, 100)))
generator.add(Reshape(target_shape=(8, 8, 512)))
generator.add(BatchNormalization(momentum=0.5))
generator.add(Activation('relu'))
generator.add(Conv2DTranspose(filters=256, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform'))
generator.add(BatchNormalization(momentum=0.5))
generator.add(Activation('relu'))
generator.add(Conv2DTranspose(filters=128, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform'))
generator.add(BatchNormalization(momentum=0.5))
generator.add(Activation('relu'))
generator.add(Conv2DTranspose(filters=64, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform'))
generator.add(BatchNormalization(momentum=0.5))
generator.add(Activation('relu'))
generator.add(Conv2DTranspose(filters=3, kernel_size=(5, 5),
strides=(2, 2), padding='same',
data_format='channels_last',
kernel_initializer='glorot_uniform'))
generator.add(Activation('tanh'))
optimizer = Adam(lr=0.00015, beta_1=0.5)
generator.compile(loss='binary_crossentropy',
optimizer=optimizer,
metrics=None)
return generator
# Displays a figure of the generated images and saves them in as .png image
def save_generated_images(generated_images, epoch, batch_number):
plt.figure(figsize=(8, 8), num=2)
gs1 = gridspec.GridSpec(8, 8)
gs1.update(wspace=0, hspace=0)
for i in range(64):
ax1 = plt.subplot(gs1[i])
ax1.set_aspect('equal')
image = generated_images[i, :, :, :]
image += 1
image *= 127.5
fig = plt.imshow(image.astype(np.uint8))
plt.axis('off')
fig.axes.get_xaxis().set_visible(False)
fig.axes.get_yaxis().set_visible(False)
plt.tight_layout()
save_name = 'generated/generatedSamples_epoch' + str(
epoch + 1) + '_batch' + str(batch_number + 1) + '.png'
plt.savefig(save_name, bbox_inches='tight', pad_inches=0)
plt.pause(0.0000000001)
plt.show()
# Main train function
def train_dcgan(aws_access, aws_secret, batch_size, epochs, image_shape, dataset_path):
# Build the adversarial model that consists in the generator output
# connected to the discriminator
generator = construct_generator()
discriminator = construct_discriminator(image_shape)
gan = Sequential()
# Only false for the adversarial model
discriminator.trainable = False
gan.add(generator)
gan.add(discriminator)
optimizer = Adam(lr=0.00015, beta_1=0.5)
gan.compile(loss='binary_crossentropy', optimizer=optimizer,
metrics=None)
# Create a dataset Generator with help of keras
dataset_generator = load_dataset(aws_access, aws_secret, dataset_path, batch_size, image_shape)
# 11788 is the total number of images on the bird dataset
number_of_batches = int(11788 / batch_size)
# Variables that will be used to plot the losses from the discriminator and
# the adversarial models
adversarial_loss = np.empty(shape=1)
discriminator_loss = np.empty(shape=1)
batches = np.empty(shape=1)
# Allo plot updates inside for loop
plt.ion()
current_batch = 0
# Let's train the DCGAN for n epochs
for epoch in range(epochs):
print("Epoch " + str(epoch+1) + "/" + str(epochs) + " :")
for batch_number in range(number_of_batches):
start_time = time.time()
# Get the current batch and normalize the images between -1 and 1
real_images = dataset_generator.next()
real_images /= 127.5
real_images -= 1
# The last batch is smaller than the other ones, so we need to
# take that into account
current_batch_size = real_images.shape[0]
# Generate noise
noise = np.random.normal(0, 1,
size=(current_batch_size,) + (1, 1, 100))
# Generate images
generated_images = generator.predict(noise)
# Add some noise to the labels that will be
# fed to the discriminator
real_y = (np.ones(current_batch_size) -
np.random.random_sample(current_batch_size) * 0.2)
fake_y = np.random.random_sample(current_batch_size) * 0.2
# Let's train the discriminator
discriminator.trainable = True
d_loss = discriminator.train_on_batch(real_images, real_y)
d_loss += discriminator.train_on_batch(generated_images, fake_y)
discriminator_loss = np.append(discriminator_loss, d_loss)
# Now it's time to train the generator
discriminator.trainable = False
noise = np.random.normal(0, 1,
size=(current_batch_size * 2,) +
(1, | import os
import boto3
from zipfile import ZipFile | random_line_split |
|
index.d.ts | // the literal value used for the link. If this property is omitted then text is generated from the field values of the document referred to by the link.
}
export interface IFngSchemaTypeFormOpts {
/*
The input type to be generated - which must be compatible with the Mongoose type.
Common examples are email, url.
In addition to the standard HTML5 types there are some 'special' types:
textarea: a textarea control
radio: a radio button control
select: a select control
Note that if the field type is String and the name (or label) contains the string
'password' then type="password" will be used unless type="text".
If the Mongoose schema has an enum array you can specify a radio button group
(instead of a select) by using a type of radio
*/
type?: string;
hidden?: boolean; // inhibits this schema key from appearing on the generated form.
label?: string | null; // overrides the default input label. label:null suppresses the label altogether.
ref?: string; // reference to another collection
internalRef? : IFngInternalLookupReference;
lookupListRef?: IFngLookupListReference;
id?: string; // specifies the id of the input field (which defaults to f_name)
placeHolder?: string // adds placeholder text to the input (depending on data type).
help?: string; // adds help text under the input.
helpInline?: string; // adds help to the right of the input.
popup?: string; // adds popup help as specified.
order?: number; // allows user to specify the order / tab order of this field in the form. This overrides the position in the Mongoose schema.
size?: 'mini' | 'small' | 'medium' | 'large' | 'xlarge' | 'xxlarge' | 'block-level'; // sets control width. Default is 'medium''
readonly?: boolean; // adds the readonly attribute to the generated input (currently doesn't work with date - and perhaps other types).
rows?: number | 'auto'; // sets the number of rows in inputs (such as textarea) that support this. Setting rows to "auto" makes the textarea expand to fit the content, rather than create a scrollbar.
tab?: string; // Used to divide a large form up into a tabset with multiple tabs
showWhen?: IFngShowWhen | string; // allows conditional display of fields based on values elsewhere. string must be an abular expression.
/*
add: 'class="myClass"' allows custom styling of a specific input
Angular model options can be used - for example add: 'ng-model-options="{updateOn: \'default blur\', debounce: { \'default\': 500, \'blur\': 0 }}" '
custom validation directives, such as the timezone validation in this schema
*/
add?: string; // allows arbitrary attributes to be added to the input tag.
class?: string; // allows arbitrary classes to be added to the input tag.
inlineRadio?: boolean; // (only valid when type is radio) should be set to true to present all radio button options in a single line
link?: IFngLinkSetup; // handles displaying links for ref lookups
/*
With a select / radio type you can specify the options.
You can either do this by putting the option values in an array and passing it directly, or by putting them in an
array on the scope and passing the name of the array (which allows run-time modification
*/
options?: Array<string> | string;
/* Directive allows you to specify custom behaviour.
Gets passed attributes from form-input (with schema replaced with the current element - so add can be used to pass data into directives).
*/
directive?: string;
/* Inhibits the forms-angular client from looking up the possible values for a
IFngLookupReference or IFngInternalLookupReference field
(when a directive has a an alternative way of handling things)
*/
noLookup?: boolean;
/*
The next few options relate to the handling and display of arrays (including arrays of subdocuments)
*/
noAdd?: boolean; // inhibits an Add button being generated for arrays.
unshift?: boolean; // (for arrays of sub documents) puts an add button in the sub schema header which allows insertion of new sub documents at the beginning of the array.
noRemove?: boolean; // inhibits a Remove button being generated for array elements.
formstyle?: 'inline' | 'vertical' | 'horizontal' | 'horizontalCompact'; // (only valid on a sub schema) sets style of sub form.
sortable? : boolean; // Allows drag and drop sorting of arrays - requires angular-ui-sortable
/*
The next section relates to the display of sub documents
*/
customSubDoc?: string; // Allows you to specify custom HTML (which may include directives) for the sub doc
customHeader?: string; // Allows you to specify custom HTML (which may include directives) for the header of a group of sub docs
customFooter?: string; // Allows you to specify custom HTML (which may include directives) for the footer of a group of sub docs
}
// Schema passed from server - derived from Mongoose schema
export interface IFieldViewInfo extends IFngSchemaTypeFormOpts {
name: string;
schema?: Array<IFieldViewInfo>;
array?: boolean;
showIf? : any;
required?: boolean;
step? : number;
}
// Schema used internally on client - often derived from IFieldViewInfo passed from server
export interface IFormInstruction extends IFieldViewInfo {
id? : string; // id of generated DOM element
type?: 'string' | 'text' | 'textarea' | 'number' | 'select' | 'link' | 'date' | 'checkbox' | 'password';
defaultValue? : any; | label: string;
options?: any;
ids?: any;
hidden?: boolean;
tab?: string;
add? : string;
ref? : any;
link? : any;
linktext?: string;
linklabel?: boolean;
form?: string; // the form that is linked to
select2? : any; // deprecated
schema?: IFormInstruction[]; // If the field is an array of fields
}
export interface IContainer {
/*
Type of container, which determines markup. This is currently only available when the schema is generated by
the client for use independent of the BaseController
In the case of a string which does not match one of the predefined options
the generated container div is given the class of the name
*/
containerType: 'fieldset' | 'well' | 'tabset' | 'tab' | 'well-large' | 'well-small' | string;
title?: string;
/*
h1...h6 will use a header style
anything else will be used as a paragraph stype
*/
titleTagOrClass? : string;
content: IFormInstruction[];
}
export type IFormSchemaElement = IFormInstruction | IContainer;
export type IFormSchema = IFormSchemaElement[];
export type IControlledFormSchema = IFormInstruction[];
export interface IEnumInstruction {
repeat: string;
value: string;
label? : string;
}
export interface IFngCtrlState {
master: any;
allowLocationChange: boolean; // Do we allow location change or prompt for permission
}
export interface IRecordHandler {
convertToMongoModel(schema: IControlledFormSchema, anObject: any, prefixLength: number, scope: IFormScope): any;
createNew(dataToSave: any, options: any, scope: IFormScope, ctrlState: IFngCtrlState): void;
deleteRecord(model: any, id: any, scope: IFormScope, ctrlState: any): void;
updateDocument(dataToSave : any, options: any, scope: IFormScope, ctrlState: IFngCtrlState) : void;
readRecord($scope: IFormScope, ctrlState);
scrollTheList($scope: IFormScope);
getListData(record, fieldName, listSchema?, $scope?: IFormScope);
suffixCleanId(inst, suffix);
setData(object, fieldname, element, value);
setUpLookupOptions(lookupCollection, schemaElement, $scope: IFormScope, ctrlState, handleSchema);
setUpLookupListOptions: (ref: IFngLookupListReference, formInstructions: IFormInstruction, $scope: IFormScope, ctrlState: IFngCtrlState) => void;
handleInternalLookup($scope: IFormScope, formInstructions, ref): void;
preservePristine(element, fn): void;
convertIdToListValue(id, idsArray, valuesArray, fname);
decorateScope($scope:IFormScope, $uibModal, recordHandlerInstance : IRecordHandler, ctrlState);
fillFormFromBackendCustomSchema(schema, $scope:IFormScope, formGeneratorInstance, recordHandlerInstance, ctrlState);
fillFormWithBackendSchema($scope: IFormScope, formGeneratorInstance, recordHandlerInstance, ctrlState);
handleError($scope: IFormScope);
}
export interface IFormGenerator {
generateEditUrl(obj, $scope:IForm | rows? : number; | random_line_split |
world.py | scale**2)
self.bounds = None # if no boundary, then None
self.electrode_dict = {}
self.rf_electrode_list = []
self.dc_electrode_list = []
def add_electrode(self, e, name, kind, volt):
"""
Add an electrode to the World. Name it with `name.
        If kind == 'dc' or 'rf', the electrode is also appended to the corresponding
        dc/rf electrode list, in addition to the general electrode dict.
"""
e.volt = volt
self.electrode_dict[name] = (kind, e)
if kind=='dc':
self.dc_electrode_list.append((name,e))
if kind=='rf':
self.rf_electrode_list.append((name,e))
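    # Example usage (hypothetical electrode objects and voltages):
    #   w.add_electrode(rf_ring_electrode, name='RF1', kind='rf', volt=200.0)
    #   w.add_electrode(dc_pad_electrode, name='DC3', kind='dc', volt=-1.5)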
def compute_dc_potential(self, r):
v = 0
for nam, e in self.dc_electrode_list:
v += e.compute_potential(r)
return v # the potential energy is automatically electron volts
def compute_dc_field(self, r):
E = np.zeros(3)
for nam, e in self.dc_electrode_list:
E += e.compute_electric_field(r)
return E
def compute_dc_hessian(self, r):
hess = np.zeros((3,3))
for nam, e in self.dc_electrode_list:
hess += e.compute_hessian(r)
return hess
def compute_rf_field(self, r):
"""
        Sum the electric field contributions of all the RF electrodes.
        This is the instantaneous RF field amplitude, not the gradient of the pseudopotential.
"""
E = np.zeros((3))
for nam, e in self.rf_electrode_list:
E += e.compute_electric_field(r)
return E
def check_bound(self):
self.bounds = intersectBounds([typ_elec[1].get_region_bounds() for typ_elec in self.electrode_dict.values()])
def compute_rf_null(self, z, xy0=(0,0), onyz=False, bounds=None):
"""
        Search for the RF null at a given axial position z
        xy0: initial guess for the transverse coordinates
        onyz: if True, fix x = xy0[0] and minimize over y only (i.e. restrict the search to the y-z plane)
bounds: array([[xmin, xmax],[ymin, ymax], ...]) shaped (2,2) or (3,2)
"""
if bounds is None:
self.check_bound()
if self.bounds is None:
print("Cannot carry out RF null searching without bounds")
return
else:
bounds = self.bounds
if onyz: # x=0 required
fo = lambda y: sum(self.compute_rf_field(np.array([xy0[0],y,z]))**2)
ym = minimize_scalar(fo, bounds=tuple(bounds[1]), method="Bounded")
if ym.success:
ymy = ym.x
else:
print("@ z=%.3fmm Optimization Failed:"%z, ym.message, '. Returning initial value')
ymy = xy0[1]
yi = np.linspace(bounds[1,0],bounds[1,1],30)
plt.plot(yi, [fo(yy) for yy in yi], label="%.3f"%z)
plt.xlabel("y/mm")
plt.ylabel(r"$E_{RF}^2/\mathrm{(V^2mm^{-2})}$")
plt.title("RF null @x = 0")
# if ym.success:
# plt.plot([ymy],[ym.fun],'x')
# else:
# plt.legend(title="z/mm")
plt.show()
return np.array([xy0[0],ymy,z])
else:
foo= lambda xy: norm(self.compute_rf_field(np.array([xy[0],xy[1],z])))
            if bounds is None:
xym = minimize(foo, xy0)
else:
xym = minimize(foo, xy0, bounds=bounds[:2])
if xym.success:
return np.array([xym.x[0],xym.x[1],z])
else:
print("Optimization Failure:", xym.message, '. Returning initial value')
return np.array([xy0[0],xy0[1],z])
def compute_pseudopot(self, r):
"""pseudopotential in eV"""
return self._pseudopot_factor*sum(self.compute_rf_field(r)**2)
def compute_pseudopot_hessian_atNULL(self, r):
"""This only valid when r is a RF null!"""
hess = np.zeros((3,3))
for nm, e in self.rf_electrode_list:
hess += e.compute_hessian(r)
return 2*self._pseudopot_factor*(np.dot(hess, hess.T))
def compute_pseudopot_frequencies(self, r):
|
def compute_dc_potential_frequencies(self, r):
'''
As always, this is valid only at the trapping position. Return frequency (not angular frequency)
'''
H = self.compute_dc_hessian(r)
hessdiag, eigvec = eigh(H)
hessdiag /= self.__scale**2
# # hessdiag = nd.Hessdiag( self.compute_total_dc_potential )(r)
# d2Udx2 = ev_to_joule*hessdiag[0]
# d2Udy2 = ev_to_joule*hessdiag[1]
# d2Udz2 = ev_to_joule*hessdiag[2]
# fx = np.sqrt(abs(d2Udx2)/m)/(2*np.pi)
# fy = np.sqrt(abs(d2Udy2)/m)/(2*np.pi)
# fz = np.sqrt(abs(d2Udz2)/m)/(2*np.pi)
return np.sqrt(qe*abs(hessdiag)/self.m)/(2*np.pi), eigvec, hessdiag>0
def compute_full_potential(self, r):
# Full potential in eV
return self.compute_pseudopot(r) + self.compute_dc_potential(r)
# def compute_full_potential_frequencies(self, r):
# '''
# As always, this is valid only at the trapping position. Return frequency (not angular frequency)
# '''
# joule_to_ev = 6.24150934e18 # conversion factor to take joules -> eV
# ev_to_joule = 1.60217657e-19
# m = 6.64215568e-26 # 40 amu in kg
# H = nd.Hessian(self.compute_full_potential)(r)
# freq = np.linalg.eig(H)
# hessdiag = freq[0]
# eigvec = freq[1]
# # hessdiag = nd.Hessdiag( self.compute_total_dc_potential )(r)
# d2Udx2 = ev_to_joule*hessdiag[0]
# d2Udy2 = ev_to_joule*hessdiag[1]
# d2Udz2 = ev_to_joule*hessdiag[2]
# fx = np.sqrt(abs(d2Udx2)/m)/(2*np.pi)
# fy = np.sqrt(abs(d2Udy2)/m)/(2*np.pi)
# fz = np.sqrt(abs(d2Udz2)/m)/(2*np.pi)
# return [fx, fy, fz], eigvec
def local_multipole_arr(self, position, loc_multipoles=['C','Ex','Ey','Ez','U1','U2','U3','U4','U5'], ctrl_electrodes='alldc', r0=1):
"""
local_multipole_arr(self, position, loc_multipoles=['C','Ex','Ey','Ez','U1','U2','U3','U4','U5'], ctrl_electrodes='alldc', r0=1)
Parameters:
position :: a (3,) array indicating the position of interest
loc_multipoles :: a list of multipoles that are of interest at this position
ctrl_electrodes :: a list of the INDICES of dc electrodes of interest
returns the matrix, shaped (len(loc_multipoles), len(ctrl_electrodes)), that maps DC voltages on `ctrl_electrodes to `loc_multipoles at `position
"""
self.loc_multipoles = loc_multipoles
if isinstance(ctrl_electrodes,str) and ctrl_electrodes=='alldc':
ctrl_electrodes = range(len | '''
This is only valid if xp, yp, zp is the trapping position. Return frequency (i.e. omega/(2*pi))
'''
hessdiag = nd.Hessdiag(self.compute_pseudopot,step=1e-6)(r)/(self.__scale**2)
'''
Now d2Udx2 has units of J/m^2. Then w = sqrt(d2Udx2/(mass)) has units of angular frequency
'''
return np.sqrt(qe*abs(hessdiag)/self.m)/(2*np.pi), hessdiag>0 | identifier_body |
world.py |
class World:
'''
A General, Brand New World
Units:
(potential) energy: eV
length: __scale
frequency: Hz
Axis convention in consistance with <class Electrode>
z: axial
* It doesn't matter whether z is parallel or vertical to the surface or not
Attributes:
__scale :: the typical length in meter. Length unit in the code is self.__scale meter(s)
omega_rf:: the RF ANGULAR frequency
m :: the mass a single ion
bounds :: the boundaies of this world
dc_electrode_list :: a list of (name, electrode) s of dc electrodes
rf_electrode_list :: a list of (name, electrode) s of rf electrodes
electrode_dict :: dictionary that electrode_dict[name] = ("dc" or "rf", electrode)
_pseudopot_factor :: the factor in front of the pseudopotential
Methods:
'''
def __init__(self, ionA, omega_rf, scale=1):
"""
__init__(self, ionA, omega_rf, scale=1):
ionA: mass number of the ion
omega_rf: the RF ANGULAR frequency
scale : the typical length in meter. Length unit in the code is self.__scale meter(s)
"""
self.omega_rf = omega_rf
self.m = ionA * amu
self.__scale = scale
self._pseudopot_factor = qe/(4*self.m*(omega_rf**2))/(scale**2)
self.bounds = None # if no boundary, then None
self.electrode_dict = {}
self.rf_electrode_list = []
self.dc_electrode_list = []
def add_electrode(self, e, name, kind, volt):
"""
Add an electrode to the World. Name it with `name.
If kind == 'rf', then add this electrode to the rf electrode dict
as well as to the general electrode dict
"""
e.volt = volt
self.electrode_dict[name] = (kind, e)
if kind=='dc':
self.dc_electrode_list.append((name,e))
if kind=='rf':
self.rf_electrode_list.append((name,e))
def compute_dc_potential(self, r):
v = 0
for nam, e in self.dc_electrode_list:
v += e.compute_potential(r)
return v # the potential energy is automatically electron volts
def compute_dc_field(self, r):
E = np.zeros(3)
for nam, e in self.dc_electrode_list:
E += e.compute_electric_field(r)
return E
def compute_dc_hessian(self, r):
hess = np.zeros((3,3))
for nam, e in self.dc_electrode_list:
hess += e.compute_hessian(r)
return hess
def compute_rf_field(self, r):
"""
Just add up the electric field due to all the rf electrodes
not the gradient of pseudopotential
"""
E = np.zeros((3))
for nam, e in self.rf_electrode_list:
E += e.compute_electric_field(r)
return E
def check_bound(self):
self.bounds = intersectBounds([typ_elec[1].get_region_bounds() for typ_elec in self.electrode_dict.values()])
def compute_rf_null(self, z, xy0=(0,0), onyz=False, bounds=None):
"""
Search the RF null on a certain axial position
xy0: initial guess
onyz: whether restrict y=y0 or not
bounds: array([[xmin, xmax],[ymin, ymax], ...]) shaped (2,2) or (3,2)
"""
if bounds is None:
self.check_bound()
if self.bounds is None:
print("Cannot carry out RF null searching without bounds")
return
else:
bounds = self.bounds
if onyz: # x=0 required
fo = lambda y: sum(self.compute_rf_field(np.array([xy0[0],y,z]))**2)
ym = minimize_scalar(fo, bounds=tuple(bounds[1]), method="Bounded")
if ym.success:
ymy = ym.x
else:
print("@ z=%.3fmm Optimization Failed:"%z, ym.message, '. Returning initial value')
ymy = xy0[1]
yi = np.linspace(bounds[1,0],bounds[1,1],30)
plt.plot(yi, [fo(yy) for yy in yi], label="%.3f"%z)
plt.xlabel("y/mm")
plt.ylabel(r"$E_{RF}^2/\mathrm{(V^2mm^{-2})}$")
plt.title("RF null @x = 0")
# if ym.success:
# plt.plot([ymy],[ym.fun],'x')
# else:
# plt.legend(title="z/mm")
plt.show()
return np.array([xy0[0],ymy,z])
else:
foo= lambda xy: norm(self.compute_rf_field(np.array([xy[0],xy[1],z])))
if self.bounds is None:
xym = minimize(foo, xy0)
else:
xym = minimize(foo, xy0, bounds=bounds[:2])
if xym.success:
return np.array([xym.x[0],xym.x[1],z])
else:
print("Optimization Failure:", xym.message, '. Returning initial value')
return np.array([xy0[0],xy0[1],z])
def compute_pseudopot(self, r):
"""pseudopotential in eV"""
return self._pseudopot_factor*sum(self.compute_rf_field(r)**2)
def compute_pseudopot_hessian_atNULL(self, r):
"""This only valid when r is a RF null!"""
hess = np.zeros((3,3))
for nm, e in self.rf_electrode_list:
hess += e.compute_hessian(r)
return 2*self._pseudopot_factor*(np.dot(hess, hess.T))
def compute_pseudopot_frequencies(self, r):
'''
This is only valid if xp, yp, zp is the trapping position. Return frequency (i.e. omega/(2*pi))
'''
hessdiag = nd.Hessdiag(self.compute_pseudopot,step=1e-6)(r)/(self.__scale**2)
'''
Now d2Udx2 has units of J/m^2. Then w = sqrt(d2Udx2/(mass)) has units of angular frequency
'''
return np.sqrt(qe*abs(hessdiag)/self.m)/(2*np.pi), hessdiag>0
def compute_dc_potential_frequencies(self, r):
'''
As always, this is valid only at the trapping position. Return frequency (not angular frequency)
'''
H = self.compute_dc_hessian(r)
hessdiag, eigvec = eigh(H)
hessdiag /= self.__scale**2
# # hessdiag = nd.Hessdiag( self.compute_total_dc_potential )(r)
# d2Udx2 = ev_to_joule*hessdiag[0]
# d2Udy2 = ev_to_joule*hessdiag[1]
# d2Udz2 = ev_to_joule*hessdiag[2]
# fx = np.sqrt(abs(d2Udx2)/m)/(2*np.pi)
# fy = np.sqrt(abs(d2Udy2)/m)/(2*np.pi)
# fz = np.sqrt(abs(d2Udz2)/m)/(2*np.pi)
return np.sqrt(qe*abs(hessdiag)/self.m)/(2*np.pi), eigvec, hessdiag>0
def compute_full_potential(self, r):
# Full potential in eV
return self.compute_pseudopot(r) + self.compute_dc_potential(r)
# def compute_full_potential_frequencies(self, r):
# '''
# As always, this is valid only at the trapping position. Return frequency (not angular frequency)
# '''
# joule_to_ev = 6.24150934e18 # conversion factor to take joules -> eV
# ev_to_joule = 1.60217657e-19
# m = 6.64215568e-26 # 40 amu in kg
# H = nd.Hessian(self.compute_full_potential)(r)
# freq = np.linalg.eig(H)
# hessdiag = freq[0]
# eigvec = freq[1]
# # hessdiag = nd.Hessdiag( self.compute_total_dc_potential )(r)
|
from .SH import funcSHexp
from .utils import quadru2hess, intersectBounds
amu = 1.66054e-27
| random_line_split |
|
world.py | **2)
self.bounds = None # if no boundary, then None
self.electrode_dict = {}
self.rf_electrode_list = []
self.dc_electrode_list = []
def | (self, e, name, kind, volt):
"""
Add an electrode to the World. Name it with `name.
If kind == 'rf', then add this electrode to the rf electrode dict
as well as to the general electrode dict
"""
e.volt = volt
self.electrode_dict[name] = (kind, e)
if kind=='dc':
self.dc_electrode_list.append((name,e))
if kind=='rf':
self.rf_electrode_list.append((name,e))
def compute_dc_potential(self, r):
v = 0
for nam, e in self.dc_electrode_list:
v += e.compute_potential(r)
return v # the potential energy is automatically electron volts
def compute_dc_field(self, r):
E = np.zeros(3)
for nam, e in self.dc_electrode_list:
E += e.compute_electric_field(r)
return E
def compute_dc_hessian(self, r):
hess = np.zeros((3,3))
for nam, e in self.dc_electrode_list:
hess += e.compute_hessian(r)
return hess
def compute_rf_field(self, r):
"""
Just add up the electric field due to all the rf electrodes
not the gradient of pseudopotential
"""
E = np.zeros((3))
for nam, e in self.rf_electrode_list:
E += e.compute_electric_field(r)
return E
def check_bound(self):
self.bounds = intersectBounds([typ_elec[1].get_region_bounds() for typ_elec in self.electrode_dict.values()])
def compute_rf_null(self, z, xy0=(0,0), onyz=False, bounds=None):
"""
Search the RF null on a certain axial position
xy0: initial guess
onyz: whether restrict y=y0 or not
bounds: array([[xmin, xmax],[ymin, ymax], ...]) shaped (2,2) or (3,2)
"""
if bounds is None:
self.check_bound()
if self.bounds is None:
print("Cannot carry out RF null searching without bounds")
return
else:
bounds = self.bounds
if onyz: # x=0 required
fo = lambda y: sum(self.compute_rf_field(np.array([xy0[0],y,z]))**2)
ym = minimize_scalar(fo, bounds=tuple(bounds[1]), method="Bounded")
if ym.success:
ymy = ym.x
else:
print("@ z=%.3fmm Optimization Failed:"%z, ym.message, '. Returning initial value')
ymy = xy0[1]
yi = np.linspace(bounds[1,0],bounds[1,1],30)
plt.plot(yi, [fo(yy) for yy in yi], label="%.3f"%z)
plt.xlabel("y/mm")
plt.ylabel(r"$E_{RF}^2/\mathrm{(V^2mm^{-2})}$")
plt.title("RF null @x = 0")
# if ym.success:
# plt.plot([ymy],[ym.fun],'x')
# else:
# plt.legend(title="z/mm")
plt.show()
return np.array([xy0[0],ymy,z])
else:
foo= lambda xy: norm(self.compute_rf_field(np.array([xy[0],xy[1],z])))
if self.bounds is None:
xym = minimize(foo, xy0)
else:
xym = minimize(foo, xy0, bounds=bounds[:2])
if xym.success:
return np.array([xym.x[0],xym.x[1],z])
else:
print("Optimization Failure:", xym.message, '. Returning initial value')
return np.array([xy0[0],xy0[1],z])
def compute_pseudopot(self, r):
"""pseudopotential in eV"""
return self._pseudopot_factor*sum(self.compute_rf_field(r)**2)
def compute_pseudopot_hessian_atNULL(self, r):
"""This only valid when r is a RF null!"""
hess = np.zeros((3,3))
for nm, e in self.rf_electrode_list:
hess += e.compute_hessian(r)
return 2*self._pseudopot_factor*(np.dot(hess, hess.T))
def compute_pseudopot_frequencies(self, r):
'''
This is only valid if xp, yp, zp is the trapping position. Return frequency (i.e. omega/(2*pi))
'''
hessdiag = nd.Hessdiag(self.compute_pseudopot,step=1e-6)(r)/(self.__scale**2)
'''
Now d2Udx2 has units of J/m^2. Then w = sqrt(d2Udx2/(mass)) has units of angular frequency
'''
return np.sqrt(qe*abs(hessdiag)/self.m)/(2*np.pi), hessdiag>0
def compute_dc_potential_frequencies(self, r):
'''
As always, this is valid only at the trapping position. Return frequency (not angular frequency)
'''
H = self.compute_dc_hessian(r)
hessdiag, eigvec = eigh(H)
hessdiag /= self.__scale**2
# # hessdiag = nd.Hessdiag( self.compute_total_dc_potential )(r)
# d2Udx2 = ev_to_joule*hessdiag[0]
# d2Udy2 = ev_to_joule*hessdiag[1]
# d2Udz2 = ev_to_joule*hessdiag[2]
# fx = np.sqrt(abs(d2Udx2)/m)/(2*np.pi)
# fy = np.sqrt(abs(d2Udy2)/m)/(2*np.pi)
# fz = np.sqrt(abs(d2Udz2)/m)/(2*np.pi)
return np.sqrt(qe*abs(hessdiag)/self.m)/(2*np.pi), eigvec, hessdiag>0
def compute_full_potential(self, r):
# Full potential in eV
return self.compute_pseudopot(r) + self.compute_dc_potential(r)
# def compute_full_potential_frequencies(self, r):
# '''
# As always, this is valid only at the trapping position. Return frequency (not angular frequency)
# '''
# joule_to_ev = 6.24150934e18 # conversion factor to take joules -> eV
# ev_to_joule = 1.60217657e-19
# m = 6.64215568e-26 # 40 amu in kg
# H = nd.Hessian(self.compute_full_potential)(r)
# freq = np.linalg.eig(H)
# hessdiag = freq[0]
# eigvec = freq[1]
# # hessdiag = nd.Hessdiag( self.compute_total_dc_potential )(r)
# d2Udx2 = ev_to_joule*hessdiag[0]
# d2Udy2 = ev_to_joule*hessdiag[1]
# d2Udz2 = ev_to_joule*hessdiag[2]
# fx = np.sqrt(abs(d2Udx2)/m)/(2*np.pi)
# fy = np.sqrt(abs(d2Udy2)/m)/(2*np.pi)
# fz = np.sqrt(abs(d2Udz2)/m)/(2*np.pi)
# return [fx, fy, fz], eigvec
def local_multipole_arr(self, position, loc_multipoles=['C','Ex','Ey','Ez','U1','U2','U3','U4','U5'], ctrl_electrodes='alldc', r0=1):
"""
local_multipole_arr(self, position, loc_multipoles=['C','Ex','Ey','Ez','U1','U2','U3','U4','U5'], ctrl_electrodes='alldc', r0=1)
Parameters:
position :: a (3,) array indicating the position of interest
loc_multipoles :: a list of multipoles that are of interest at this position
ctrl_electrodes :: a list of the INDICES of dc electrodes of interest
returns the matrix, shaped (len(loc_multipoles), len(ctrl_electrodes)), that maps DC voltages on `ctrl_electrodes to `loc_multipoles at `position
"""
self.loc_multipoles = loc_multipoles
if isinstance(ctrl_electrodes,str) and ctrl_electrodes=='alldc':
ctrl_electrodes = range | add_electrode | identifier_name |
world.py | , bounds=None):
"""
Search the RF null on a certain axial position
xy0: initial guess
onyz: whether restrict y=y0 or not
bounds: array([[xmin, xmax],[ymin, ymax], ...]) shaped (2,2) or (3,2)
"""
if bounds is None:
self.check_bound()
if self.bounds is None:
print("Cannot carry out RF null searching without bounds")
return
else:
bounds = self.bounds
if onyz: # x=0 required
fo = lambda y: sum(self.compute_rf_field(np.array([xy0[0],y,z]))**2)
ym = minimize_scalar(fo, bounds=tuple(bounds[1]), method="Bounded")
if ym.success:
ymy = ym.x
else:
print("@ z=%.3fmm Optimization Failed:"%z, ym.message, '. Returning initial value')
ymy = xy0[1]
yi = np.linspace(bounds[1,0],bounds[1,1],30)
plt.plot(yi, [fo(yy) for yy in yi], label="%.3f"%z)
plt.xlabel("y/mm")
plt.ylabel(r"$E_{RF}^2/\mathrm{(V^2mm^{-2})}$")
plt.title("RF null @x = 0")
# if ym.success:
# plt.plot([ymy],[ym.fun],'x')
# else:
# plt.legend(title="z/mm")
plt.show()
return np.array([xy0[0],ymy,z])
else:
foo= lambda xy: norm(self.compute_rf_field(np.array([xy[0],xy[1],z])))
if self.bounds is None:
xym = minimize(foo, xy0)
else:
xym = minimize(foo, xy0, bounds=bounds[:2])
if xym.success:
return np.array([xym.x[0],xym.x[1],z])
else:
print("Optimization Failure:", xym.message, '. Returning initial value')
return np.array([xy0[0],xy0[1],z])
def compute_pseudopot(self, r):
"""pseudopotential in eV"""
return self._pseudopot_factor*sum(self.compute_rf_field(r)**2)
def compute_pseudopot_hessian_atNULL(self, r):
"""This only valid when r is a RF null!"""
hess = np.zeros((3,3))
for nm, e in self.rf_electrode_list:
hess += e.compute_hessian(r)
return 2*self._pseudopot_factor*(np.dot(hess, hess.T))
def compute_pseudopot_frequencies(self, r):
'''
This is only valid if xp, yp, zp is the trapping position. Return frequency (i.e. omega/(2*pi))
'''
hessdiag = nd.Hessdiag(self.compute_pseudopot,step=1e-6)(r)/(self.__scale**2)
'''
Now d2Udx2 has units of J/m^2. Then w = sqrt(d2Udx2/(mass)) has units of angular frequency
'''
return np.sqrt(qe*abs(hessdiag)/self.m)/(2*np.pi), hessdiag>0
def compute_dc_potential_frequencies(self, r):
'''
As always, this is valid only at the trapping position. Return frequency (not angular frequency)
'''
H = self.compute_dc_hessian(r)
hessdiag, eigvec = eigh(H)
hessdiag /= self.__scale**2
# # hessdiag = nd.Hessdiag( self.compute_total_dc_potential )(r)
# d2Udx2 = ev_to_joule*hessdiag[0]
# d2Udy2 = ev_to_joule*hessdiag[1]
# d2Udz2 = ev_to_joule*hessdiag[2]
# fx = np.sqrt(abs(d2Udx2)/m)/(2*np.pi)
# fy = np.sqrt(abs(d2Udy2)/m)/(2*np.pi)
# fz = np.sqrt(abs(d2Udz2)/m)/(2*np.pi)
return np.sqrt(qe*abs(hessdiag)/self.m)/(2*np.pi), eigvec, hessdiag>0
def compute_full_potential(self, r):
# Full potential in eV
return self.compute_pseudopot(r) + self.compute_dc_potential(r)
# def compute_full_potential_frequencies(self, r):
# '''
# As always, this is valid only at the trapping position. Return frequency (not angular frequency)
# '''
# joule_to_ev = 6.24150934e18 # conversion factor to take joules -> eV
# ev_to_joule = 1.60217657e-19
# m = 6.64215568e-26 # 40 amu in kg
# H = nd.Hessian(self.compute_full_potential)(r)
# freq = np.linalg.eig(H)
# hessdiag = freq[0]
# eigvec = freq[1]
# # hessdiag = nd.Hessdiag( self.compute_total_dc_potential )(r)
# d2Udx2 = ev_to_joule*hessdiag[0]
# d2Udy2 = ev_to_joule*hessdiag[1]
# d2Udz2 = ev_to_joule*hessdiag[2]
# fx = np.sqrt(abs(d2Udx2)/m)/(2*np.pi)
# fy = np.sqrt(abs(d2Udy2)/m)/(2*np.pi)
# fz = np.sqrt(abs(d2Udz2)/m)/(2*np.pi)
# return [fx, fy, fz], eigvec
def local_multipole_arr(self, position, loc_multipoles=['C','Ex','Ey','Ez','U1','U2','U3','U4','U5'], ctrl_electrodes='alldc', r0=1):
"""
local_multipole_arr(self, position, loc_multipoles=['C','Ex','Ey','Ez','U1','U2','U3','U4','U5'], ctrl_electrodes='alldc', r0=1)
Parameters:
position :: a (3,) array indicating the position of interest
loc_multipoles :: a list of multipoles that are of interest at this position
ctrl_electrodes :: a list of the INDICES of dc electrodes of interest
returns the matrix, shaped (len(loc_multipoles), len(ctrl_electrodes)), that maps DC voltages on `ctrl_electrodes to `loc_multipoles at `position
"""
self.loc_multipoles = loc_multipoles
if isinstance(ctrl_electrodes,str) and ctrl_electrodes=='alldc':
ctrl_electrodes = range(len(self.dc_electrode_list))
multipole_arr = np.empty((len(loc_multipoles), len(ctrl_electrodes)),'d')
for i, j in enumerate(ctrl_electrodes):
nam, elec = self.dc_electrode_list[j]
elec.expand_in_multipoles(position, loc_multipoles, r0)
multipole_arr[:,i] = [elec.multipole_dict[multipole] for multipole in loc_multipoles]
return multipole_arr
def multipole_control_vdc_arr(self, pos_ctrl_mults, ctrl_electrodes='alldc', cost_Q='id', r0=1):
"""
multipole_control_vdc_arr(self, pos_ctrl_mults, ctrl_electrodes='alldc', cost_Q='id', r0=1):
Parameters:
position :: a (3,) array indicating the position of interest
pos_ctrl_mults :: a list of (position, list of controlled local multipoles) pairs or a single pair
ctrl_electrodes :: a list of the INDICES of dc electrodes to be multipole-controlled
costQ :: the positive definite matrix Q in the cost function
return: The matrix, shaped (len(self.dc_electrodes), n_mult), controls DC voltages on ctrl_electrodes for pos_ctrl_mults.
n_mult = sum([len(pos_ctrl_mult[1]) for pos_ctrl_mult in pos_ctrl_mults])
Rows that correspond to electrodes that are not multipole-controlled are padded with 0
"""
alle = (isinstance(ctrl_electrodes,str) and ctrl_electrodes=='alldc')
if alle:
ctrl_electrodes = range(len(self.dc_electrode_list))
# Support inputing a single (position, list of controlled local multipoles) pair
if isinstance(pos_ctrl_mults, tuple) and len(pos_ctrl_mults)==2:
| pos_ctrl_mults = [pos_ctrl_mults] | conditional_block |
|
train.py | t in list(MetricType)]}")
parser.add_argument("--dims", default=4, type=int, help="Dimensions for the model.")
parser.add_argument("--train_bias", dest='train_bias', action='store_true', default=False,
help="Whether to train scaling or not.")
parser.add_argument("--use_hrh", default=1, type=int, help="Whether to use HRH or RHR in lhs op.")
parser.add_argument("--inverse_tail", default=0, type=int, help="Whether to use t or t^-1 as tail")
parser.add_argument("--loss", choices=["BCELoss", "HingeLoss"], default="BCELoss", help="Loss function")
parser.add_argument("--hinge_margin", default=1, type=float, help="Margin for hinge loss function")
parser.add_argument("--regularizer", choices=["N3", "F2"], default="N3", help="Regularizer")
parser.add_argument("--regularizer_weight", default=0, type=float, help="Regularization weight")
parser.add_argument("--neg_sample_size", default=1, type=int, help="Negative sample size, -1 to not use")
parser.add_argument("--double_neg", action="store_true", default=False,
help="Whether to negative sample both head and tail entities")
# optim and config
parser.add_argument("--optim", default="adam", type=str, help="Optimization method.")
parser.add_argument("--amsgrad", action="store_true", default=False, help="Use AMS grad in Adam or AdamW")
parser.add_argument("--learning_rate", default=1e-3, type=float, help="Starting learning rate.")
parser.add_argument("--reduce_factor", default=2, type=float, help="Factor to reduce lr on plateau.")
parser.add_argument("--weight_decay", default=0.00, type=float, help="L2 Regularization.")
parser.add_argument("--val_every", default=5, type=int, help="Runs validation every n epochs.")
parser.add_argument("--patience", default=50, type=int, help="Epochs of patience for scheduler and early stop.")
parser.add_argument("--max_grad_norm", default=50.0, type=float, help="Max gradient norm.")
parser.add_argument("--batch_size", default=1000, type=int, help="Batch size.")
parser.add_argument("--eval_batch_size", default=100, type=int, help="Eval batch size. Has impact only on memory")
parser.add_argument("--epochs", default=100, type=int, help="Number of training epochs.")
parser.add_argument("--burnin", default=10, type=int, help="Number of initial epochs to train with reduce lr.")
parser.add_argument("--grad_accum_steps", default=1, type=int,
help="Number of update steps to accumulate before backward.")
parser.add_argument("--subsample", default=-1, type=float, help="Subsamples a % of valid triples")
# Others
parser.add_argument("--dtype", default="single", type=str, choices=["single", "double"], help="Machine precision")
parser.add_argument("--local_rank", type=int, help="Local process rank assigned by torch.distributed.launch")
parser.add_argument("--n_procs", default=4, type=int, help="Number of process to create")
parser.add_argument("--load_ckpt", default="", type=str, help="Load model from this file")
parser.add_argument("--results_file", default="out/results.csv", type=str, help="Exports final results to this file")
parser.add_argument("--save_epochs", default=10001, type=int, help="Exports every n epochs")
parser.add_argument("--seed", default=42, type=int, help="Seed")
parser.add_argument("--debug", dest='debug', action='store_true', default=False, help="Debug mode")
def load_ckpt(args, log):
saved_data = torch.load(args.load_ckpt) if args.load_ckpt else {}
args.init_epoch = 0
args.current_iter = 1
if saved_data:
local_rank = args.local_rank
if local_rank == 0:
log.info(f"Loaded CKPT: {args.load_ckpt}, Args in ckpt: {saved_data['args']}")
args = saved_data["args"]
args.local_rank = local_rank
args.init_epoch = saved_data["epochs"]
args.current_iter += 1
return args, saved_data
def get_model(args, saved_data=None):
if args.model == "tgspd":
model = TgSPDModel(args)
elif args.model == "tgrotspd":
model = TgSPDRotationModel(args)
elif args.model == "tgrefspd":
model = TgSPDReflectionModel(args)
elif args.model == "tgattnspd":
model = TgSPDAttnModel(args)
else:
|
model.to(config.DEVICE)
model = DistributedDataParallel(model, device_ids=None)
if saved_data:
model.load_state_dict(saved_data["model_state"])
return model
def get_optimizer(model, args, saved_data=None):
if args.optim == "sgd":
optim = SGD(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay)
elif args.optim == "adam":
optim = Adam(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay, amsgrad=args.amsgrad)
elif args.optim == "adamw":
optim = AdamW(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay, amsgrad=args.amsgrad)
elif args.optim == "adagrad":
optim = Adagrad(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay)
else:
raise ValueError(f"Unkown --optim option: {args.optim}")
if saved_data:
optim.load_state_dict(saved_data["optimizer_state"])
return optim
def get_scheduler(optimizer, args, saved_data=None):
patience = round(args.patience / args.val_every)
factor = 1 / float(args.reduce_factor)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=patience, factor=factor, mode="max")
if saved_data:
scheduler.load_state_dict(saved_data["scheduler_state"])
return scheduler
def build_data_loader(split, batch_size, shuffle, args):
"""
:param split: torch.LongTensor b x 3 with triples (h, r, t)
:param batch_size: int
:param shuffle: bool
:param args:
:return: torch DataLoader set up with distributed sampler
"""
tensor_dataset = TensorDataset(split)
sampler = DistributedSampler(tensor_dataset, num_replicas=args.n_procs, rank=args.local_rank, shuffle=shuffle)
data_loader = DataLoader(dataset=tensor_dataset, batch_size=batch_size, shuffle=False, num_workers=0,
pin_memory=True, sampler=sampler)
return data_loader
def load_training_data(args, log):
data_path = config.PREP_PATH / f"{args.data}/{config.PREPROCESSED_FILE}"
log.info(f"Loading data from {data_path}")
data = torch.load(str(data_path))
rel2id = data["rel2id"]
num_relations = len(rel2id)
train, valid, test = data["train"], data["valid"], data["test"]
if args.debug:
train = train[:10]
valid = valid[:4]
test = test[:4]
args.batch_size = 40
augmented = get_inverted_triples(train, num_relations)
train = np.vstack((train, augmented))
valid_proportion = args.subsample if args.subsample > 0 else 1
valid = valid[:int(len(valid) * valid_proportion)]
inverted_valid = get_inverted_triples(valid, num_relations)
inverted_test = get_inverted_triples(test, num_relations)
train, valid, inverted_valid, test, inverted_test = [torch.LongTensor(split) for split in
[train, valid, inverted_valid, test, inverted_test]]
train_batch_size = args.batch_size // args.n_procs
eval_batch_size = args.eval_batch_size // args.n_procs
log.info(f"Batch size {train_batch_size} for {args.local_rank}/{args.n_procs} processes")
train_loader = build_data_loader(train, train_batch_size, shuffle=True, args=args)
lhs_valid_loader = build_data_loader(valid, eval_batch_size, shuffle=False, args=args)
rhs_valid_loader = build_data_loader(inverted_valid, eval_batch_size, shuffle=False, args=args)
lhs_test_loader = build_data_loader(test, eval_batch_size, shuffle=False, args=args)
rhs_test_loader = build_data_loader(inverted_test, eval_batch_size, shuffle=False, args=args)
valid_loaders = {"lhs": lhs_valid_loader, "rhs": rhs_valid_loader}
test_loaders = {"lhs": lhs_test_loader, "rhs": rhs_test_loader}
# add the inverted relations into the rel2id dict
invrel2id = {f"INV_{rel_name}": rel_id + num_relations for rel_name, rel_id in rel2id.items()}
rel2id = {**rel2id, **invrel2id}
return train_loader, valid_loaders, test_loaders, data["filters"], data["ent2id"], rel2id
def main():
parser = argparse.ArgumentParser("train.py | raise ValueError(f"Unrecognized model argument: {args.model}") | conditional_block |
train.py | t in list(MetricType)]}")
parser.add_argument("--dims", default=4, type=int, help="Dimensions for the model.")
parser.add_argument("--train_bias", dest='train_bias', action='store_true', default=False,
help="Whether to train scaling or not.")
parser.add_argument("--use_hrh", default=1, type=int, help="Whether to use HRH or RHR in lhs op.")
parser.add_argument("--inverse_tail", default=0, type=int, help="Whether to use t or t^-1 as tail")
parser.add_argument("--loss", choices=["BCELoss", "HingeLoss"], default="BCELoss", help="Loss function")
parser.add_argument("--hinge_margin", default=1, type=float, help="Margin for hinge loss function")
parser.add_argument("--regularizer", choices=["N3", "F2"], default="N3", help="Regularizer")
parser.add_argument("--regularizer_weight", default=0, type=float, help="Regularization weight")
parser.add_argument("--neg_sample_size", default=1, type=int, help="Negative sample size, -1 to not use")
parser.add_argument("--double_neg", action="store_true", default=False,
help="Whether to negative sample both head and tail entities")
# optim and config
parser.add_argument("--optim", default="adam", type=str, help="Optimization method.")
parser.add_argument("--amsgrad", action="store_true", default=False, help="Use AMS grad in Adam or AdamW")
parser.add_argument("--learning_rate", default=1e-3, type=float, help="Starting learning rate.")
parser.add_argument("--reduce_factor", default=2, type=float, help="Factor to reduce lr on plateau.") | parser.add_argument("--max_grad_norm", default=50.0, type=float, help="Max gradient norm.")
parser.add_argument("--batch_size", default=1000, type=int, help="Batch size.")
parser.add_argument("--eval_batch_size", default=100, type=int, help="Eval batch size. Has impact only on memory")
parser.add_argument("--epochs", default=100, type=int, help="Number of training epochs.")
parser.add_argument("--burnin", default=10, type=int, help="Number of initial epochs to train with reduce lr.")
parser.add_argument("--grad_accum_steps", default=1, type=int,
help="Number of update steps to accumulate before backward.")
parser.add_argument("--subsample", default=-1, type=float, help="Subsamples a % of valid triples")
# Others
parser.add_argument("--dtype", default="single", type=str, choices=["single", "double"], help="Machine precision")
parser.add_argument("--local_rank", type=int, help="Local process rank assigned by torch.distributed.launch")
parser.add_argument("--n_procs", default=4, type=int, help="Number of process to create")
parser.add_argument("--load_ckpt", default="", type=str, help="Load model from this file")
parser.add_argument("--results_file", default="out/results.csv", type=str, help="Exports final results to this file")
parser.add_argument("--save_epochs", default=10001, type=int, help="Exports every n epochs")
parser.add_argument("--seed", default=42, type=int, help="Seed")
parser.add_argument("--debug", dest='debug', action='store_true', default=False, help="Debug mode")
def load_ckpt(args, log):
saved_data = torch.load(args.load_ckpt) if args.load_ckpt else {}
args.init_epoch = 0
args.current_iter = 1
if saved_data:
local_rank = args.local_rank
if local_rank == 0:
log.info(f"Loaded CKPT: {args.load_ckpt}, Args in ckpt: {saved_data['args']}")
args = saved_data["args"]
args.local_rank = local_rank
args.init_epoch = saved_data["epochs"]
args.current_iter += 1
return args, saved_data
def get_model(args, saved_data=None):
if args.model == "tgspd":
model = TgSPDModel(args)
elif args.model == "tgrotspd":
model = TgSPDRotationModel(args)
elif args.model == "tgrefspd":
model = TgSPDReflectionModel(args)
elif args.model == "tgattnspd":
model = TgSPDAttnModel(args)
else:
raise ValueError(f"Unrecognized model argument: {args.model}")
model.to(config.DEVICE)
model = DistributedDataParallel(model, device_ids=None)
if saved_data:
model.load_state_dict(saved_data["model_state"])
return model
def get_optimizer(model, args, saved_data=None):
if args.optim == "sgd":
optim = SGD(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay)
elif args.optim == "adam":
optim = Adam(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay, amsgrad=args.amsgrad)
elif args.optim == "adamw":
optim = AdamW(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay, amsgrad=args.amsgrad)
elif args.optim == "adagrad":
optim = Adagrad(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay)
else:
raise ValueError(f"Unkown --optim option: {args.optim}")
if saved_data:
optim.load_state_dict(saved_data["optimizer_state"])
return optim
def get_scheduler(optimizer, args, saved_data=None):
patience = round(args.patience / args.val_every)
factor = 1 / float(args.reduce_factor)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=patience, factor=factor, mode="max")
if saved_data:
scheduler.load_state_dict(saved_data["scheduler_state"])
return scheduler
def build_data_loader(split, batch_size, shuffle, args):
"""
:param split: torch.LongTensor b x 3 with triples (h, r, t)
:param batch_size: int
:param shuffle: bool
:param args:
:return: torch DataLoader set up with distributed sampler
"""
tensor_dataset = TensorDataset(split)
sampler = DistributedSampler(tensor_dataset, num_replicas=args.n_procs, rank=args.local_rank, shuffle=shuffle)
data_loader = DataLoader(dataset=tensor_dataset, batch_size=batch_size, shuffle=False, num_workers=0,
pin_memory=True, sampler=sampler)
return data_loader
def load_training_data(args, log):
data_path = config.PREP_PATH / f"{args.data}/{config.PREPROCESSED_FILE}"
log.info(f"Loading data from {data_path}")
data = torch.load(str(data_path))
rel2id = data["rel2id"]
num_relations = len(rel2id)
train, valid, test = data["train"], data["valid"], data["test"]
if args.debug:
train = train[:10]
valid = valid[:4]
test = test[:4]
args.batch_size = 40
augmented = get_inverted_triples(train, num_relations)
train = np.vstack((train, augmented))
valid_proportion = args.subsample if args.subsample > 0 else 1
valid = valid[:int(len(valid) * valid_proportion)]
inverted_valid = get_inverted_triples(valid, num_relations)
inverted_test = get_inverted_triples(test, num_relations)
train, valid, inverted_valid, test, inverted_test = [torch.LongTensor(split) for split in
[train, valid, inverted_valid, test, inverted_test]]
train_batch_size = args.batch_size // args.n_procs
eval_batch_size = args.eval_batch_size // args.n_procs
log.info(f"Batch size {train_batch_size} for {args.local_rank}/{args.n_procs} processes")
train_loader = build_data_loader(train, train_batch_size, shuffle=True, args=args)
lhs_valid_loader = build_data_loader(valid, eval_batch_size, shuffle=False, args=args)
rhs_valid_loader = build_data_loader(inverted_valid, eval_batch_size, shuffle=False, args=args)
lhs_test_loader = build_data_loader(test, eval_batch_size, shuffle=False, args=args)
rhs_test_loader = build_data_loader(inverted_test, eval_batch_size, shuffle=False, args=args)
valid_loaders = {"lhs": lhs_valid_loader, "rhs": rhs_valid_loader}
test_loaders = {"lhs": lhs_test_loader, "rhs": rhs_test_loader}
# add the inverted relations into the rel2id dict
invrel2id = {f"INV_{rel_name}": rel_id + num_relations for rel_name, rel_id in rel2id.items()}
rel2id = {**rel2id, **invrel2id}
return train_loader, valid_loaders, test_loaders, data["filters"], data["ent2id"], rel2id
def main():
parser = argparse.ArgumentParser("train.py")
| parser.add_argument("--weight_decay", default=0.00, type=float, help="L2 Regularization.")
parser.add_argument("--val_every", default=5, type=int, help="Runs validation every n epochs.")
parser.add_argument("--patience", default=50, type=int, help="Epochs of patience for scheduler and early stop.") | random_line_split |
train.py | t in list(MetricType)]}")
parser.add_argument("--dims", default=4, type=int, help="Dimensions for the model.")
parser.add_argument("--train_bias", dest='train_bias', action='store_true', default=False,
help="Whether to train scaling or not.")
parser.add_argument("--use_hrh", default=1, type=int, help="Whether to use HRH or RHR in lhs op.")
parser.add_argument("--inverse_tail", default=0, type=int, help="Whether to use t or t^-1 as tail")
parser.add_argument("--loss", choices=["BCELoss", "HingeLoss"], default="BCELoss", help="Loss function")
parser.add_argument("--hinge_margin", default=1, type=float, help="Margin for hinge loss function")
parser.add_argument("--regularizer", choices=["N3", "F2"], default="N3", help="Regularizer")
parser.add_argument("--regularizer_weight", default=0, type=float, help="Regularization weight")
parser.add_argument("--neg_sample_size", default=1, type=int, help="Negative sample size, -1 to not use")
parser.add_argument("--double_neg", action="store_true", default=False,
help="Whether to negative sample both head and tail entities")
# optim and config
parser.add_argument("--optim", default="adam", type=str, help="Optimization method.")
parser.add_argument("--amsgrad", action="store_true", default=False, help="Use AMS grad in Adam or AdamW")
parser.add_argument("--learning_rate", default=1e-3, type=float, help="Starting learning rate.")
parser.add_argument("--reduce_factor", default=2, type=float, help="Factor to reduce lr on plateau.")
parser.add_argument("--weight_decay", default=0.00, type=float, help="L2 Regularization.")
parser.add_argument("--val_every", default=5, type=int, help="Runs validation every n epochs.")
parser.add_argument("--patience", default=50, type=int, help="Epochs of patience for scheduler and early stop.")
parser.add_argument("--max_grad_norm", default=50.0, type=float, help="Max gradient norm.")
parser.add_argument("--batch_size", default=1000, type=int, help="Batch size.")
parser.add_argument("--eval_batch_size", default=100, type=int, help="Eval batch size. Has impact only on memory")
parser.add_argument("--epochs", default=100, type=int, help="Number of training epochs.")
parser.add_argument("--burnin", default=10, type=int, help="Number of initial epochs to train with reduce lr.")
parser.add_argument("--grad_accum_steps", default=1, type=int,
help="Number of update steps to accumulate before backward.")
parser.add_argument("--subsample", default=-1, type=float, help="Subsamples a % of valid triples")
# Others
parser.add_argument("--dtype", default="single", type=str, choices=["single", "double"], help="Machine precision")
parser.add_argument("--local_rank", type=int, help="Local process rank assigned by torch.distributed.launch")
parser.add_argument("--n_procs", default=4, type=int, help="Number of process to create")
parser.add_argument("--load_ckpt", default="", type=str, help="Load model from this file")
parser.add_argument("--results_file", default="out/results.csv", type=str, help="Exports final results to this file")
parser.add_argument("--save_epochs", default=10001, type=int, help="Exports every n epochs")
parser.add_argument("--seed", default=42, type=int, help="Seed")
parser.add_argument("--debug", dest='debug', action='store_true', default=False, help="Debug mode")
def load_ckpt(args, log):
saved_data = torch.load(args.load_ckpt) if args.load_ckpt else {}
args.init_epoch = 0
args.current_iter = 1
if saved_data:
local_rank = args.local_rank
if local_rank == 0:
log.info(f"Loaded CKPT: {args.load_ckpt}, Args in ckpt: {saved_data['args']}")
args = saved_data["args"]
args.local_rank = local_rank
args.init_epoch = saved_data["epochs"]
args.current_iter += 1
return args, saved_data
def get_model(args, saved_data=None):
if args.model == "tgspd":
model = TgSPDModel(args)
elif args.model == "tgrotspd":
model = TgSPDRotationModel(args)
elif args.model == "tgrefspd":
model = TgSPDReflectionModel(args)
elif args.model == "tgattnspd":
model = TgSPDAttnModel(args)
else:
raise ValueError(f"Unrecognized model argument: {args.model}")
model.to(config.DEVICE)
model = DistributedDataParallel(model, device_ids=None)
if saved_data:
model.load_state_dict(saved_data["model_state"])
return model
def get_optimizer(model, args, saved_data=None):
if args.optim == "sgd":
optim = SGD(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay)
elif args.optim == "adam":
optim = Adam(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay, amsgrad=args.amsgrad)
elif args.optim == "adamw":
optim = AdamW(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay, amsgrad=args.amsgrad)
elif args.optim == "adagrad":
optim = Adagrad(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay)
else:
raise ValueError(f"Unkown --optim option: {args.optim}")
if saved_data:
optim.load_state_dict(saved_data["optimizer_state"])
return optim
def get_scheduler(optimizer, args, saved_data=None):
|
def build_data_loader(split, batch_size, shuffle, args):
"""
:param split: torch.LongTensor b x 3 with triples (h, r, t)
:param batch_size: int
:param shuffle: bool
:param args:
:return: torch DataLoader set up with distributed sampler
"""
tensor_dataset = TensorDataset(split)
sampler = DistributedSampler(tensor_dataset, num_replicas=args.n_procs, rank=args.local_rank, shuffle=shuffle)
data_loader = DataLoader(dataset=tensor_dataset, batch_size=batch_size, shuffle=False, num_workers=0,
pin_memory=True, sampler=sampler)
return data_loader
def load_training_data(args, log):
data_path = config.PREP_PATH / f"{args.data}/{config.PREPROCESSED_FILE}"
log.info(f"Loading data from {data_path}")
data = torch.load(str(data_path))
rel2id = data["rel2id"]
num_relations = len(rel2id)
train, valid, test = data["train"], data["valid"], data["test"]
if args.debug:
train = train[:10]
valid = valid[:4]
test = test[:4]
args.batch_size = 40
augmented = get_inverted_triples(train, num_relations)
train = np.vstack((train, augmented))
valid_proportion = args.subsample if args.subsample > 0 else 1
valid = valid[:int(len(valid) * valid_proportion)]
inverted_valid = get_inverted_triples(valid, num_relations)
inverted_test = get_inverted_triples(test, num_relations)
train, valid, inverted_valid, test, inverted_test = [torch.LongTensor(split) for split in
[train, valid, inverted_valid, test, inverted_test]]
train_batch_size = args.batch_size // args.n_procs
eval_batch_size = args.eval_batch_size // args.n_procs
log.info(f"Batch size {train_batch_size} for {args.local_rank}/{args.n_procs} processes")
train_loader = build_data_loader(train, train_batch_size, shuffle=True, args=args)
lhs_valid_loader = build_data_loader(valid, eval_batch_size, shuffle=False, args=args)
rhs_valid_loader = build_data_loader(inverted_valid, eval_batch_size, shuffle=False, args=args)
lhs_test_loader = build_data_loader(test, eval_batch_size, shuffle=False, args=args)
rhs_test_loader = build_data_loader(inverted_test, eval_batch_size, shuffle=False, args=args)
valid_loaders = {"lhs": lhs_valid_loader, "rhs": rhs_valid_loader}
test_loaders = {"lhs": lhs_test_loader, "rhs": rhs_test_loader}
# add the inverted relations into the rel2id dict
invrel2id = {f"INV_{rel_name}": rel_id + num_relations for rel_name, rel_id in rel2id.items()}
rel2id = {**rel2id, **invrel2id}
return train_loader, valid_loaders, test_loaders, data["filters"], data["ent2id"], rel2id
def main():
parser = argparse.ArgumentParser("train.py | patience = round(args.patience / args.val_every)
factor = 1 / float(args.reduce_factor)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=patience, factor=factor, mode="max")
if saved_data:
scheduler.load_state_dict(saved_data["scheduler_state"])
return scheduler | identifier_body |
train.py | (parser):
# Data options
parser.add_argument("--data", required=True, type=str, help="Name of data set folder")
parser.add_argument("--run_id", required=True, type=str, help="Name of model/run to export")
# Model
parser.add_argument("--model", default="tgattnspd", type=str, help="Model type: tgspd, tgattnspd")
parser.add_argument("--metric", default="riem", type=str, help=f"Metrics: {[t.value for t in list(MetricType)]}")
parser.add_argument("--dims", default=4, type=int, help="Dimensions for the model.")
parser.add_argument("--train_bias", dest='train_bias', action='store_true', default=False,
help="Whether to train scaling or not.")
parser.add_argument("--use_hrh", default=1, type=int, help="Whether to use HRH or RHR in lhs op.")
parser.add_argument("--inverse_tail", default=0, type=int, help="Whether to use t or t^-1 as tail")
parser.add_argument("--loss", choices=["BCELoss", "HingeLoss"], default="BCELoss", help="Loss function")
parser.add_argument("--hinge_margin", default=1, type=float, help="Margin for hinge loss function")
parser.add_argument("--regularizer", choices=["N3", "F2"], default="N3", help="Regularizer")
parser.add_argument("--regularizer_weight", default=0, type=float, help="Regularization weight")
parser.add_argument("--neg_sample_size", default=1, type=int, help="Negative sample size, -1 to not use")
parser.add_argument("--double_neg", action="store_true", default=False,
help="Whether to negative sample both head and tail entities")
# optim and config
parser.add_argument("--optim", default="adam", type=str, help="Optimization method.")
parser.add_argument("--amsgrad", action="store_true", default=False, help="Use AMS grad in Adam or AdamW")
parser.add_argument("--learning_rate", default=1e-3, type=float, help="Starting learning rate.")
parser.add_argument("--reduce_factor", default=2, type=float, help="Factor to reduce lr on plateau.")
parser.add_argument("--weight_decay", default=0.00, type=float, help="L2 Regularization.")
parser.add_argument("--val_every", default=5, type=int, help="Runs validation every n epochs.")
parser.add_argument("--patience", default=50, type=int, help="Epochs of patience for scheduler and early stop.")
parser.add_argument("--max_grad_norm", default=50.0, type=float, help="Max gradient norm.")
parser.add_argument("--batch_size", default=1000, type=int, help="Batch size.")
parser.add_argument("--eval_batch_size", default=100, type=int, help="Eval batch size. Has impact only on memory")
parser.add_argument("--epochs", default=100, type=int, help="Number of training epochs.")
parser.add_argument("--burnin", default=10, type=int, help="Number of initial epochs to train with reduce lr.")
parser.add_argument("--grad_accum_steps", default=1, type=int,
help="Number of update steps to accumulate before backward.")
parser.add_argument("--subsample", default=-1, type=float, help="Subsamples a % of valid triples")
# Others
parser.add_argument("--dtype", default="single", type=str, choices=["single", "double"], help="Machine precision")
parser.add_argument("--local_rank", type=int, help="Local process rank assigned by torch.distributed.launch")
parser.add_argument("--n_procs", default=4, type=int, help="Number of process to create")
parser.add_argument("--load_ckpt", default="", type=str, help="Load model from this file")
parser.add_argument("--results_file", default="out/results.csv", type=str, help="Exports final results to this file")
parser.add_argument("--save_epochs", default=10001, type=int, help="Exports every n epochs")
parser.add_argument("--seed", default=42, type=int, help="Seed")
parser.add_argument("--debug", dest='debug', action='store_true', default=False, help="Debug mode")
def load_ckpt(args, log):
saved_data = torch.load(args.load_ckpt) if args.load_ckpt else {}
args.init_epoch = 0
args.current_iter = 1
if saved_data:
local_rank = args.local_rank
if local_rank == 0:
log.info(f"Loaded CKPT: {args.load_ckpt}, Args in ckpt: {saved_data['args']}")
args = saved_data["args"]
args.local_rank = local_rank
args.init_epoch = saved_data["epochs"]
args.current_iter += 1
return args, saved_data
def get_model(args, saved_data=None):
if args.model == "tgspd":
model = TgSPDModel(args)
elif args.model == "tgrotspd":
model = TgSPDRotationModel(args)
elif args.model == "tgrefspd":
model = TgSPDReflectionModel(args)
elif args.model == "tgattnspd":
model = TgSPDAttnModel(args)
else:
raise ValueError(f"Unrecognized model argument: {args.model}")
model.to(config.DEVICE)
model = DistributedDataParallel(model, device_ids=None)
if saved_data:
model.load_state_dict(saved_data["model_state"])
return model
def get_optimizer(model, args, saved_data=None):
if args.optim == "sgd":
optim = SGD(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay)
elif args.optim == "adam":
optim = Adam(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay, amsgrad=args.amsgrad)
elif args.optim == "adamw":
optim = AdamW(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay, amsgrad=args.amsgrad)
elif args.optim == "adagrad":
optim = Adagrad(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay)
else:
raise ValueError(f"Unkown --optim option: {args.optim}")
if saved_data:
optim.load_state_dict(saved_data["optimizer_state"])
return optim
def get_scheduler(optimizer, args, saved_data=None):
patience = round(args.patience / args.val_every)
factor = 1 / float(args.reduce_factor)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=patience, factor=factor, mode="max")
if saved_data:
scheduler.load_state_dict(saved_data["scheduler_state"])
return scheduler
def build_data_loader(split, batch_size, shuffle, args):
"""
:param split: torch.LongTensor b x 3 with triples (h, r, t)
:param batch_size: int
:param shuffle: bool
:param args:
:return: torch DataLoader set up with distributed sampler
"""
tensor_dataset = TensorDataset(split)
sampler = DistributedSampler(tensor_dataset, num_replicas=args.n_procs, rank=args.local_rank, shuffle=shuffle)
data_loader = DataLoader(dataset=tensor_dataset, batch_size=batch_size, shuffle=False, num_workers=0,
pin_memory=True, sampler=sampler)
return data_loader
def load_training_data(args, log):
data_path = config.PREP_PATH / f"{args.data}/{config.PREPROCESSED_FILE}"
log.info(f"Loading data from {data_path}")
data = torch.load(str(data_path))
rel2id = data["rel2id"]
num_relations = len(rel2id)
train, valid, test = data["train"], data["valid"], data["test"]
if args.debug:
train = train[:10]
valid = valid[:4]
test = test[:4]
args.batch_size = 40
augmented = get_inverted_triples(train, num_relations)
train = np.vstack((train, augmented))
valid_proportion = args.subsample if args.subsample > 0 else 1
valid = valid[:int(len(valid) * valid_proportion)]
inverted_valid = get_inverted_triples(valid, num_relations)
inverted_test = get_inverted_triples(test, num_relations)
train, valid, inverted_valid, test, inverted_test = [torch.LongTensor(split) for split in
[train, valid, inverted_valid, test, inverted_test]]
train_batch_size = args.batch_size // args.n_procs
eval_batch_size = args.eval_batch_size // args.n_procs
log.info(f"Batch size {train_batch_size} for {args.local_rank}/{args.n_procs} processes")
train_loader = build_data_loader(train, train_batch_size, shuffle=True, args=args)
lhs_valid_loader = build_data_loader(valid, eval_batch_size, shuffle=False, args=args)
rhs_valid_loader = build_data_loader(inverted_valid, eval_batch_size, shuffle=False, args=args)
lhs_test_loader = build_data_loader(test, eval_batch_size, shuffle=False, args=args)
rhs_test_loader = build_data_loader(inverted_test, eval_batch_size, shuffle=False, args=args)
valid_loaders = {"lhs": lhs_valid_loader, "rhs": rhs_valid_loader}
test_loaders = {"lhs": lhs | config_parser | identifier_name |
|
feffpath.py | def bytes2str(val):
if isinstance(val, str):
return val
if isinstance(val, bytes):
return str(val, 'utf-8')
return str(val)
def str2bytes(val):
if isinstance(val, bytes):
return val
return bytes(val, 'utf-8')
def with_phase_file(fcn):
"""decorator to ensure that the wrapped function either
has a non-None 'phase_file' argument or that that
self.phase_file is not None
"""
errmsg = "function '%s' needs a non-None phase_file"
def wrapper(*args, **keywords):
"needs phase_file"
phase_file = keywords.get('phase_file', None)
if phase_file is None:
phase_file = getattr(args[0], 'phase_file', None)
if phase_file is None:
raise AttributeError(errmsg % fcn.__name__)
else:
setattr(args[0], 'phase_file', phase_file)
# raise Warning(errmsg % fcn.__name__)
return fcn(*args, **keywords)
wrapper.__doc__ = fcn.__doc__
wrapper.__name__ = fcn.__name__
wrapper.__filename__ = fcn.__code__.co_filename
wrapper.__dict__.update(fcn.__dict__)
return wrapper
class ScatteringPath(object):
"""A Scatering Path for calculating a XAFS signal with Feff
A calculation requires a Potentials and Phase Shift calculation
in PAD format from Feff85, and a list of scattering paths
Usage:
------
# create path
path = ScatteringPath(phase_file='phase.pad')
# list 'ipot' and labels for absorber, scattererers
path.list_scatterers()
# set coords for absorbing atom
path.set_absorber(x=0., y=0., z=0.)
# add scattering atom
path.add_scatterer(x=1.5, y=1.5, z=1.5, ipot=1)
# calculate basic (unaltered) XAFS contributions
path.calcuate_xafs()
"""
def __init__(self, phase_file=None):
self.phase_file = phase_file
self.clear()
def clear(self):
"""reset all path data"""
self.index = 1
self.degen = 1.
self.nnnn_out = False
self.json_out = False
self.verbose = False
self.ipol = 0
self.ellip = 0.
self.nepts = 0
self.genfmt_order = 2
self.genfmt_vers = ""
self.exch_label = ""
self.rs = 0.
self.vint = 0.
self.xmu = 0.
self.edge = 0.
self.kf = 0.
self.rnorman = 0.
self.gamach = 0.
self.nepts = FEFF_maxpts
dargs = dict(dtype=np.float64, order='F')
largs = dict(dtype=np.int32, order='F')
self.evec = np.zeros(3, **dargs)
self.xivec = np.zeros(3, **dargs)
self.ipot = np.zeros(1+FEFF_maxleg, **largs)
self.beta = np.zeros(1+FEFF_maxleg, **dargs)
self.eta = np.zeros(2+FEFF_maxleg, **dargs)
self.ri = np.zeros(FEFF_maxleg, **dargs)
self.rat = np.zeros((3, 2+FEFF_maxleg), **dargs)
self.iz = np.zeros(1+FEFF_maxpot, **largs)
self.kfeff = np.zeros(FEFF_maxpts, **dargs)
self.real_phc = np.zeros(FEFF_maxpts, **dargs)
self.mag_feff = np.zeros(FEFF_maxpts, **dargs)
self.pha_feff = np.zeros(FEFF_maxpts, **dargs)
self.red_fact = np.zeros(FEFF_maxpts, **dargs)
self.lam = np.zeros(FEFF_maxpts, **dargs)
self.rep = np.zeros(FEFF_maxpts, **dargs)
self.nleg = 1
@with_phase_file
def list_scatterers(self, phase_file=None):
"""list Feff Potentials atoms ('ipots') fo phase file"""
atoms = []
with open(self.phase_file,'r') as fh:
line1_words = fh.readline().strip().split()
text = fh.readlines()
nphases = int(line1_words[4])
for line in text[4:]:
if line.startswith('$'): continue
words = line.split()
atoms.append((int(words[1]), words[2]))
if len(atoms) > nphases:
break
out = ["# Potential Z Symbol"]
for ipot, atom in enumerate(atoms):
out.append(" %2i %3i %s" % (ipot, atom[0], atom[1]))
return "\n".join(out)
@with_phase_file
def set_absorber(self, x=0., y=0., z=0., phase_file=None):
"""set coordinates for absorbing atom ('ipot'=0)"""
self.rat[0, 0] = x
self.rat[1, 0] = y
self.rat[2, 0] = z
@with_phase_file
def add_scatterer(self, x=0., y=0., z=0., ipot=1, phase_file=None):
self.rat[0, self.nleg] = x
self.rat[1, self.nleg] = y
self.rat[2, self.nleg] = z
self.ipot[self.nleg] = ipot
self.nleg += 1
# set final atom coords to same as absorber
self.rat[0, self.nleg] = self.rat[0, 0]
self.rat[1, self.nleg] = self.rat[1, 0]
self.rat[2, self.nleg] = self.rat[2, 0]
self.ipot[self.nleg] = self.ipot[0]
@with_phase_file
def calculate_xafs(self, phase_file=None):
| cdata = arr.ctypes.data_as(POINTER(arr.size*c_int))
setattr(args, attr, cdata)
# double arrays
self.rat = self.rat/BOHR
for attr in ('evec', 'xivec', 'rat', 'ri', 'beta', 'eta',
'kfeff', 'real_phc', 'mag_feff', 'pha_feff',
'red_fact', 'lam', 'rep'):
arr = getattr(self, attr)
cdata = arr.ctypes.data_as(POINTER(arr.size*c_double))
setattr(args, attr, cdata)
x = FLIB.calc_onepath(args.phase_file, args.index, args.nleg,
args.degen, args.genfmt_order,
args.exch_label, args.rs, args.vint,
args.xmu, args.edge, args.kf, args.rnorman,
args.gamach, args.genfmt_version, args.ipot,
args.rat, args.iz, args.ipol, args.evec,
args.ellip, args.xivec, args.nnnn_out,
args.json_out, args.verbose, args.ri,
args.beta, args.eta, args.nepts, args.kfeff,
args.real_phc, args.mag_feff, args.pha_feff,
args.red_fact, args.lam, args.rep)
self.exch_label = bytes2str(args.exch_label).strip()
self.genfmt_version = bytes2str(args.genfmt_version).strip()
for attr in ('index', 'nleg', 'genfmt_order', 'degen', 'rs',
'vint', 'xmu', 'edge', 'kf', 'rnorman', 'gamach',
'ipol', ' | class args: pass
# strings / char*. Note fixed length to match Fortran
args.phase_file = (self.phase_file + ' '*256)[:256]
args.exch_label = str2bytes(' '*8)
args.genfmt_version = str2bytes(' '*30)
# integers, including booleans
for attr in ('index', 'nleg', 'genfmt_order', 'ipol', 'nnnn_out',
'json_out', 'verbose', 'nepts'):
setattr(args, attr, pointer(c_long(int(getattr(self, attr)))))
# doubles
for attr in ('degen', 'rs', 'vint', 'xmu', 'edge', 'kf', 'rnorman',
'gamach', 'ellip'):
setattr(args, attr, pointer(c_double(getattr(self, attr))))
# integer arrays
for attr in ('ipot', 'iz'):
arr = getattr(self, attr) | identifier_body |
feffpath.py | def bytes2str(val):
if isinstance(val, str):
return val
if isinstance(val, bytes):
return str(val, 'utf-8')
return str(val)
def str2bytes(val):
if isinstance(val, bytes):
return val
return bytes(val, 'utf-8')
def with_phase_file(fcn):
"""decorator to ensure that the wrapped function either
has a non-None 'phase_file' argument or that
self.phase_file is not None
"""
errmsg = "function '%s' needs a non-None phase_file"
def wrapper(*args, **keywords):
"needs phase_file"
phase_file = keywords.get('phase_file', None)
if phase_file is None:
phase_file = getattr(args[0], 'phase_file', None)
if phase_file is None:
raise AttributeError(errmsg % fcn.__name__)
else:
setattr(args[0], 'phase_file', phase_file)
# raise Warning(errmsg % fcn.__name__)
return fcn(*args, **keywords)
wrapper.__doc__ = fcn.__doc__
wrapper.__name__ = fcn.__name__
wrapper.__filename__ = fcn.__code__.co_filename
wrapper.__dict__.update(fcn.__dict__)
return wrapper
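# Illustrative sketch added for clarity (not part of the original file; the calls below
# are hypothetical): the decorator above is meant to let a 'phase_file' keyword passed
# to any decorated method be copied onto the instance, while omitting the keyword falls
# back to self.phase_file and raises AttributeError if both are missing.
#
#     path = ScatteringPath()                       # no phase_file yet
#     path.list_scatterers(phase_file='phase.pad')  # also sets path.phase_file
#     path.list_scatterers()                        # reuses path.phase_file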
class ScatteringPath(object):
"""A Scatering Path for calculating a XAFS signal with Feff
A calculation requires a Potentials and Phase Shift calculation
in PAD format from Feff85, and a list of scattering paths
Usage:
------
# create path
path = ScatteringPath(phase_file='phase.pad')
# list 'ipot' and labels for absorber, scatterers
path.list_scatterers()
# set coords for absorbing atom
path.set_absorber(x=0., y=0., z=0.)
# add scattering atom
path.add_scatterer(x=1.5, y=1.5, z=1.5, ipot=1)
|
"""
def __init__(self, phase_file=None):
self.phase_file = phase_file
self.clear()
def clear(self):
"""reset all path data"""
self.index = 1
self.degen = 1.
self.nnnn_out = False
self.json_out = False
self.verbose = False
self.ipol = 0
self.ellip = 0.
self.nepts = 0
self.genfmt_order = 2
self.genfmt_vers = ""
self.exch_label = ""
self.rs = 0.
self.vint = 0.
self.xmu = 0.
self.edge = 0.
self.kf = 0.
self.rnorman = 0.
self.gamach = 0.
self.nepts = FEFF_maxpts
dargs = dict(dtype=np.float64, order='F')
largs = dict(dtype=np.int32, order='F')
self.evec = np.zeros(3, **dargs)
self.xivec = np.zeros(3, **dargs)
self.ipot = np.zeros(1+FEFF_maxleg, **largs)
self.beta = np.zeros(1+FEFF_maxleg, **dargs)
self.eta = np.zeros(2+FEFF_maxleg, **dargs)
self.ri = np.zeros(FEFF_maxleg, **dargs)
self.rat = np.zeros((3, 2+FEFF_maxleg), **dargs)
self.iz = np.zeros(1+FEFF_maxpot, **largs)
self.kfeff = np.zeros(FEFF_maxpts, **dargs)
self.real_phc = np.zeros(FEFF_maxpts, **dargs)
self.mag_feff = np.zeros(FEFF_maxpts, **dargs)
self.pha_feff = np.zeros(FEFF_maxpts, **dargs)
self.red_fact = np.zeros(FEFF_maxpts, **dargs)
self.lam = np.zeros(FEFF_maxpts, **dargs)
self.rep = np.zeros(FEFF_maxpts, **dargs)
self.nleg = 1
@with_phase_file
def list_scatterers(self, phase_file=None):
"""list Feff Potentials atoms ('ipots') fo phase file"""
atoms = []
with open(self.phase_file,'r') as fh:
line1_words = fh.readline().strip().split()
text = fh.readlines()
nphases = int(line1_words[4])
for line in text[4:]:
if line.startswith('$'): continue
words = line.split()
atoms.append((int(words[1]), words[2]))
if len(atoms) > nphases:
break
out = ["# Potential Z Symbol"]
for ipot, atom in enumerate(atoms):
out.append(" %2i %3i %s" % (ipot, atom[0], atom[1]))
return "\n".join(out)
@with_phase_file
def set_absorber(self, x=0., y=0., z=0., phase_file=None):
"""set coordinates for absorbing atom ('ipot'=0)"""
self.rat[0, 0] = x
self.rat[1, 0] = y
self.rat[2, 0] = z
@with_phase_file
def add_scatterer(self, x=0., y=0., z=0., ipot=1, phase_file=None):
self.rat[0, self.nleg] = x
self.rat[1, self.nleg] = y
self.rat[2, self.nleg] = z
self.ipot[self.nleg] = ipot
self.nleg += 1
# set final atom coords to same as absorber
self.rat[0, self.nleg] = self.rat[0, 0]
self.rat[1, self.nleg] = self.rat[1, 0]
self.rat[2, self.nleg] = self.rat[2, 0]
self.ipot[self.nleg] = self.ipot[0]
@with_phase_file
def calculate_xafs(self, phase_file=None):
class args: pass
# strings / char*. Note fixed length to match Fortran
args.phase_file = (self.phase_file + ' '*256)[:256]
args.exch_label = str2bytes(' '*8)
args.genfmt_version = str2bytes(' '*30)
# integers, including booleans
for attr in ('index', 'nleg', 'genfmt_order', 'ipol', 'nnnn_out',
'json_out', 'verbose', 'nepts'):
setattr(args, attr, pointer(c_long(int(getattr(self, attr)))))
# doubles
for attr in ('degen', 'rs', 'vint', 'xmu', 'edge', 'kf', 'rnorman',
'gamach', 'ellip'):
setattr(args, attr, pointer(c_double(getattr(self, attr))))
# integer arrays
for attr in ('ipot', 'iz'):
arr = getattr(self, attr)
cdata = arr.ctypes.data_as(POINTER(arr.size*c_int))
setattr(args, attr, cdata)
# double arrays
self.rat = self.rat/BOHR
for attr in ('evec', 'xivec', 'rat', 'ri', 'beta', 'eta',
'kfeff', 'real_phc', 'mag_feff', 'pha_feff',
'red_fact', 'lam', 'rep'):
arr = getattr(self, attr)
cdata = arr.ctypes.data_as(POINTER(arr.size*c_double))
setattr(args, attr, cdata)
x = FLIB.calc_onepath(args.phase_file, args.index, args.nleg,
args.degen, args.genfmt_order,
args.exch_label, args.rs, args.vint,
args.xmu, args.edge, args.kf, args.rnorman,
args.gamach, args.genfmt_version, args.ipot,
args.rat, args.iz, args.ipol, args.evec,
args.ellip, args.xivec, args.nnnn_out,
args.json_out, args.verbose, args.ri,
args.beta, args.eta, args.nepts, args.kfeff,
args.real_phc, args.mag_feff, args.pha_feff,
args.red_fact, args.lam, args.rep)
self.exch_label = bytes2str(args.exch_label).strip()
self.genfmt_version = bytes2str(args.genfmt_version).strip()
for attr in ('index', 'nleg', 'genfmt_order', 'degen', 'rs',
'vint', 'xmu', 'edge', 'kf', 'rnorman', 'gamach',
'ipol', ' | # calculate basic (unaltered) XAFS contributions
path.calculate_xafs() | random_line_split
feffpath.py | self.exch_label = ""
self.rs = 0.
self.vint = 0.
self.xmu = 0.
self.edge = 0.
self.kf = 0.
self.rnorman = 0.
self.gamach = 0.
self.nepts = FEFF_maxpts
dargs = dict(dtype=np.float64, order='F')
largs = dict(dtype=np.int32, order='F')
self.evec = np.zeros(3, **dargs)
self.xivec = np.zeros(3, **dargs)
self.ipot = np.zeros(1+FEFF_maxleg, **largs)
self.beta = np.zeros(1+FEFF_maxleg, **dargs)
self.eta = np.zeros(2+FEFF_maxleg, **dargs)
self.ri = np.zeros(FEFF_maxleg, **dargs)
self.rat = np.zeros((3, 2+FEFF_maxleg), **dargs)
self.iz = np.zeros(1+FEFF_maxpot, **largs)
self.kfeff = np.zeros(FEFF_maxpts, **dargs)
self.real_phc = np.zeros(FEFF_maxpts, **dargs)
self.mag_feff = np.zeros(FEFF_maxpts, **dargs)
self.pha_feff = np.zeros(FEFF_maxpts, **dargs)
self.red_fact = np.zeros(FEFF_maxpts, **dargs)
self.lam = np.zeros(FEFF_maxpts, **dargs)
self.rep = np.zeros(FEFF_maxpts, **dargs)
self.nleg = 1
@with_phase_file
def list_scatterers(self, phase_file=None):
"""list Feff Potentials atoms ('ipots') fo phase file"""
atoms = []
with open(self.phase_file,'r') as fh:
line1_words = fh.readline().strip().split()
text = fh.readlines()
nphases = int(line1_words[4])
for line in text[4:]:
if line.startswith('$'): continue
words = line.split()
atoms.append((int(words[1]), words[2]))
if len(atoms) > nphases:
break
out = ["# Potential Z Symbol"]
for ipot, atom in enumerate(atoms):
out.append(" %2i %3i %s" % (ipot, atom[0], atom[1]))
return "\n".join(out)
@with_phase_file
def set_absorber(self, x=0., y=0., z=0., phase_file=None):
"""set coordinates for absorbing atom ('ipot'=0)"""
self.rat[0, 0] = x
self.rat[1, 0] = y
self.rat[2, 0] = z
@with_phase_file
def add_scatterer(self, x=0., y=0., z=0., ipot=1, phase_file=None):
self.rat[0, self.nleg] = x
self.rat[1, self.nleg] = y
self.rat[2, self.nleg] = z
self.ipot[self.nleg] = ipot
self.nleg += 1
# set final atom coords to same as absorber
self.rat[0, self.nleg] = self.rat[0, 0]
self.rat[1, self.nleg] = self.rat[1, 0]
self.rat[2, self.nleg] = self.rat[2, 0]
self.ipot[self.nleg] = self.ipot[0]
@with_phase_file
def calculate_xafs(self, phase_file=None):
class args: pass
# strings / char*. Note fixed length to match Fortran
args.phase_file = (self.phase_file + ' '*256)[:256]
args.exch_label = str2bytes(' '*8)
args.genfmt_version = str2bytes(' '*30)
# integers, including booleans
for attr in ('index', 'nleg', 'genfmt_order', 'ipol', 'nnnn_out',
'json_out', 'verbose', 'nepts'):
setattr(args, attr, pointer(c_long(int(getattr(self, attr)))))
# doubles
for attr in ('degen', 'rs', 'vint', 'xmu', 'edge', 'kf', 'rnorman',
'gamach', 'ellip'):
setattr(args, attr, pointer(c_double(getattr(self, attr))))
# integer arrays
for attr in ('ipot', 'iz'):
arr = getattr(self, attr)
cdata = arr.ctypes.data_as(POINTER(arr.size*c_int))
setattr(args, attr, cdata)
# double arrays
self.rat = self.rat/BOHR
for attr in ('evec', 'xivec', 'rat', 'ri', 'beta', 'eta',
'kfeff', 'real_phc', 'mag_feff', 'pha_feff',
'red_fact', 'lam', 'rep'):
arr = getattr(self, attr)
cdata = arr.ctypes.data_as(POINTER(arr.size*c_double))
setattr(args, attr, cdata)
x = FLIB.calc_onepath(args.phase_file, args.index, args.nleg,
args.degen, args.genfmt_order,
args.exch_label, args.rs, args.vint,
args.xmu, args.edge, args.kf, args.rnorman,
args.gamach, args.genfmt_version, args.ipot,
args.rat, args.iz, args.ipol, args.evec,
args.ellip, args.xivec, args.nnnn_out,
args.json_out, args.verbose, args.ri,
args.beta, args.eta, args.nepts, args.kfeff,
args.real_phc, args.mag_feff, args.pha_feff,
args.red_fact, args.lam, args.rep)
self.exch_label = bytes2str(args.exch_label).strip()
self.genfmt_version = bytes2str(args.genfmt_version).strip()
for attr in ('index', 'nleg', 'genfmt_order', 'degen', 'rs',
'vint', 'xmu', 'edge', 'kf', 'rnorman', 'gamach',
'ipol', 'ellip', 'nnnn_out', 'json_out', 'verbose',
'nepts'):
setattr(self, attr, getattr(args, attr).contents.value)
for attr in ('ipot', 'evec', 'xivec', 'beta', 'eta', 'ri', 'rat',
'iz', 'kfeff', 'real_phc', 'mag_feff', 'pha_feff',
'red_fact', 'lam', 'rep'):
setattr(self, attr, np.array(getattr(args, attr).contents[:]))
# some data needs recasting, reformatting
self.nnnn_out = bool(self.nnnn_out)
self.json_out = bool(self.json_out)
self.verbose = bool(self.verbose)
self.rat = self.rat.reshape((2+FEFF_maxleg, 3)).transpose()*BOHR
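# Note added for orientation (an interpretation based on the array names above, not on
# documentation shipped with this file): after calculate_xafs() returns, the first
# self.nepts points of the output arrays hold this path's contribution on the k-grid
# self.kfeff -- mag_feff/pha_feff appear to be the effective scattering amplitude and
# phase, real_phc the central-atom phase shift, red_fact the reduction factor, lam the
# mean free path and rep the real part of the complex momentum. Combining these into
# chi(k) is left to the caller.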
if __name__ == '__main__':
path = ScatteringPath(phase_file='phase.pad')
path.set_absorber( x=0.01, y=0.1, z=0.01)
path.add_scatterer(x=1.8058, y=0.005, z=1.8063, ipot=1)
path.degen = 12
path.calculate_xafs()
print('# Calculate EXAFS with PhaseFile: {:s}'.format(path.phase_file))
print('# Path Geometry: \n# IPOT IZ X Y Z')
for i in range(path.nleg):
ipot = path.ipot[i]
iz = path.iz[ipot]
rat = path.rat[:,i]
print("# %2i %2i %8.4f %8.4f %8.4f" % (ipot,iz, rat[0], rat[1], rat[2]))
print("# Polarization: {:d}, ellipticity={:4f}".format(path.ipol, path.ellip))
print("# Polarization E Vector = {:s}".format(", ".join(["%.4f" % a for a in path.evec])))
print("# Polarization X Vector = {:s}".format(", ".join(["%.4f" % a for a in path.xivec])))
print("# Path Settings")
for attr in ('rs', 'vint', 'xmu', 'edge', 'kf', 'rnorman', 'gamach'):
print("# {:8s} = {:+4f} ".format(attr, getattr(path, attr)))
for attr in ('exch_label', 'genfmt_version'):
| print("# {:8s} = {:s} ".format(attr, getattr(path, attr))) | conditional_block |
|
feffpath.py | def bytes2str(val):
if isinstance(val, str):
return val
if isinstance(val, bytes):
return str(val, 'utf-8')
return str(val)
def str2bytes(val):
if isinstance(val, bytes):
return val
return bytes(val, 'utf-8')
def with_phase_file(fcn):
"""decorator to ensure that the wrapped function either
has a non-None 'phase_file' argument or that
self.phase_file is not None
"""
errmsg = "function '%s' needs a non-None phase_file"
def | (*args, **keywords):
"needs phase_file"
phase_file = keywords.get('phase_file', None)
if phase_file is None:
phase_file = getattr(args[0], 'phase_file', None)
if phase_file is None:
raise AttributeError(errmsg % fcn.__name__)
else:
setattr(args[0], 'phase_file', phase_file)
# raise Warning(errmsg % fcn.__name__)
return fcn(*args, **keywords)
wrapper.__doc__ = fcn.__doc__
wrapper.__name__ = fcn.__name__
wrapper.__filename__ = fcn.__code__.co_filename
wrapper.__dict__.update(fcn.__dict__)
return wrapper
class ScatteringPath(object):
"""A Scatering Path for calculating a XAFS signal with Feff
A calculation requires a Potentials and Phase Shift calculation
in PAD format from Feff85, and a list of scattering paths
Usage:
------
# create path
path = ScatteringPath(phase_file='phase.pad')
# list 'ipot' and labels for absorber, scatterers
path.list_scatterers()
# set coords for absorbing atom
path.set_absorber(x=0., y=0., z=0.)
# add scattering atom
path.add_scatterer(x=1.5, y=1.5, z=1.5, ipot=1)
# calculate basic (unaltered) XAFS contributions
path.calculate_xafs()
"""
def __init__(self, phase_file=None):
self.phase_file = phase_file
self.clear()
def clear(self):
"""reset all path data"""
self.index = 1
self.degen = 1.
self.nnnn_out = False
self.json_out = False
self.verbose = False
self.ipol = 0
self.ellip = 0.
self.nepts = 0
self.genfmt_order = 2
self.genfmt_vers = ""
self.exch_label = ""
self.rs = 0.
self.vint = 0.
self.xmu = 0.
self.edge = 0.
self.kf = 0.
self.rnorman = 0.
self.gamach = 0.
self.nepts = FEFF_maxpts
dargs = dict(dtype=np.float64, order='F')
largs = dict(dtype=np.int32, order='F')
self.evec = np.zeros(3, **dargs)
self.xivec = np.zeros(3, **dargs)
self.ipot = np.zeros(1+FEFF_maxleg, **largs)
self.beta = np.zeros(1+FEFF_maxleg, **dargs)
self.eta = np.zeros(2+FEFF_maxleg, **dargs)
self.ri = np.zeros(FEFF_maxleg, **dargs)
self.rat = np.zeros((3, 2+FEFF_maxleg), **dargs)
self.iz = np.zeros(1+FEFF_maxpot, **largs)
self.kfeff = np.zeros(FEFF_maxpts, **dargs)
self.real_phc = np.zeros(FEFF_maxpts, **dargs)
self.mag_feff = np.zeros(FEFF_maxpts, **dargs)
self.pha_feff = np.zeros(FEFF_maxpts, **dargs)
self.red_fact = np.zeros(FEFF_maxpts, **dargs)
self.lam = np.zeros(FEFF_maxpts, **dargs)
self.rep = np.zeros(FEFF_maxpts, **dargs)
self.nleg = 1
@with_phase_file
def list_scatterers(self, phase_file=None):
"""list Feff Potentials atoms ('ipots') fo phase file"""
atoms = []
with open(self.phase_file,'r') as fh:
line1_words = fh.readline().strip().split()
text = fh.readlines()
nphases = int(line1_words[4])
for line in text[4:]:
if line.startswith('$'): continue
words = line.split()
atoms.append((int(words[1]), words[2]))
if len(atoms) > nphases:
break
out = ["# Potential Z Symbol"]
for ipot, atom in enumerate(atoms):
out.append(" %2i %3i %s" % (ipot, atom[0], atom[1]))
return "\n".join(out)
@with_phase_file
def set_absorber(self, x=0., y=0., z=0., phase_file=None):
"""set coordinates for absorbing atom ('ipot'=0)"""
self.rat[0, 0] = x
self.rat[1, 0] = y
self.rat[2, 0] = z
@with_phase_file
def add_scatterer(self, x=0., y=0., z=0., ipot=1, phase_file=None):
self.rat[0, self.nleg] = x
self.rat[1, self.nleg] = y
self.rat[2, self.nleg] = z
self.ipot[self.nleg] = ipot
self.nleg += 1
# set final atom coords to same as absorber
self.rat[0, self.nleg] = self.rat[0, 0]
self.rat[1, self.nleg] = self.rat[1, 0]
self.rat[2, self.nleg] = self.rat[2, 0]
self.ipot[self.nleg] = self.ipot[0]
@with_phase_file
def calculate_xafs(self, phase_file=None):
class args: pass
# strings / char*. Note fixed length to match Fortran
args.phase_file = (self.phase_file + ' '*256)[:256]
args.exch_label = str2bytes(' '*8)
args.genfmt_version = str2bytes(' '*30)
# integers, including booleans
for attr in ('index', 'nleg', 'genfmt_order', 'ipol', 'nnnn_out',
'json_out', 'verbose', 'nepts'):
setattr(args, attr, pointer(c_long(int(getattr(self, attr)))))
# doubles
for attr in ('degen', 'rs', 'vint', 'xmu', 'edge', 'kf', 'rnorman',
'gamach', 'ellip'):
setattr(args, attr, pointer(c_double(getattr(self, attr))))
# integer arrays
for attr in ('ipot', 'iz'):
arr = getattr(self, attr)
cdata = arr.ctypes.data_as(POINTER(arr.size*c_int))
setattr(args, attr, cdata)
# double arrays
self.rat = self.rat/BOHR
for attr in ('evec', 'xivec', 'rat', 'ri', 'beta', 'eta',
'kfeff', 'real_phc', 'mag_feff', 'pha_feff',
'red_fact', 'lam', 'rep'):
arr = getattr(self, attr)
cdata = arr.ctypes.data_as(POINTER(arr.size*c_double))
setattr(args, attr, cdata)
x = FLIB.calc_onepath(args.phase_file, args.index, args.nleg,
args.degen, args.genfmt_order,
args.exch_label, args.rs, args.vint,
args.xmu, args.edge, args.kf, args.rnorman,
args.gamach, args.genfmt_version, args.ipot,
args.rat, args.iz, args.ipol, args.evec,
args.ellip, args.xivec, args.nnnn_out,
args.json_out, args.verbose, args.ri,
args.beta, args.eta, args.nepts, args.kfeff,
args.real_phc, args.mag_feff, args.pha_feff,
args.red_fact, args.lam, args.rep)
self.exch_label = bytes2str(args.exch_label).strip()
self.genfmt_version = bytes2str(args.genfmt_version).strip()
for attr in ('index', 'nleg', 'genfmt_order', 'degen', 'rs',
'vint', 'xmu', 'edge', 'kf', 'rnorman', 'gamach',
'ipol', ' | wrapper | identifier_name |
main.rs | 10...12 } => println!("Found an id in another range"),
MessageNum::Hello { id } => println!("Found some other id: {}", id),
}
}
enum MessageNum {
Hello { id: i32 },
}
enum Color {
Rgb(i32, i32, i32),
Hsv(i32, i32, i32),
}
enum Message {
Quit,
Move { x: i32, y: i32 },
Write(String),
ChangeColor(Color),
}
//Similarities Between RefCell<T>/Rc<T> and Mutex<T>/Arc<T>
fn do_concurrency2() {
use std::sync::mpsc;
use std::thread;
use std::time::Duration;
//VIP: producer and consumer model
let (tx, rx) = mpsc::channel();
let tx1 = mpsc::Sender::clone(&tx);
thread::spawn(move || {
let vals = vec![
String::from("1:你好!"),
String::from("1:你去做什么?"),
String::from("1:Why?"),
String::from("1:那很好呀!"),
];
for val in vals {
tx1.send(val).unwrap();
thread::sleep(Duration::from_secs(1));
}
});
thread::spawn(move || {
let vals = vec![
String::from("2:你好!"),
String::from("2:你去做什么?"),
String::from("2:Why?"),
String::from("2:那很好呀!"),
];
for val in vals {
tx.send(val).unwrap();
thread::sleep(Duration::from_secs(1));
}
});
for received in rx {
println!("Got: {}", received);
}
}
fn do_concurrency3() {
use std::sync::{Arc, Mutex};
use std::thread;
let counter = Arc::new(Mutex::new(0));
let mut handles = vec![];
for _ in 0..10 {
let counter = Arc::clone(&counter);
let handle = thread::spawn(move || {
let mut num = counter.lock().unwrap();
*num += 1;
});
handles.push(handle);
}
for handle in handles {
println!("Result: {}", *counter.lock().unwrap());
handle.join().unwrap();
}
println!("Result: {}", *counter.lock().unwrap());
}
trait Show {
fn show(&self) -> String;
}
impl Show for i32 {
fn show(&self) -> String {
format!("i32 value : {}", self)
}
}
impl Show for f64 {
fn show(&self) -> String {
format!("f64 value : {}", self)
}
}
trait Quack {
fn quack(&self);
}
struct Duck();
impl Quack for Duck {
fn quack(&self) {
println!("quack!");
}
}
struct RandomBird {
is_a_parrot: bool,
}
impl Quack for RandomBird {
fn quack(&self) {
if !self.is_a_parrot {
println!("quack!");
} else {
println!("squawk!");
}
}
}
// and why the hell not!
impl Quack for i32 {
fn quack(&self) {
for i in 0..*self {
print!("quack {} ", i);
}
println!();
}
}
trait Name {
fn name(&self) -> String;
fn upper_case(&self) -> String {
self.name().to_uppercase()
}
}
struct Toy();
impl Name for Toy {
fn name(&self) -> String {
"Toy".to_string()
}
}
fn quack() {
let duck1 = Duck();
let duck2 = RandomBird { is_a_parrot: false };
let parrot = RandomBird { is_a_parrot: true };
let i = 4;
let ducks: Vec<&Quack> = vec![&duck1, &duck2, &parrot, &i];
for d in &ducks {
d.quack();
}
let t = Toy();
assert_eq!(t.name(), "Toy".to_string());
assert_eq!(t.upper_case(), "TOY".to_string());
}
fn do_oop() {
let nvalue = Box::new(78);
let fvalue = Box::new(98.88);
let vc: Vec<Box<Show>> = vec![nvalue, fvalue];
for d in &vc {
println!("show {}", d.show());
}
//oop interface
quack();
}
fn do_float() {
let x = 2.0; // f64
let y: f32 = 3.0; // f32
println!("x:{}, y:{} ", x, y);
do_compound();
//expression
println!("zero number ; {}", zero_plus(23));
let a = [10, 20];
for element in a.iter() {
println!("the value is: {}", element);
}
for number in (1..4).rev() {
print!("{}, ", number);
}
//slice
let s = String::from("The Rust Programming Language");
let s1 = &s;
let s2 = &s;
println!("s1: {}, s2: {}", s1, s2);
let s3 = &s;
println!("s3: {}", s3);
string_slice();
do_struct();
do_map();
//do_err();
do_generic();
do_closure();
do_smart_p();
do_concurrency();
do_oop();
}
fn zero_plus(i: i32) -> i32 {
0 + i
}
#[derive(Debug)]
struct Rectangle {
width: u32,
height: u32,
}
//fn area(r: &Rectangle) -> u32 {
// r.height * r.width
//}
impl Rectangle {
fn area(&self) -> u32 {
self.height * self.width
}
fn can_hold(&self, other: &Rectangle) -> bool {
self.width > other.width && self.height > other.height
}
fn square(size: u32) -> Rectangle {
Rectangle {
width: size,
height: size,
}
}
}
fn do_struct() {
let rect1 = Rectangle {
width: 20,
height: 50,
};
let rect2 = Rectangle {
width: 10,
height: 40,
};
let rect3 = Rectangle {
width: 60,
height: 45,
};
println!("rect1 area: {}", rect1.area());
println!("Can rect1 hold rect2? {}", rect1.can_hold(&rect2));
println!("Can rect1 hold rect3? {}", rect1.can_hold(&rect3));
println!("rect1: {:?}", &(Rectangle::square(3)));
// println!(
// "The area of the rectangle is {} square pixels.",
// area(&rect1)
// );
// println!("rect1: {:?}", &rect1);
}
fn do_init() {
//mut and default immutable
let mut i = 0;
println!("init i :{}", i);
i = 100;
println!("change i: {}", i);
// const declare
const MAX_POINTS: u32 = 100_000;
println!("constant variable MAX_POINT: {}", MAX_POINTS);
//shadowing
let x = 5;
let x = x + 1;
let x = x * 2;
println!("The value of x is: {}", x);
let spaces = " ";
let spaces = spaces.len();
println!("space number :{}", spaces);
// floating-point numbers
do_float();
//guess_num()
}
use std::fmt;
fn show_item<T: fmt::Display>(item: T) {
println!("Item: {}", item);
}
struct CanDisplay;
impl fmt::Display for CanDisplay {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "CanDisplay")
}
}
struct AlsoDisplay;
impl fmt::Display for AlsoDisplay {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "AlsoDisplay")
}
}
//1. Static Dispatch
fn do_static_dispatch() {
let a: CanDisplay = CanDisplay;
let b: AlsoDisplay = AlsoDisplay;
show_item(a); // stdout `Item: CanDisplay`
show_item(b); // stdout `Item: AlsoDisplay`
}
fn get_numbers(a: u32, b: u32) -> impl Iterator<Item = u32> {
(a..b).filter(|x| x % 100 == 0)
}
//2. Dynamic Dispatch
// impl trait
fn do_advanced_trait() {
for n in get_numbers(100, 1001) {
print!("{} \t", n);
}
}
//3. Specifying Placeholder Types in Trait Definitions with Associated Types
// pub trait Iterator {
// type Item;
// fn next(&mut self) -> Option<Self::Item>; | random_line_split |
||
main.rs | y: other.y,
}
}
}
fn do_trait() {
let number_list = vec![34, 50, 25, 100, 65];
let result = get_gt(&number_list);
println!("The largest number is {}", result);
let char_list = vec!['y', 'm', 'a', 'q'];
let result = get_gt(&char_list);
println!("The largest char is {}", result);
}
fn do_generic() {
let number_list = vec![34, 50, 25, 100, 65];
let result = largest(&number_list);
println!("The largest number is {}", result);
let number_list = vec![102, 34, 6000, 89, 54, 2, 43, 8];
let result = largest(&number_list);
println!("The largest number is {}", result);
let p1 = Point { x: 5, y: 10.4 };
let p2 = Point { x: "Hello", y: 'c' };
let p3 = p1.mixup(p2);
println!("p3.x = {}, p3.y = {}", p3.x, p3.y);
do_trait()
}
fn do_closure() {
let v1 = vec![1, 2, 3];
let v1_iter = v1.iter();
let total: i32 = v1_iter.sum();
assert_eq!(total, 6);
let v1: Vec<i32> = vec![1, 2, 3];
let v2: Vec<_> = v1.iter().map(|x| x + 1).collect();
assert_eq!(v2, vec![2, 3, 4]);
guessing_number::run_shoes_test();
guessing_number::calling_next_directly();
}
fn do_smart_p() {
let x = 5;
let y = &x;
assert_eq!(5, x);
assert_eq!(5, *y);
let x1 = 5;
let y1 = Box::new(x);
assert_eq!(5, x1);
assert_eq!(5, *y1);
}
fn do_concurrency() {
use std::thread;
use std::time::Duration;
let handle = thread::spawn(|| {
for i in 1..6 {
println!("hi number {} from the spawned thread!", i);
thread::sleep(Duration::from_millis(1));
}
});
for i in 1..5 {
println!("hi number {} from the main thread!", i);
thread::sleep(Duration::from_millis(1));
}
handle.join().unwrap();
do_concurrency1();
}
fn do_concurrency1() {
use std::sync::mpsc;
use std::thread;
use std::time::Duration;
let (tx, rx) = mpsc::channel();
thread::spawn(move || {
let vals = vec![
String::from("你好!"),
String::from("你去做什么?"),
String::from("Why?"),
String::from("那很好呀!"),
];
for val in vals {
tx.send(val).unwrap();
// thread::sleep(Duration::from_secs(1));
}
});
for received in rx {
println!("Got: {}", received);
}
do_concurrency2();
do_concurrency3();
do_match()
}
fn do_match_p() {
println!("one");
}
fn do_match() {
let x = 1;
match x {
1 => do_match_p(),
2 => println!("two"),
3 => println!("three"),
_ => println!("anything"),
}
//Matching Named Variables
let x = Some(5);
let y = 10;
match x {
Some(50) => println!("Got 50"),
Some(y) => println!("Matched, y = {:?}", y),
_ => println!("Default case, x = {:?}", x),
}
println!("at the end: x = {:?}, y = {:?}", x, y);
let x = 1;
match x {
1 | 2 => println!("one or two"),
3 => println!("three"),
_ => println!("anything"),
}
let x = 2;
match x {
1...5 => println!("one through five"),
_ => println!("something else"),
}
let x = 'A';
match x {
'a'...'j' => println!("early ASCII letter"),
'k'...'z' => println!("late ASCII letter"),
'A'...'Z' => println!("UP ASCII letter"),
_ => println!("something else"),
}
//Destructuring to Break Apart Values
let p = Point { x: 0, y: 7 };
match p {
Point { x, y: 0 } => println!("On the x axis at {}", x),
Point { x: 0, y } => println!("On the y axis at {}", y),
Point { x, y } => println!("On neither axis: ({}, {})", x, y),
}
let msg = Message::ChangeColor(Color::Hsv(0, 160, 255));
match msg {
Message::ChangeColor(Color::Rgb(r, g, b)) => {
println!("Change the color to red {}, green {}, and blue {}", r, g, b)
}
Message::ChangeColor(Color::Hsv(h, s, v)) => println!(
"Change the color to hue {}, saturation {}, and value {}",
h, s, v
),
_ => (),
}
//bind
do_match1();
//Rust's unsafe code
do_unsafe();
}
//Rust unsafe code demo
fn do_unsafe() {
//doesn’t enforce these memory safety guarantees.
//Gaining extra superpowers.
//You can take four actions in unsafe Rust
//Dereference a raw pointer
//Call an unsafe function or method
//Access or modify a mutable static variable
//Implement an unsafe trait
}
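// Illustrative sketch (not part of the original file; the function name is a
// hypothetical addition and nothing calls it): a minimal example of the first
// superpower listed above, dereferencing raw pointers inside an `unsafe` block.
fn do_unsafe_example() {
    let mut num = 5;
    // Creating raw pointers is safe ...
    let r1 = &num as *const i32;
    let r2 = &mut num as *mut i32;
    // ... but dereferencing them requires `unsafe`.
    unsafe {
        println!("r1 is: {}", *r1);
        *r2 = 10;
        println!("r2 is: {}", *r2);
    }
}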
fn do_match1() {
let msg = MessageNum::Hello { id: 5 };
match msg {
MessageNum::Hello {
id: id_variable @ 3...7,
} => println!("Found an id in range: {}", id_variable),
MessageNum::Hello { id: 10...12 } => println!("Found an id in another range"),
MessageNum::Hello { id } => println!("Found some other id: {}", id),
}
}
enum MessageNum {
Hello { id: i32 },
}
enum Color {
Rgb(i32, i32, i32),
Hsv(i32, i32, i32),
}
enum Message {
Quit,
Move { x: i32, y: i32 },
Write(String),
ChangeColor(Color),
}
//Similarities Between RefCell<T>/Rc<T> and Mutex<T>/Arc<T>
fn do_concurrency2() {
use std::sync::mpsc;
use std::thread;
use std::time::Duration;
//VIP: producer and consumer model
let (tx, rx) = mpsc::channel();
let tx1 = mpsc::Sender::clone(&tx);
thread::spawn(move || {
let vals = vec![
String::from("1:你好!"),
String::from("1:你去做什么?"),
String::from("1:Why?"),
String::from("1:那很好呀!"),
];
for val in vals {
tx1.send(val).unwrap();
thread::sleep(Duration::from_secs(1));
}
});
thread::spawn(move || {
let vals = vec![
String::from("2:你好!"),
String::from("2:你去做什么?"),
String::from("2:Why?"),
String::from("2:那很好呀!"),
];
for val in vals {
tx.send(val).unwrap();
thread::sleep(Duration::from_secs(1));
}
});
for received in rx {
println!("Got: {}", received);
}
}
fn do_concurrency3() {
use std::sync::{Arc, Mutex};
use std::thread;
let counter = Arc::new(Mutex::new(0));
let mut handles = vec![];
for _ in 0..10 {
let counter = Arc::clone(&counter);
let handle = thread::spawn(move || {
let mut num = counter.lock().unwrap();
*num += 1;
});
handles.push(handle);
}
for handle in handles {
println!("Result: {}", *counter.lock().unwrap());
handle.join().unwrap();
}
println!("Result: {}", *counter.lock().unwrap());
}
trait Show {
fn show(&self) -> String;
}
impl Show for i32 {
fn show(&self) -> String {
format!("i32 value : {}", self)
}
}
impl Show for f64 {
fn show(&self) -> String {
format!("f64 value : {}", self)
}
}
trait Quack {
fn quack(& | self | identifier_name |
|
main.rs | {
1...5 => println!("one through five"),
_ => println!("something else"),
}
let x = 'A';
match x {
'a'...'j' => println!("early ASCII letter"),
'k'...'z' => println!("late ASCII letter"),
'A'...'Z' => println!("UP ASCII letter"),
_ => println!("something else"),
}
//Destructuring to Break Apart Values
let p = Point { x: 0, y: 7 };
match p {
Point { x, y: 0 } => println!("On the x axis at {}", x),
Point { x: 0, y } => println!("On the y axis at {}", y),
Point { x, y } => println!("On neither axis: ({}, {})", x, y),
}
let msg = Message::ChangeColor(Color::Hsv(0, 160, 255));
match msg {
Message::ChangeColor(Color::Rgb(r, g, b)) => {
println!("Change the color to red {}, green {}, and blue {}", r, g, b)
}
Message::ChangeColor(Color::Hsv(h, s, v)) => println!(
"Change the color to hue {}, saturation {}, and value {}",
h, s, v
),
_ => (),
}
//bind
do_match1();
//Rust's unsafe code
do_unsafe();
}
//Rust unsafe code demo
fn do_unsafe() {
//doesn’t enforce these memory safety guarantees.
//Gaining extra superpowers.
//You can take four actions in unsafe Rust
//Dereference a raw pointer
//Call an unsafe function or method
//Access or modify a mutable static variable
//Implement an unsafe trait
}
fn do_match1() {
let msg = MessageNum::Hello { id: 5 };
match msg {
MessageNum::Hello {
id: id_variable @ 3...7,
} => println!("Found an id in range: {}", id_variable),
MessageNum::Hello { id: 10...12 } => println!("Found an id in another range"),
MessageNum::Hello { id } => println!("Found some other id: {}", id),
}
}
enum MessageNum {
Hello { id: i32 },
}
enum Color {
Rgb(i32, i32, i32),
Hsv(i32, i32, i32),
}
enum Message {
Quit,
Move { x: i32, y: i32 },
Write(String),
ChangeColor(Color),
}
//Similarities Between RefCell<T>/Rc<T> and Mutex<T>/Arc<T>
fn do_concurrency2() {
use std::sync::mpsc;
use std::thread;
use std::time::Duration;
//VIP: producer and consumer model
let (tx, rx) = mpsc::channel();
let tx1 = mpsc::Sender::clone(&tx);
thread::spawn(move || {
let vals = vec![
String::from("1:你好!"),
String::from("1:你去做什么?"),
String::from("1:Why?"),
String::from("1:那很好呀!"),
];
for val in vals {
tx1.send(val).unwrap();
thread::sleep(Duration::from_secs(1));
}
});
thread::spawn(move || {
let vals = vec![
String::from("2:你好!"),
String::from("2:你去做什么?"),
String::from("2:Why?"),
String::from("2:那很好呀!"),
];
for val in vals {
tx.send(val).unwrap();
thread::sleep(Duration::from_secs(1));
}
});
for received in rx {
println!("Got: {}", received);
}
}
fn do_concurrency3() {
use std::sync::{Arc, Mutex};
use std::thread;
let counter = Arc::new(Mutex::new(0));
let mut handles = vec![];
for _ in 0..10 {
let counter = Arc::clone(&counter);
let handle = thread::spawn(move || {
let mut num = counter.lock().unwrap();
*num += 1;
});
handles.push(handle);
}
for handle in handles {
println!("Result: {}", *counter.lock().unwrap());
handle.join().unwrap();
}
println!("Result: {}", *counter.lock().unwrap());
}
trait Show {
fn show(&self) -> String;
}
impl Show for i32 {
fn show(&self) -> String {
format!("i32 value : {}", self)
}
}
impl Show for f64 {
fn show(&self) -> String {
format!("f64 value : {}", self)
}
}
trait Quack {
fn quack(&self);
}
struct Duck();
impl Quack for Duck {
fn quack(&self) {
println!("quack!");
}
}
struct RandomBird {
is_a_parrot: bool,
}
impl Quack for RandomBird {
fn quack(&self) {
if !self.is_a_parrot {
println!("quack!");
} else {
println!("squawk!");
}
}
}
// and why the hell not!
impl Quack for i32 {
fn quack(&self) {
for i in 0..*self {
print!("quack {} ", i);
}
println!();
}
}
trait Name {
fn name(&self) -> String;
fn upper_case(&self) -> String {
self.name().to_uppercase()
}
}
struct Toy();
impl Name for Toy {
fn name(&self) -> String {
"Toy".to_string()
}
}
fn quack() {
let duck1 = Duck();
let duck2 = RandomBird { is_a_parrot: false };
let parrot = RandomBird { is_a_parrot: true };
let i = 4;
let ducks: Vec<&Quack> = vec![&duck1, &duck2, &parrot, &i];
for d in &ducks {
d.quack();
}
let t = Toy();
assert_eq!(t.name(), "Toy".to_string());
assert_eq!(t.upper_case(), "TOY".to_string());
}
fn do_oop() {
let nvalue = Box::new(78);
let fvalue = Box::new(98.88);
let vc: Vec<Box<Show>> = vec![nvalue, fvalue];
for d in &vc {
println!("show {}", d.show());
}
//oop interface
quack();
}
fn do_float() {
let x = 2.0; // f64
let y: f32 = 3.0; // f32
println!("x:{}, y:{} ", x, y);
do_compound();
//expression
println!("zero number ; {}", zero_plus(23));
let a = [10, 20];
for element in a.iter() {
println!("the value is: {}", element);
}
for number in (1..4).rev() {
print!("{}, ", number);
}
//slice
let s = String::from("The Rust Programming Language");
let s1 = &s;
let s2 = &s;
println!("s1: {}, s2: {}", s1, s2);
let s3 = &s;
println!("s3: {}", s3);
string_slice();
do_struct();
do_map();
//do_err();
do_generic();
do_closure();
do_smart_p();
do_concurrency();
do_oop();
}
fn zero_plus(i: i32) -> i32 {
0 + i
}
#[derive(Debug)]
struct Rectangle {
width: u32,
height: u32,
}
//fn area(r: &Rectangle) -> u32 {
// r.height * r.width
//}
impl Rectangle {
fn area(&self) -> u32 {
self.height * self.width
}
fn can_hold(&self, other: &Rectangle) -> bool {
self.width > other.width && self.height > other.height
}
fn square(size: u32) -> Rectangle {
Rectangle {
width: size,
height: size,
}
}
}
fn do_struct() {
let rect1 = Rectangle {
width: 20,
height: 50,
};
let rect2 = Rectangl | e {
width: 10,
height: 40,
};
let rect3 = Rectangle {
width: 60,
height: 45,
};
println!("rect1 area: {}", rect1.area());
println!("Can rect1 hold rect2? {}", rect1.can_hold(&rect2));
println!("Can rect1 hold rect3? {}", rect1.can_hold(&rect3));
println!("rect1: {:?}", &(Rectangle::square(3)));
// println!(
// "The area of the rectangle is {} square pixels.",
// area(&rect1)
// );
// println!("rect1: {:?}", &rect1);
} | identifier_body |
|
loadtest_types.go |
// components.
type Server struct {
// Name is a string that distinguishes this server from others in the test.
// Since tests are currently limited to one server, setting this field is not
// recommended. If no name is explicitly provided, the
// operator will assign one.
// +optional
Name *string `json:"name,omitempty"`
// Language is the code that identifies the programming language used by the
// server. For example, "java" may represent Java.
//
// Specifying a language is required. If the language is unknown to the
// operator, a user must manually set a run image. If the user intends for
// the operator to clone and build code, it must also manually set a build
// image.
Language string `json:"language"`
// Pool specifies the name of the set of nodes where this server should be
// scheduled. If unset, the controller will choose a pool based on defaults.
// +optional
Pool *string `json:"pool,omitempty"`
// Clone specifies the repository and snapshot where the code for the server
// can be found. This field should not be set if the code has been prebuilt
// in the run image.
// +optional
Clone *Clone `json:"clone,omitempty"`
// Build describes how the cloned code should be built, including any
// compiler arguments or flags. This field is only necessary if the output
// from the clone container must be pre-processed before running the tests
// in the run container.
//
// When build is specified on a test, the operator will use the server's
// language to find a container with a compiler for that language. If the
// language is unknown to the operator, a user must include a custom docker
// image.
//
// Note that it does not usually make sense to include build instructions
// without clone instructions. If doing so, the build container must include
// its input and write its output into the /src/workspace directory for the
// run container to access it.
// +optional
Build *Build `json:"build,omitempty"`
// Run describes a list of run containers. The container for the test server is always
// the first container on the list.
Run []corev1.Container `json:"run"`
}
// Client defines a component that sends traffic to a server component.
type Client struct {
// Name is a string that distinguishes this client from others in the test.
// Explicitly setting a name is recommended when it is helpful to
// differentiate between multiple clients. For example, a test may use
// clients with different settings.
//
// Most often, this field will not be set. When unset, the operator will
// assign a name to the client.
// +optional
Name *string `json:"name,omitempty"`
// Language is the code that identifies the programming language used by the
// client. For example, "go" may represent Go.
//
// Specifying a language is required. If the language is unknown to the
// operator, a user must manually set a run image. If the user intends for
// the operator to clone and build code, it must also manually set a build
// image.
Language string `json:"language"`
// Pool specifies the name of the set of nodes where this client should be
// scheduled. If unset, the controller will choose a pool based on defaults.
// +optional
Pool *string `json:"pool,omitempty"`
// Clone specifies the repository and snapshot where the code for the client
// can be found. This field should not be set if the code has been prebuilt
// in the run image.
// +optional
Clone *Clone `json:"clone,omitempty"`
// Build describes how the cloned code should be built, including any
// compiler arguments or flags. This field is only necessary if the output
// from the clone container must be pre-processed before running the tests
// in the run container.
//
// When build is specified on a test, the operator will use the client's
// language to find a container with a compiler for that language. If the
// language is unknown to the operator, a user must include a custom docker
// image.
//
// Note that it does not usually make sense to include build instructions
// without clone instructions. If doing so, the build container must include
// its input and write its output into the /src/workspace directory for the
// run container to access it.
// +optional
Build *Build `json:"build,omitempty"`
// Run describes a list of run containers. The container for the test client is always
// the first container on the list.
Run []corev1.Container `json:"run"`
}
// Results defines where and how test results and artifacts should be
// stored.
type Results struct {
// BigQueryTable names a dataset where the results of the test
// should be stored. If omitted, no results are saved to BigQuery.
// +optional
BigQueryTable *string `json:"bigQueryTable,omitempty"`
}
// LoadTestSpec defines the desired state of LoadTest
type LoadTestSpec struct {
// Driver is the component that orchestrates the test. It may be
// unspecified, allowing the system to choose the appropriate driver.
// +optional
Driver *Driver `json:"driver,omitempty"`
// Servers are a list of components that receive traffic from
// clients.
// +optional
Servers []Server `json:"servers,omitempty"`
// Clients are a list of components that send traffic to servers.
// +optional
Clients []Client `json:"clients,omitempty"`
// Results configures where the results of the test should be
// stored. When omitted, the results will only be stored in
// Kubernetes for a limited time.
// +optional
Results *Results `json:"results,omitempty"`
// ScenariosJSON is string with the contents of a Scenarios message,
// formatted as JSON. See the Scenarios protobuf definition for details:
// https://github.com/grpc/grpc-proto/blob/master/grpc/testing/control.proto.
// +optional
ScenariosJSON string `json:"scenariosJSON,omitempty"`
// Timeout provides the longest running time allowed for a LoadTest.
// +kubebuilder:validation:Minimum:=1
TimeoutSeconds int32 `json:"timeoutSeconds"`
// TTL provides the longest time a LoadTest can live on the cluster.
// +kubebuilder:validation:Minimum:=1
TTLSeconds int32 `json:"ttlSeconds"`
}
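// Illustrative sketch (not part of the original file): a minimal manifest shaped like
// the spec above. Every concrete value below (names, images, the language string, the
// group/version) is a hypothetical placeholder; only the field names come from these
// Go types.
//
//   apiVersion: <crd-group>/<version>   # placeholder
//   kind: LoadTest
//   metadata:
//     name: example-test
//   spec:
//     timeoutSeconds: 900
//     ttlSeconds: 86400
//     servers:
//       - language: cxx
//         run:
//           - name: main
//             image: example.invalid/cxx-worker:latest
//     clients:
//       - language: cxx
//         run:
//           - name: main
//             image: example.invalid/cxx-worker:latest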
// LoadTestState reflects the derived state of the load test from its
// components. If any one component has errored, the load test will be marked in
// an Errored state, too. This will occur even if the other components are
// running or succeeded.
// +kubebuilder:default=Unknown
type LoadTestState string
const (
// Unknown states indicate that the load test is in an indeterminate state.
// Something may have gone wrong, but it may be recoverable. No assumption
// should be made about the next state. It may transition to any other state
// or remain Unknown until a timeout occurs.
Unknown LoadTestState = "Unknown"
// Initializing states indicate that load test's pods are under construction.
// This may mean that code is being cloned, built or assembled.
Initializing LoadTestState = "Initializing"
// Running states indicate that the initialization for a load test's pods has
// completed successfully. The run container has started.
Running LoadTestState = "Running"
// Succeeded states indicate the driver pod's run container has terminated
// successfully, signaled by a zero exit code.
Succeeded LoadTestState = "Succeeded"
// Errored states indicate the load test encountered a problem that prevented
// a successful run.
Errored LoadTestState = "Errored"
)
// IsTerminated returns true if the test has finished due to a success, failure
// or error. Otherwise, it returns false.
func (lts LoadTestState) IsTerminated() bool {
return lts == Succeeded || lts == Errored
}
// InitContainerError is the reason string when an init container has failed on
// one of the load test's pods.
var InitContainerError = "InitContainerError"
// ContainerError is the reason string when a container has failed on one of the
// load test's pods.
var ContainerError = "ContainerError"
// FailedSettingDefaultsError is the reason string when defaults could not be
// set on a load test.
var FailedSettingDefaultsError = "FailedSettingDefaults"
// ConfigurationError is the reason string when a LoadTest spec is invalid.
var ConfigurationError = "ConfigurationError"
// PodsMissing is the reason string when the load test is missing pods and is still
// in the Initializing state.
var PodsMissing = "PodsMissing"
// PoolError is the reason string when a driver, client or server requires nodes
// from a nonexistent pool.
var PoolError = "PoolError"
// TimeoutErrored is the reason string when the load test has not yet terminated
// but exceeded the timeout.
var TimeoutErrored = "TimeoutErrored" |
// KubernetesError is the reason string when an issue occurs with Kubernetes
// that is not known to be directly related to a load test.
var KubernetesError = "KubernetesError"
| random_line_split |
|
loadtest_types.go | output into the /src/workspace directory for the
// run container to access it.
// +optional
Build *Build `json:"build,omitempty"`
// Run describes a list of run containers. The container for the test server is always
// the first container on the list.
Run []corev1.Container `json:"run"`
}
// Client defines a component that sends traffic to a server component.
type Client struct {
// Name is a string that distinguishes this client from others in the test.
// Explicitly setting a name is recommended when it is helpful to
// differentiate between multiple clients. For example, a test may use
// clients with different settings.
//
// Most often, this field will not be set. When unset, the operator will
// assign a name to the client.
// +optional
Name *string `json:"name,omitempty"`
// Language is the code that identifies the programming language used by the
// client. For example, "go" may represent Go.
//
// Specifying a language is required. If the language is unknown to the
// operator, a user must manually set a run image. If the user intends for
// the operator to clone and build code, it must also manually set a build
// image.
Language string `json:"language"`
// Pool specifies the name of the set of nodes where this client should be
// scheduled. If unset, the controller will choose a pool based on defaults.
// +optional
Pool *string `json:"pool,omitempty"`
// Clone specifies the repository and snapshot where the code for the client
// can be found. This field should not be set if the code has been prebuilt
// in the run image.
// +optional
Clone *Clone `json:"clone,omitempty"`
// Build describes how the cloned code should be built, including any
// compiler arguments or flags. This field is only necessary if the output
// from the clone container must be pre-processed before running the tests
// in the run container.
//
// When build is specified on a test, the operator will use the client's
// language to find a container with a compiler for that language. If the
// language is unknown to the operator, a user must include a custom docker
// image.
//
// Note that it does not usually make sense to include build instructions
// without clone instructions. If doing so, the build container must include
// its input and write its output into the /src/workspace directory for the
// run container to access it.
// +optional
Build *Build `json:"build,omitempty"`
// Run describes a list of run containers. The container for the test client is always
// the first container on the list.
Run []corev1.Container `json:"run"`
}
// Results defines where and how test results and artifacts should be
// stored.
type Results struct {
// BigQueryTable names a dataset where the results of the test
// should be stored. If omitted, no results are saved to BigQuery.
// +optional
BigQueryTable *string `json:"bigQueryTable,omitempty"`
}
// LoadTestSpec defines the desired state of LoadTest
type LoadTestSpec struct {
// Driver is the component that orchestrates the test. It may be
// unspecified, allowing the system to choose the appropriate driver.
// +optional
Driver *Driver `json:"driver,omitempty"`
// Servers are a list of components that receive traffic from
// clients.
// +optional
Servers []Server `json:"servers,omitempty"`
// Clients are a list of components that send traffic to servers.
// +optional
Clients []Client `json:"clients,omitempty"`
// Results configures where the results of the test should be
// stored. When omitted, the results will only be stored in
// Kubernetes for a limited time.
// +optional
Results *Results `json:"results,omitempty"`
// ScenariosJSON is string with the contents of a Scenarios message,
// formatted as JSON. See the Scenarios protobuf definition for details:
// https://github.com/grpc/grpc-proto/blob/master/grpc/testing/control.proto.
// +optional
ScenariosJSON string `json:"scenariosJSON,omitempty"`
// Timeout provides the longest running time allowed for a LoadTest.
// +kubebuilder:validation:Minimum:=1
TimeoutSeconds int32 `json:"timeoutSeconds"`
// TTL provides the longest time a LoadTest can live on the cluster.
// +kubebuilder:validation:Minimum:=1
TTLSeconds int32 `json:"ttlSeconds"`
}
// LoadTestState reflects the derived state of the load test from its
// components. If any one component has errored, the load test will be marked in
// an Errored state, too. This will occur even if the other components are
// running or succeeded.
// +kubebuilder:default=Unknown
type LoadTestState string
const (
// Unknown states indicate that the load test is in an indeterminate state.
// Something may have gone wrong, but it may be recoverable. No assumption
// should be made about the next state. It may transition to any other state
// or remain Unknown until a timeout occurs.
Unknown LoadTestState = "Unknown"
// Initializing states indicate that load test's pods are under construction.
// This may mean that code is being cloned, built or assembled.
Initializing LoadTestState = "Initializing"
// Running states indicate that the initialization for a load test's pods has
// completed successfully. The run container has started.
Running LoadTestState = "Running"
// Succeeded states indicate the driver pod's run container has terminated
// successfully, signaled by a zero exit code.
Succeeded LoadTestState = "Succeeded"
// Errored states indicate the load test encountered a problem that prevented
// a successful run.
Errored LoadTestState = "Errored"
)
// IsTerminated returns true if the test has finished due to a success, failure
// or error. Otherwise, it returns false.
func (lts LoadTestState) IsTerminated() bool {
return lts == Succeeded || lts == Errored
}
// InitContainerError is the reason string when an init container has failed on
// one of the load test's pods.
var InitContainerError = "InitContainerError"
// ContainerError is the reason string when a container has failed on one of the
// load test's pods.
var ContainerError = "ContainerError"
// FailedSettingDefaultsError is the reason string when defaults could not be
// set on a load test.
var FailedSettingDefaultsError = "FailedSettingDefaults"
// ConfigurationError is the reason string when a LoadTest spec is invalid.
var ConfigurationError = "ConfigurationError"
// PodsMissing is the reason string when the load test is missing pods and is still
// in the Initializing state.
var PodsMissing = "PodsMissing"
// PoolError is the reason string when a driver, client or server requires nodes
// from a nonexistent pool.
var PoolError = "PoolError"
// TimeoutErrored is the reason string when the load test has not yet terminated
// but exceeded the timeout.
var TimeoutErrored = "TimeoutErrored"
// KubernetesError is the reason string when an issue occurs with Kubernetes
// that is not known to be directly related to a load test.
var KubernetesError = "KubernetesError"
// LoadTestStatus defines the observed state of LoadTest
type LoadTestStatus struct {
// State identifies the current state of the load test. It is
// important to note that this state is level-based. This means its
// transition is non-deterministic.
State LoadTestState `json:"state"`
// Reason is a camel-case string that indicates the reasoning behind the
// current state.
// +optional
Reason string `json:"reason,omitempty"`
// Message is a human legible string that describes the current state.
// +optional
Message string `json:"message,omitempty"`
// StartTime is the time when the controller first reconciled the load test.
// It is maintained in a best-attempt effort; meaning, it is not guaranteed to
// be correct.
// +optional
StartTime *metav1.Time `json:"startTime,omitempty"`
// StopTime is the time when the controller last entered the Succeeded,
// Failed or Errored states.
// +optional
StopTime *metav1.Time `json:"stopTime,omitempty"`
}
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// LoadTest is the Schema for the loadtests API
// +kubebuilder:printcolumn:name="State",type=string,JSONPath=`.status.state`
// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp`
type LoadTest struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec LoadTestSpec `json:"spec,omitempty"`
Status LoadTestStatus `json:"status,omitempty"`
}
// +kubebuilder:object:root=true
// LoadTestList contains a list of LoadTest
type LoadTestList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []LoadTest `json:"items"`
}
func | init | identifier_name |
|
loadtest_types.go | it.
// +optional
Build *Build `json:"build,omitempty"`
// Run describes a list of run containers. The container for the test server is always
// the first container on the list.
Run []corev1.Container `json:"run"`
}
// Client defines a component that sends traffic to a server component.
type Client struct {
// Name is a string that distinguishes this client from others in the test.
// Explicitly setting a name is recommended when it is helpful to
// differentiate between multiple clients. For example, a test may use
// clients with different settings.
//
// Most often, this field will not be set. When unset, the operator will
// assign a name to the client.
// +optional
Name *string `json:"name,omitempty"`
// Language is the code that identifies the programming language used by the
// client. For example, "go" may represent Go.
//
// Specifying a language is required. If the language is unknown to the
// operator, a user must manually set a run image. If the user intends for
// the operator to clone and build code, it must also manually set a build
// image.
Language string `json:"language"`
// Pool specifies the name of the set of nodes where this client should be
// scheduled. If unset, the controller will choose a pool based on defaults.
// +optional
Pool *string `json:"pool,omitempty"`
// Clone specifies the repository and snapshot where the code for the client
// can be found. This field should not be set if the code has been prebuilt
// in the run image.
// +optional
Clone *Clone `json:"clone,omitempty"`
// Build describes how the cloned code should be built, including any
// compiler arguments or flags. This field is only necessary if the output
// from the clone container must be pre-processed before running the tests
// in the run container.
//
// When build is specified on a test, the operator will use the client's
// language to find a container with a compiler for that language. If the
// language is unknown to the operator, a user must include a custom docker
// image.
//
// Note that it does not usually make sense to include build instructions
// without clone instructions. If doing so, the build container must include
// its input and write its output into the /src/workspace directory for the
// run container to access it.
// +optional
Build *Build `json:"build,omitempty"`
// Run describes a list of run containers. The container for the test client is always
// the first container on the list.
Run []corev1.Container `json:"run"`
}
// Results defines where and how test results and artifacts should be
// stored.
type Results struct {
// BigQueryTable names a dataset where the results of the test
// should be stored. If omitted, no results are saved to BigQuery.
// +optional
BigQueryTable *string `json:"bigQueryTable,omitempty"`
}
// LoadTestSpec defines the desired state of LoadTest
type LoadTestSpec struct {
// Driver is the component that orchestrates the test. It may be
// unspecified, allowing the system to choose the appropriate driver.
// +optional
Driver *Driver `json:"driver,omitempty"`
// Servers are a list of components that receive traffic from
// clients.
// +optional
Servers []Server `json:"servers,omitempty"`
// Clients are a list of components that send traffic to servers.
// +optional
Clients []Client `json:"clients,omitempty"`
// Results configures where the results of the test should be
// stored. When omitted, the results will only be stored in
// Kubernetes for a limited time.
// +optional
Results *Results `json:"results,omitempty"`
// ScenariosJSON is string with the contents of a Scenarios message,
// formatted as JSON. See the Scenarios protobuf definition for details:
// https://github.com/grpc/grpc-proto/blob/master/grpc/testing/control.proto.
// +optional
ScenariosJSON string `json:"scenariosJSON,omitempty"`
// Timeout provides the longest running time allowed for a LoadTest.
// +kubebuilder:validation:Minimum:=1
TimeoutSeconds int32 `json:"timeoutSeconds"`
// TTL provides the longest time a LoadTest can live on the cluster.
// +kubebuilder:validation:Minimum:=1
TTLSeconds int32 `json:"ttlSeconds"`
}
// LoadTestState reflects the derived state of the load test from its
// components. If any one component has errored, the load test will be marked in
// an Errored state, too. This will occur even if the other components are
// running or succeeded.
// +kubebuilder:default=Unknown
type LoadTestState string
const (
// Unknown states indicate that the load test is in an indeterminate state.
// Something may have gone wrong, but it may be recoverable. No assumption
// should be made about the next state. It may transition to any other state
// or remain Unknown until a timeout occurs.
Unknown LoadTestState = "Unknown"
// Initializing states indicate that load test's pods are under construction.
// This may mean that code is being cloned, built or assembled.
Initializing LoadTestState = "Initializing"
// Running states indicate that the initialization for a load test's pods has
// completed successfully. The run container has started.
Running LoadTestState = "Running"
// Succeeded states indicate the driver pod's run container has terminated
// successfully, signaled by a zero exit code.
Succeeded LoadTestState = "Succeeded"
// Errored states indicate the load test encountered a problem that prevented
// a successful run.
Errored LoadTestState = "Errored"
)
// IsTerminated returns true if the test has finished due to a success, failure
// or error. Otherwise, it returns false.
func (lts LoadTestState) IsTerminated() bool {
return lts == Succeeded || lts == Errored
}
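// isFailure is an illustrative helper, not part of the original API: it uses
// only the state values declared above and shows that, of the two terminal
// states, only Errored counts as a failure.
func isFailure(s LoadTestState) bool {
	return s.IsTerminated() && s != Succeeded
}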
// InitContainerError is the reason string when an init container has failed on
// one of the load test's pods.
var InitContainerError = "InitContainerError"
// ContainerError is the reason string when a container has failed on one of the
// load test's pods.
var ContainerError = "ContainerError"
// FailedSettingDefaultsError is the reason string when defaults could not be
// set on a load test.
var FailedSettingDefaultsError = "FailedSettingDefaults"
// ConfigurationError is the reason string when a LoadTest spec is invalid.
var ConfigurationError = "ConfigurationError"
// PodsMissing is the reason string when the load test is missing pods and is still
// in the Initializing state.
var PodsMissing = "PodsMissing"
// PoolError is the reason string when a driver, client or server requires nodes
// from a nonexistent pool.
var PoolError = "PoolError"
// TimeoutErrored is the reason string when the load test has not yet terminated
// but exceeded the timeout.
var TimeoutErrored = "TimeoutErrored"
// KubernetesError is the reason string when an issue occurs with Kubernetes
// that is not known to be directly related to a load test.
var KubernetesError = "KubernetesError"
// LoadTestStatus defines the observed state of LoadTest
type LoadTestStatus struct {
// State identifies the current state of the load test. It is
// important to note that this state is level-based. This means its
// transition is non-deterministic.
State LoadTestState `json:"state"`
// Reason is a camel-case string that indicates the reasoning behind the
// current state.
// +optional
Reason string `json:"reason,omitempty"`
// Message is a human legible string that describes the current state.
// +optional
Message string `json:"message,omitempty"`
// StartTime is the time when the controller first reconciled the load test.
// It is maintained on a best-effort basis, meaning it is not guaranteed to
// be correct.
// +optional
StartTime *metav1.Time `json:"startTime,omitempty"`
// StopTime is the time when the controller last entered the Succeeded,
// Failed or Errored states.
// +optional
StopTime *metav1.Time `json:"stopTime,omitempty"`
}
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// LoadTest is the Schema for the loadtests API
// +kubebuilder:printcolumn:name="State",type=string,JSONPath=`.status.state`
// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp`
type LoadTest struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec LoadTestSpec `json:"spec,omitempty"`
Status LoadTestStatus `json:"status,omitempty"`
}
// +kubebuilder:object:root=true
// LoadTestList contains a list of LoadTest
type LoadTestList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []LoadTest `json:"items"`
}
func init() | {
SchemeBuilder.Register(&LoadTest{}, &LoadTestList{})
} | identifier_body |
|
tgsw.rs | let tlwe_params = params.tlwe_params.clone();
let tlwe_key = TLweKey::generate(&tlwe_params);
Self {
params: params.clone(),
// tlwe_params,
tlwe_key,
}
}
pub(crate) fn encrypt(&self, result: &mut TGswSample, message: i32, alpha: f64) {
result.encrypt_zero(alpha, self);
result.add_mu_int_h(message, &self.params);
}
}
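// Minimal usage sketch (not part of the original source): generating a key and
// encrypting a small integer. The parameter values mirror the ones used in the
// tests in this file and are otherwise arbitrary assumptions.
//
// let params = TGswParams::new(4, 8, TLweParameters::new(1024, 1, 0f64, 1f64));
// let key = TGswKey::generate(&params);
// let mut sample = TGswSample::new(&params);
// key.encrypt(&mut sample, 1, 1e-9); // message = 1, noise parameter alpha = 1e-9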
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
pub struct TGswSample {
/// (k+1)*l TLwe Sample
all_sample: Vec<Vec<TLweSample>>,
/// Horizontal blocks (l lines) of TGSW matrix
// bloc_sample: Vec<TLweSample>,
k: i32,
l: i32,
}
impl TGswSample {
pub(crate) fn new(params: &TGswParams) -> Self {
let k = params.tlwe_params.k;
// Lines / rows
let l = params.l;
// TODO: find out if this is correct
let all_sample = vec![vec![TLweSample::new(¶ms.tlwe_params); (k + 1) as usize]; l as usize];
Self { all_sample, k, l }
}
pub(crate) fn encrypt_zero(&mut self, alpha: f64, key: &TGswKey) {
let rl_key = &key.tlwe_key;
self.all_sample[0]
.iter_mut()
.for_each(|s| s.encrypt_zero(alpha, rl_key));
// for p in 0..kpl as usize {
// self.all_sample[0][p].encrypt_zero(alpha, rl_key);
// }
}
pub(crate) fn add_mu_int_h(&mut self, message: i32, params: &TGswParams) {
let h = ¶ms.h;
// TFHE comment: Compute self += H
// My comment: Compute self += H * message (ish)
self.all_sample = self
.all_sample
.iter()
.enumerate()
.map(|(i, is): (usize, &Vec<TLweSample>)| {
is.iter()
.map(|js: &TLweSample| {
let new_a: Vec<TorusPolynomial> = js
.a
.iter()
.map(|a: &TorusPolynomial| {
let new_coefs = a
.coefs
.iter()
.enumerate()
.map(|(coef_idx, coef): (usize, &i32)| {
if coef_idx == 0 {
coef + message * h[i]
} else {
*coef
}
})
.collect::<Vec<Torus32>>();
TorusPolynomial::from(new_coefs)
})
.collect();
TLweSample {
a: new_a,
..js.clone()
}
})
.collect()
})
.collect();
// Is equivalent to:
// for i in 0..l as usize {
// for j in 0..=k as usize {
// self.all_sample[i][j].a[j].coefs[0] += message * h[i];
// }
// }
}
#[allow(clippy::needless_range_loop)]
pub(crate) fn add_h(&mut self, params: &TGswParams) {
let k = params.tlwe_params.k;
let l = params.l;
let h = ¶ms.h;
// compute self += H
for i in 0..l as usize {
for j in 0..=k as usize {
self.all_sample[i][j].a[j].coefs[0] += h[i];
}
}
}
#[allow(clippy::needless_range_loop)]
pub(crate) fn add_mu_h(&mut self, message: &IntPolynomial, params: &TGswParams) {
let k = params.tlwe_params.k;
let l = params.l;
let h = ¶ms.h;
let mu = &message.coefs;
// Compute self += H
for i in 0..l as usize {
for j in 0..=k as usize {
let target = &mut self.all_sample[i][j].a[j].coefs;
println!("target coefs befor: {:?}", target);
target
.iter_mut()
.zip_eq(mu.iter())
.for_each(|(t, mu)| *t += mu * h[i]);
println!("target coefs after: {:?}", target);
// for jj in 0..n as usize {
// println!(
// "Target len: {}, mu len: {}, h len: {}, jj: {}, n: {}",
// target.len(),
// mu.len(),
// h.len(),
// jj,
// n
// );
// target[jj] += mu[jj] * h[i];
// }
}
}
}
}
/// Updates the accumulator (line 5 of the algorithm)
/// void tGswTLweDecompH(IntPolynomial* result, const TLweSample* sample,const TGswParams* params);
/// accum * sample
pub(crate) fn tgsw_extern_mul_to_tlwe(
accum: &TLweSample,
sample: &TGswSample,
params: &TGswParams,
) -> TLweSample {
let par = ¶ms.tlwe_params;
let mut result = TLweSample {
a: accum
.a
.iter()
.map(|polynomial| TorusPolynomial::zero(polynomial.len()))
.collect(),
current_variance: 0_f64,
k: accum.k,
};
let mut dec = tgsw_tlwe_decomposition_h(accum, params);
let outer = dec.len();
let inner = dec[0].len();
let mut new_dec = vec![vec![IntPolynomial::zero(0); outer]; inner];
#[allow(clippy::needless_range_loop)]
for x in 0..inner {
for y in 0..outer {
std::mem::swap(&mut new_dec[x][y], &mut dec[y][x]);
}
}
let dec = new_dec;
dec
.iter()
.flatten()
.zip_eq(sample.all_sample.iter().flatten())
.for_each(|(d, a)| result.add_mul_r_(d, a, par));
result
// for i in 0..dec.len() as usize {
// println!("kpl: {}, k: {}, l: {}, i: {}", kpl, par.k, params.l, i);
// // TODO: Figure out if this is supposed to be [0][i] instead, or something else...
// let d = &dec[i][0];
// let ass = &sample.all_sample[i][0];
// accum.add_mul_r_(d, ass, par);
// }
// for (int32_t i = 0; i < kpl; i++) {
// tLweAddMulRTo(accum, &dec[i], &sample->all_sample[i], par);
// }
}
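// Illustrative note, not from the original source: this function is the usual
// TGSW "external product". The accumulator is gadget-decomposed into small
// IntPolynomials and the result is the dot product of that decomposition with
// the rows of the TGSW sample, roughly
//
//   result = sum_i dec_i(accum) * sample.all_sample[i]
//
// which homomorphically multiplies the TLWE ciphertext by the small message
// hidden inside the TGSW sample.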
/// Decompose a TLwe-sample by the given TGsw parameters
fn tgsw_tlwe_decomposition_h(sample: &TLweSample, params: &TGswParams) -> Vec<Vec<IntPolynomial>> {
let tlwe_params = ¶ms.tlwe_params;
let k = tlwe_params.k;
let mut result =
vec![vec![IntPolynomial::new(tlwe_params.n); params.l as usize]; (tlwe_params.k + 1) as usize];
for i in 0..=k {
// b=a[k]
tgsw_torus32_polynomial_decomposition_h(
&mut result[(i/* /* TODO: Remove this when you figure this out: Don't think this is necessary? */ * l*/)
as usize],
&sample.a[i as usize],
params,
);
// tGswTorus32PolynomialDecompH(result + (i * l), &sample->a[i], params);
}
result
}
fn tgsw_torus32_polynomial_decomposition_h(
result: &mut Vec<IntPolynomial>,
sample: &TorusPolynomial,
params: &TGswParams,
) | {
let n = params.tlwe_params.n;
let l = params.l;
let bg_bit = params.bg_bit;
let mask_mod = params.mask_mod;
let half_bg = params.half_bg;
let offset = params.offset;
// First, add offset to everyone
let buf: Vec<i32> = sample
.coefs
.iter()
.map(|c| c.wrapping_add(offset as i32))
.collect();
// Then, do the decomposition (TODO: in parallel)
#[allow(clippy::needless_range_loop)]
for p in 0..l as usize {
let decal = 32 - (p + 1) as i32 * bg_bit;
let res_p: &mut Vec<i32> = &mut result[p].coefs; | identifier_body |
|
tgsw.rs | {
let new_a: Vec<TorusPolynomial> = js
.a
.iter()
.map(|a: &TorusPolynomial| {
let new_coefs = a
.coefs
.iter()
.enumerate()
.map(|(coef_idx, coef): (usize, &i32)| {
if coef_idx == 0 {
coef + message * h[i]
} else {
*coef
}
})
.collect::<Vec<Torus32>>();
TorusPolynomial::from(new_coefs)
})
.collect();
TLweSample {
a: new_a,
..js.clone()
}
})
.collect()
})
.collect();
// Is equivalent to:
// for i in 0..l as usize {
// for j in 0..=k as usize {
// self.all_sample[i][j].a[j].coefs[0] += message * h[i];
// }
// }
}
#[allow(clippy::needless_range_loop)]
pub(crate) fn add_h(&mut self, params: &TGswParams) {
let k = params.tlwe_params.k;
let l = params.l;
let h = ¶ms.h;
// compute self += H
for i in 0..l as usize {
for j in 0..=k as usize {
self.all_sample[i][j].a[j].coefs[0] += h[i];
}
}
}
#[allow(clippy::needless_range_loop)]
pub(crate) fn add_mu_h(&mut self, message: &IntPolynomial, params: &TGswParams) {
let k = params.tlwe_params.k;
let l = params.l;
let h = ¶ms.h;
let mu = &message.coefs;
// Compute self += H
for i in 0..l as usize {
for j in 0..=k as usize {
let target = &mut self.all_sample[i][j].a[j].coefs;
println!("target coefs befor: {:?}", target);
target
.iter_mut()
.zip_eq(mu.iter())
.for_each(|(t, mu)| *t += mu * h[i]);
println!("target coefs after: {:?}", target);
// for jj in 0..n as usize {
// println!(
// "Target len: {}, mu len: {}, h len: {}, jj: {}, n: {}",
// target.len(),
// mu.len(),
// h.len(),
// jj,
// n
// );
// target[jj] += mu[jj] * h[i];
// }
}
}
}
}
/// Updates the accumulator (line 5 of the algorithm)
/// void tGswTLweDecompH(IntPolynomial* result, const TLweSample* sample,const TGswParams* params);
/// accum * sample
pub(crate) fn tgsw_extern_mul_to_tlwe(
accum: &TLweSample,
sample: &TGswSample,
params: &TGswParams,
) -> TLweSample {
let par = ¶ms.tlwe_params;
let mut result = TLweSample {
a: accum
.a
.iter()
.map(|polynomial| TorusPolynomial::zero(polynomial.len()))
.collect(),
current_variance: 0_f64,
k: accum.k,
};
let mut dec = tgsw_tlwe_decomposition_h(accum, params);
let outer = dec.len();
let inner = dec[0].len();
let mut new_dec = vec![vec![IntPolynomial::zero(0); outer]; inner];
#[allow(clippy::needless_range_loop)]
for x in 0..inner {
for y in 0..outer {
std::mem::swap(&mut new_dec[x][y], &mut dec[y][x]);
}
}
let dec = new_dec;
dec
.iter()
.flatten()
.zip_eq(sample.all_sample.iter().flatten())
.for_each(|(d, a)| result.add_mul_r_(d, a, par));
result
// for i in 0..dec.len() as usize {
// println!("kpl: {}, k: {}, l: {}, i: {}", kpl, par.k, params.l, i);
// // TODO: Figure out if this is supposed to be [0][i] instead, or something else...
// let d = &dec[i][0];
// let ass = &sample.all_sample[i][0];
// accum.add_mul_r_(d, ass, par);
// }
// for (int32_t i = 0; i < kpl; i++) {
// tLweAddMulRTo(accum, &dec[i], &sample->all_sample[i], par);
// }
}
/// Decompose a TLwe-sample by the given TGsw parameters
fn tgsw_tlwe_decomposition_h(sample: &TLweSample, params: &TGswParams) -> Vec<Vec<IntPolynomial>> {
let tlwe_params = ¶ms.tlwe_params;
let k = tlwe_params.k;
let mut result =
vec![vec![IntPolynomial::new(tlwe_params.n); params.l as usize]; (tlwe_params.k + 1) as usize];
for i in 0..=k {
// b=a[k]
tgsw_torus32_polynomial_decomposition_h(
&mut result[(i/* /* TODO: Remove this when you figure this out: Don't think this is necessary? */ * l*/)
as usize],
&sample.a[i as usize],
params,
);
// tGswTorus32PolynomialDecompH(result + (i * l), &sample->a[i], params);
}
result
}
fn tgsw_torus32_polynomial_decomposition_h(
result: &mut Vec<IntPolynomial>,
sample: &TorusPolynomial,
params: &TGswParams,
) {
let n = params.tlwe_params.n;
let l = params.l;
let bg_bit = params.bg_bit;
let mask_mod = params.mask_mod;
let half_bg = params.half_bg;
let offset = params.offset;
// First, add offset to everyone
let buf: Vec<i32> = sample
.coefs
.iter()
.map(|c| c.wrapping_add(offset as i32))
.collect();
// Then, do the decomposition (TODO: in parallel)
#[allow(clippy::needless_range_loop)]
for p in 0..l as usize {
let decal = 32 - (p + 1) as i32 * bg_bit;
let res_p: &mut Vec<i32> = &mut result[p].coefs;
for j in 0..n as usize {
let temp1 = (buf[j] >> decal) & mask_mod as i32;
res_p[j] = temp1 - half_bg;
}
}
}
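// Illustrative note, not from the original source: the loop above is a signed
// base-2^bg_bit ("gadget") decomposition. Up to the rounding introduced by
// `offset`/`half_bg`, every coefficient c of `sample` satisfies approximately
//
//   c ≈ sum_p result[p] * 2^(32 - (p + 1) * bg_bit),  with result[p] in [-half_bg, half_bg)
//
// e.g. with bg_bit = 8 and l = 4, a 32-bit coefficient is split into four
// signed 8-bit digits, most significant digit first.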
#[cfg(test)]
mod tests {
use super::*;
use crate::polynomial::TorusPolynomial;
fn generate_parameters() -> Vec<TGswParams> {
vec![
TGswParams::new(4, 8, TLweParameters::new(512, 1, 0f64, 1f64)),
TGswParams::new(3, 10, TLweParameters::new(512, 2, 0f64, 1f64)),
TGswParams::new(3, 10, TLweParameters::new(1024, 1, 0f64, 1f64)),
TGswParams::new(4, 8, TLweParameters::new(1024, 2, 0f64, 1f64)),
TGswParams::new(4, 8, TLweParameters::new(2048, 1, 0f64, 1f64)),
TGswParams::new(3, 10, TLweParameters::new(2048, 2, 0f64, 1f64)),
]
}
fn generate_keys() -> Vec<TGswKey> {
generate_parameters()
.iter()
.map(TGswKey::generate)
.collect()
}
fn fully_random_tgsw(sample: &mut TGswSample, alpha: f64, params: &TGswParams) {
let l = params.l;
let k = params.tlwe_params.k;
// This is butt-ugly
for j in 0..l as usize {
for i in 0..=k as usize {
let mut row = &mut sample.all_sample[j][i];
for u in 0..=k {
row.a[u as usize] = TorusPolynomial::uniform(row.a.len());
}
row.current_variance = alpha * alpha;
}
}
}
#[test]
fn | test_add_h | identifier_name |
|
tgsw.rs | dec = new_dec;
dec
.iter()
.flatten()
.zip_eq(sample.all_sample.iter().flatten())
.for_each(|(d, a)| result.add_mul_r_(d, a, par));
result
// for i in 0..dec.len() as usize {
// println!("kpl: {}, k: {}, l: {}, i: {}", kpl, par.k, params.l, i);
// // TODO: Figure out if this is supposed to be [0][i] instead, or something else...
// let d = &dec[i][0];
// let ass = &sample.all_sample[i][0];
// accum.add_mul_r_(d, ass, par);
// }
// for (int32_t i = 0; i < kpl; i++) {
// tLweAddMulRTo(accum, &dec[i], &sample->all_sample[i], par);
// }
}
/// Decompose a TLwe-sample by the given TGsw parameters
fn tgsw_tlwe_decomposition_h(sample: &TLweSample, params: &TGswParams) -> Vec<Vec<IntPolynomial>> {
let tlwe_params = ¶ms.tlwe_params;
let k = tlwe_params.k;
let mut result =
vec![vec![IntPolynomial::new(tlwe_params.n); params.l as usize]; (tlwe_params.k + 1) as usize];
for i in 0..=k {
// b=a[k]
tgsw_torus32_polynomial_decomposition_h(
&mut result[(i/* /* TODO: Remove this when you figure this out: Don't think this is necessary? */ * l*/)
as usize],
&sample.a[i as usize],
params,
);
// tGswTorus32PolynomialDecompH(result + (i * l), &sample->a[i], params);
}
result
}
fn tgsw_torus32_polynomial_decomposition_h(
result: &mut Vec<IntPolynomial>,
sample: &TorusPolynomial,
params: &TGswParams,
) {
let n = params.tlwe_params.n;
let l = params.l;
let bg_bit = params.bg_bit;
let mask_mod = params.mask_mod;
let half_bg = params.half_bg;
let offset = params.offset;
// First, add offset to everyone
let buf: Vec<i32> = sample
.coefs
.iter()
.map(|c| c.wrapping_add(offset as i32))
.collect();
// Then, do the decomposition (TODO: in parallel)
#[allow(clippy::needless_range_loop)]
for p in 0..l as usize {
let decal = 32 - (p + 1) as i32 * bg_bit;
let res_p: &mut Vec<i32> = &mut result[p].coefs;
for j in 0..n as usize {
let temp1 = (buf[j] >> decal) & mask_mod as i32;
res_p[j] = temp1 - half_bg;
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::polynomial::TorusPolynomial;
fn generate_parameters() -> Vec<TGswParams> {
vec![
TGswParams::new(4, 8, TLweParameters::new(512, 1, 0f64, 1f64)),
TGswParams::new(3, 10, TLweParameters::new(512, 2, 0f64, 1f64)),
TGswParams::new(3, 10, TLweParameters::new(1024, 1, 0f64, 1f64)),
TGswParams::new(4, 8, TLweParameters::new(1024, 2, 0f64, 1f64)),
TGswParams::new(4, 8, TLweParameters::new(2048, 1, 0f64, 1f64)),
TGswParams::new(3, 10, TLweParameters::new(2048, 2, 0f64, 1f64)),
]
}
fn generate_keys() -> Vec<TGswKey> {
generate_parameters()
.iter()
.map(TGswKey::generate)
.collect()
}
fn fully_random_tgsw(sample: &mut TGswSample, alpha: f64, params: &TGswParams) {
let l = params.l;
let k = params.tlwe_params.k;
// This is butt-ugly
for j in 0..l as usize {
for i in 0..=k as usize {
let mut row = &mut sample.all_sample[j][i];
for u in 0..=k {
row.a[u as usize] = TorusPolynomial::uniform(row.a.len());
}
row.current_variance = alpha * alpha;
}
}
}
#[test]
fn test_add_h() {
for params in generate_parameters() {
let mut sample = TGswSample::new(¶ms);
let kpl = params.kpl;
let l = params.l;
let k = params.tlwe_params.k;
let n = params.tlwe_params.n;
let h = ¶ms.h;
let alpha = 4.2;
fully_random_tgsw(&mut sample, alpha, ¶ms);
let sample_copy = sample.clone();
sample.add_h(¶ms);
// Verify all coefficients
for i in 0..l as usize {
for j in 0..=k as usize {
assert_eq!(
sample.all_sample[i][j].current_variance,
sample_copy.all_sample[i][j].current_variance
);
for u in 0..=k as usize {
//verify that pol[bloc][i][u]=initial[bloc][i][u]+(bloc==u?hi:0)
let new_polynomial = &sample.all_sample[i][j].a[u];
let old_polynomial = &sample_copy.all_sample[i][j].a[u];
assert_eq!(
new_polynomial.coefs[0], // Should this be i == u?
old_polynomial.coefs[0] + (if j == u { h[i] } else { 0 })
);
assert_eq!(new_polynomial.coefs[1..], old_polynomial.coefs[1..]);
}
}
}
}
}
fn random_int_polynomial(n: i32) -> IntPolynomial {
let mut rng = rand::thread_rng();
use rand::distributions::Distribution;
let d = rand_distr::Uniform::new(i32::MIN, i32::MAX);
let coefs: Vec<i32> = (0..n).map(|_| d.sample(&mut rng) % 10 - 5).collect();
assert_eq!(coefs.len() as i32, n);
IntPolynomial::from(coefs)
}
#[test]
#[ignore]
fn test_add_mu_h() {
for params in generate_parameters() {
let mut sample = TGswSample::new(¶ms);
let kpl = params.kpl;
let l = params.l;
let k = params.tlwe_params.k;
let n = params.tlwe_params.n;
let h = ¶ms.h;
let alpha = 4.2;
let message = random_int_polynomial(n);
fully_random_tgsw(&mut sample, alpha, ¶ms);
let sample_copy = sample.clone();
sample.add_mu_h(&message, ¶ms);
// Verify all coefficients
for i in 0..l as usize {
for j in 0..=k as usize {
assert_eq!(
sample.all_sample[i][j].current_variance,
sample_copy.all_sample[i][j].current_variance
);
for u in 0..=k as usize {
//verify that pol[bloc][i][u]=initial[bloc][i][u]+(bloc==u?hi*mess:0)
let new_polynomial = &sample.all_sample[i][j].a[u];
let old_polynomial = &sample_copy.all_sample[i][j].a[u];
if j == u {
new_polynomial
.coefs
.iter()
.zip_eq(old_polynomial.coefs.iter())
.zip_eq(message.coefs.iter())
.for_each(|((n, o), m)| assert_eq!(*n, *o + h[i] * (dbg!(*m))));
// for jj in 0..n as usize {
// assert_eq!(
// new_polynomial.coefs[jj],
// old_polynomial.coefs[jj] + h[i] * message.coefs[jj]
// );
// }
} else | {
assert!(new_polynomial
.coefs
.iter()
.zip_eq(old_polynomial.coefs.iter())
.all(|(a, b)| a == b));
} | conditional_block |
|
tgsw.rs | allow(clippy::needless_range_loop)]
for x in 0..inner {
for y in 0..outer {
std::mem::swap(&mut new_dec[x][y], &mut dec[y][x]);
}
}
let dec = new_dec;
dec
.iter()
.flatten()
.zip_eq(sample.all_sample.iter().flatten())
.for_each(|(d, a)| result.add_mul_r_(d, a, par));
result
// for i in 0..dec.len() as usize {
// println!("kpl: {}, k: {}, l: {}, i: {}", kpl, par.k, params.l, i);
// // TODO: Figure out if this is supposed to be [0][i] instead, or something else...
// let d = &dec[i][0];
// let ass = &sample.all_sample[i][0];
// accum.add_mul_r_(d, ass, par);
// }
// for (int32_t i = 0; i < kpl; i++) {
// tLweAddMulRTo(accum, &dec[i], &sample->all_sample[i], par);
// }
}
/// Decompose a TLwe-sample by the given TGsw parameters
fn tgsw_tlwe_decomposition_h(sample: &TLweSample, params: &TGswParams) -> Vec<Vec<IntPolynomial>> {
let tlwe_params = ¶ms.tlwe_params;
let k = tlwe_params.k;
let mut result =
vec![vec![IntPolynomial::new(tlwe_params.n); params.l as usize]; (tlwe_params.k + 1) as usize];
for i in 0..=k {
// b=a[k]
tgsw_torus32_polynomial_decomposition_h(
&mut result[(i/* /* TODO: Remove this when you figure this out: Don't think this is necessary? */ * l*/)
as usize],
&sample.a[i as usize],
params,
);
// tGswTorus32PolynomialDecompH(result + (i * l), &sample->a[i], params);
}
result
}
fn tgsw_torus32_polynomial_decomposition_h(
result: &mut Vec<IntPolynomial>,
sample: &TorusPolynomial,
params: &TGswParams,
) {
let n = params.tlwe_params.n;
let l = params.l;
let bg_bit = params.bg_bit;
let mask_mod = params.mask_mod;
let half_bg = params.half_bg;
let offset = params.offset;
// First, add offset to everyone
let buf: Vec<i32> = sample
.coefs
.iter()
.map(|c| c.wrapping_add(offset as i32))
.collect();
// Then, do the decomposition (TODO: in parallel)
#[allow(clippy::needless_range_loop)]
for p in 0..l as usize {
let decal = 32 - (p + 1) as i32 * bg_bit;
let res_p: &mut Vec<i32> = &mut result[p].coefs;
for j in 0..n as usize {
let temp1 = (buf[j] >> decal) & mask_mod as i32;
res_p[j] = temp1 - half_bg;
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::polynomial::TorusPolynomial;
fn generate_parameters() -> Vec<TGswParams> {
vec![
TGswParams::new(4, 8, TLweParameters::new(512, 1, 0f64, 1f64)),
TGswParams::new(3, 10, TLweParameters::new(512, 2, 0f64, 1f64)),
TGswParams::new(3, 10, TLweParameters::new(1024, 1, 0f64, 1f64)),
TGswParams::new(4, 8, TLweParameters::new(1024, 2, 0f64, 1f64)),
TGswParams::new(4, 8, TLweParameters::new(2048, 1, 0f64, 1f64)),
TGswParams::new(3, 10, TLweParameters::new(2048, 2, 0f64, 1f64)),
]
}
fn generate_keys() -> Vec<TGswKey> {
generate_parameters()
.iter()
.map(TGswKey::generate)
.collect()
}
fn fully_random_tgsw(sample: &mut TGswSample, alpha: f64, params: &TGswParams) {
let l = params.l;
let k = params.tlwe_params.k;
// This is butt-ugly
for j in 0..l as usize {
for i in 0..=k as usize {
let mut row = &mut sample.all_sample[j][i];
for u in 0..=k {
row.a[u as usize] = TorusPolynomial::uniform(row.a.len());
}
row.current_variance = alpha * alpha;
}
}
}
#[test]
fn test_add_h() {
for params in generate_parameters() {
let mut sample = TGswSample::new(¶ms);
let kpl = params.kpl;
let l = params.l;
let k = params.tlwe_params.k;
let n = params.tlwe_params.n;
let h = ¶ms.h;
let alpha = 4.2;
fully_random_tgsw(&mut sample, alpha, ¶ms);
let sample_copy = sample.clone();
sample.add_h(¶ms);
// Verify all coefficients
for i in 0..l as usize {
for j in 0..=k as usize {
assert_eq!(
sample.all_sample[i][j].current_variance,
sample_copy.all_sample[i][j].current_variance
);
for u in 0..=k as usize {
//verify that pol[bloc][i][u]=initial[bloc][i][u]+(bloc==u?hi:0)
let new_polynomial = &sample.all_sample[i][j].a[u];
let old_polynomial = &sample_copy.all_sample[i][j].a[u];
assert_eq!(
new_polynomial.coefs[0], // Should this be i == u?
old_polynomial.coefs[0] + (if j == u { h[i] } else { 0 })
);
assert_eq!(new_polynomial.coefs[1..], old_polynomial.coefs[1..]);
}
}
}
}
}
fn random_int_polynomial(n: i32) -> IntPolynomial {
let mut rng = rand::thread_rng();
use rand::distributions::Distribution;
let d = rand_distr::Uniform::new(i32::MIN, i32::MAX);
let coefs: Vec<i32> = (0..n).map(|_| d.sample(&mut rng) % 10 - 5).collect();
assert_eq!(coefs.len() as i32, n);
IntPolynomial::from(coefs)
}
#[test]
#[ignore]
fn test_add_mu_h() {
for params in generate_parameters() {
let mut sample = TGswSample::new(¶ms);
let kpl = params.kpl;
let l = params.l;
let k = params.tlwe_params.k;
let n = params.tlwe_params.n;
let h = ¶ms.h;
let alpha = 4.2;
let message = random_int_polynomial(n);
fully_random_tgsw(&mut sample, alpha, ¶ms);
let sample_copy = sample.clone();
sample.add_mu_h(&message, ¶ms);
// Verify all coefficients
for i in 0..l as usize {
for j in 0..=k as usize {
assert_eq!(
sample.all_sample[i][j].current_variance,
sample_copy.all_sample[i][j].current_variance
);
for u in 0..=k as usize {
//verify that pol[bloc][i][u]=initial[bloc][i][u]+(bloc==u?hi*mess:0)
let new_polynomial = &sample.all_sample[i][j].a[u];
let old_polynomial = &sample_copy.all_sample[i][j].a[u];
if j == u {
new_polynomial
.coefs
.iter()
.zip_eq(old_polynomial.coefs.iter())
.zip_eq(message.coefs.iter())
.for_each(|((n, o), m)| assert_eq!(*n, *o + h[i] * (dbg!(*m))));
// for jj in 0..n as usize {
// assert_eq!(
// new_polynomial.coefs[jj],
// old_polynomial.coefs[jj] + h[i] * message.coefs[jj] | random_line_split |
||
trie.rs | 個までの値を登録できる
/// Panics if the limit is exceeded
///
/// # Arguments
///
/// * `key` - the key to add
/// * `value` - the value associated with the key
pub fn set(&mut self, key: &str, value: T) {
let mut node = &mut self.root;
for &k in key.as_bytes() {
match node.nexts.binary_search_by(|probe| probe.key.cmp(&k)) {
Ok(i) => {
node = &mut node.nexts[i];
},
Err(i) => {
node.nexts.insert(i, Node { key: k, values: Vec::new(), nexts: Vec::new() });
node = &mut node.nexts[i];
}
}
}
self.len += 1;
node.values.push(value);
}
/// Searches the trie
/// Returns a slice of the values if the key is found
///
/// # Arguments
///
/// * `key` - the key to search for
pub fn get(&self, key: &str) -> Option<&[T]> {
let mut node = &self.root;
for &k in key.as_bytes() {
match node.nexts.binary_search_by(|probe| probe.key.cmp(&k)) {
Ok(i) => {
node = &node.nexts[i];
},
Err(_) => {
return None;
}
}
}
if node.values.is_empty() {
None
} else {
Some(&node.values)
}
}
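// Minimal usage sketch (not part of the original file); it only uses the
// public API of this module and mirrors the tests below:
//
// let mut trie: Trie<u32> = Trie::new();
// trie.set("apple", 1);
// trie.set("app", 2);
// assert_eq!(Some(&[1u32][..]), trie.get("apple"));
// let da = trie.to_double_array().ok().unwrap();
// assert_eq!(vec![1], da.get("apple").unwrap());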
/// Converts the trie into a double array
///
/// # Panics
/// Panics if the data cannot be converted into a byte sequence.
///
/// # Errors
///
///
/// # Arguments
///
/// * `len` - initial size of the double array
pub fn to_double_array(self) -> Result<DoubleArray<T>, std::io::Error> {
let max_key = u8::max_value() as usize + 1; // number of values a key can take
let mut len = if max_key > (4 * self.len) { max_key } else { 4 * self.len };
let mut base_arr: Vec<u32> = vec![0; len];
let mut check_arr: Vec<u32> = vec![0; len];
let mut data_arr: Vec<u8> = Vec::with_capacity(self.len);
let mut bit_cache: BitCache = BitCache::new();
bit_cache.set(0);
bit_cache.set(1); | while !stack.is_empty() {
let (curr_idx, mut node) = stack.pop().unwrap();
bit_cache.update_start();
// find and set the base value
if !node.values.is_empty() {
// if values exist, they are handled as a node with key = 255
node.nexts.push(Node { key: u8::max_value(), values: vec![], nexts: vec![] });
}
let base: usize = Self::find_base(&node.nexts, &bit_cache);
base_arr[curr_idx] = base as u32;
// grow the arrays if they are not long enough
if base + max_key >= len {
len = len * 2;
base_arr.resize(len, 0);
check_arr.resize(len, 0);
}
// register the new nodes in the double array
for n in node.nexts {
let i = base + (n.key as usize);
bit_cache.set(i);
check_arr[i] = curr_idx as u32;
if n.key == u8::max_value() {
// register the value node
// base stores the start index into data
base_arr[i] = data_arr.len() as u32;
// the values are appended to the end of data
let data = bincode::serialize(&node.values).unwrap();
data_arr.extend_from_slice(&data);
} else {
// register an ordinary node
stack.push((i, n));
}
}
}
// resize the arrays
let new_len = match bit_cache.last_index_of_one() {
None => max_key,
Some(new_len) => new_len + max_key,
};
base_arr.resize(new_len, 0);
check_arr.resize(new_len, 0);
DoubleArray::from_arrays(&base_arr, &check_arr, &data_arr)
}
/// Searches for a new base value
///
/// # Arguments
///
/// * `nodes` - the nodes to be added
/// * `bit_cache` - a BitCache instance
/// * `with_zero` - also consider a node with key = 0 when searching for the base value
fn find_base(nodes: &[Node<T>], bit_cache: &BitCache) -> usize {
if nodes.is_empty() {
panic!("探索すべきノードがありません");
}
let first_key = nodes[0].key as usize;
let mut offset = 0;
'outer: loop {
let empty_idx = bit_cache.find_empty_idx(offset);
let new_base = empty_idx - first_key;
if empty_idx < 256 {
panic!("empty_idx={}, first_key={}", empty_idx, first_key);
}
// check that every node can be placed without a collision
'inner: for next in nodes {
if bit_cache.get(new_base + next.key as usize) != 0 {
// if the slot was occupied, restart the search for a new_base
offset += 1;
continue 'outer;
}
}
return new_base;
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_trie_1() {
let mut trie: Trie<i32> = Trie::new();
let s = String::from("abc");
trie.set(&s, 0);
trie.set(&s, 1);
// the registered keys map to their values
assert_eq!(0, trie.get(&s).unwrap()[0]);
assert_eq!(1, trie.get(&s).unwrap()[1]);
let s = String::from("cba");
// keys that were never registered return None
assert_eq!(None, trie.get(&s));
}
#[test]
fn test_trie_2() {
let mut trie: Trie<u32> = Trie::new();
let s1 = String::from("abc");
let s2 = String::from("abd");
let s3 = String::from("zyx");
let s4 = String::from("zwx");
trie.set(&s1, 10);
trie.set(&s2, 11);
trie.set(&s3, 12);
trie.set(&s4, 13);
trie.set(&s1, 14);
// the registered keys map to their values
assert_eq!(10, trie.get(&s1).unwrap()[0]);
assert_eq!(14, trie.get(&s1).unwrap()[1]);
assert_eq!(11, trie.get(&s2).unwrap()[0]);
assert_eq!(12, trie.get(&s3).unwrap()[0]);
assert_eq!(13, trie.get(&s4).unwrap()[0]);
}
#[test]
fn test_trie_3() {
let mut trie: Trie<u32> = Trie::new();
let s1 = String::from("あいうえお");
let s2 = String::from("あいえうお");
let s3 = String::from("漢字");
let s4 = String::from("平仮名");
let s5 = String::from("片仮名");
trie.set(&s1, 10);
trie.set(&s2, 11);
trie.set(&s3, 12);
trie.set(&s4, 13);
trie.set(&s5, 14);
// the registered keys map to their values
assert_eq!(10, trie.get(&s1).unwrap()[0]);
assert_eq!(11, trie.get(&s2).unwrap()[0]);
assert_eq!(12, trie.get(&s3).unwrap()[0]);
assert_eq!(13, trie.get(&s4).unwrap()[0]);
assert_eq!(14, trie.get(&s5).unwrap()[0]);
}
#[test]
fn test_find_base_1() {
let nodes: Vec<Node<u32>> = vec![
Node::<u32> { key: 2 , values: vec![], nexts: vec | let mut stack: Vec<(usize, Node<T>)> = Vec::with_capacity(self.len);
if !self.root.nexts.is_empty() {
stack.push((1, self.root));
}
| random_line_split |
trie.rs | までの値を登録できる
/// Panics if the limit is exceeded
///
/// # Arguments
///
/// * `key` - the key to add
/// * `value` - the value associated with the key
pub fn set(&mut self, key: &str, value: T) {
let mut node = &mut self.root;
for &k in key.as_bytes() {
match node.nexts.binary_search_by(|probe| probe.key.cmp(&k)) {
Ok(i) => {
node = &mut node.nexts[i];
},
Err(i) => {
node.nexts.insert(i, Node { key: k, values: Vec::new(), nexts: Vec::new() });
node = &mut node.nexts[i];
}
}
}
self.len += 1;
node.values.push(value);
}
/// Searches the trie
/// Returns a slice of the values if the key is found
///
/// # Arguments
///
/// * `key` - the key to search for
pub fn get(&self, key: &str) -> Option<&[T]> {
let mut node = &self.root;
for &k in key.as_bytes() {
match node.nexts.binary_search_by(|probe| probe.key.cmp(&k)) {
Ok(i) => {
node = &node.nexts[i];
},
Err(_) = | ///
/// # Arguments
///
/// * `len` - initial size of the double array
pub fn to_double_array(self) -> Result<DoubleArray<T>, std::io::Err
or> {
let max_key = u8::max_value() as usize + 1; // keyが取りうる値のパターン
let mut len = if max_key > (4 * self.len) { max_key } else { 4 * self.len };
let mut base_arr: Vec<u32> = vec![0; len];
let mut check_arr: Vec<u32> = vec![0; len];
let mut data_arr: Vec<u8> = Vec::with_capacity(self.len);
let mut bit_cache: BitCache = BitCache::new();
bit_cache.set(0);
bit_cache.set(1);
let mut stack: Vec<(usize, Node<T>)> = Vec::with_capacity(self.len);
if !self.root.nexts.is_empty() {
stack.push((1, self.root));
}
while !stack.is_empty() {
let (curr_idx, mut node) = stack.pop().unwrap();
bit_cache.update_start();
// base値を探索・セット
if !node.values.is_empty() {
// valuesが存在する場合はkey=255のノードとして計算する
node.nexts.push(Node { key: u8::max_value(), values: vec![], nexts: vec![] });
}
let base: usize = Self::find_base(&node.nexts, &bit_cache);
base_arr[curr_idx] = base as u32;
// 配列の長さが足りなければ配列を拡張
if base + max_key >= len {
len = len * 2;
base_arr.resize(len, 0);
check_arr.resize(len, 0);
}
// 新しいノードをダブル配列に登録
for n in node.nexts {
let i = base + (n.key as usize);
bit_cache.set(i);
check_arr[i] = curr_idx as u32;
if n.key == u8::max_value() {
// valueノードの登録
// base には data の開始 index を格納する
base_arr[i] = data_arr.len() as u32;
// data には末尾に values を追加する
let data = bincode::serialize(&node.values).unwrap();
data_arr.extend_from_slice(&data);
} else {
// 通常ノードの登録
stack.push((i, n));
}
}
}
// 配列のりサイズ
let new_len = match bit_cache.last_index_of_one() {
None => max_key,
Some(new_len) => new_len + max_key,
};
base_arr.resize(new_len, 0);
check_arr.resize(new_len, 0);
DoubleArray::from_arrays(&base_arr, &check_arr, &data_arr)
}
/// Searches for a new base value
///
/// # Arguments
///
/// * `nodes` - the nodes to be added
/// * `bit_cache` - a BitCache instance
/// * `with_zero` - also consider a node with key = 0 when searching for the base value
fn find_base(nodes: &[Node<T>], bit_cache: &BitCache) -> usize {
if nodes.is_empty() {
panic!("探索すべきノードがありません");
}
let first_key = nodes[0].key as usize;
let mut offset = 0;
'outer: loop {
let empty_idx = bit_cache.find_empty_idx(offset);
let new_base = empty_idx - first_key;
if empty_idx < 256 {
panic!("empty_idx={}, first_key={}", empty_idx, first_key);
}
// すべてのノードが重複せずに配置できるかをチェック
'inner: for next in nodes {
if bit_cache.get(new_base + next.key as usize) != 0 {
// 空じゃなかった場合はnew_baseを探すとこからやり直し
offset += 1;
continue 'outer;
}
}
return new_base;
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_trie_1() {
let mut trie: Trie<i32> = Trie::new();
let s = String::from("abc");
trie.set(&s, 0);
trie.set(&s, 1);
// 登録されたkeyと値が一致している
assert_eq!(0, trie.get(&s).unwrap()[0]);
assert_eq!(1, trie.get(&s).unwrap()[1]);
let s = String::from("cba");
// 登録されていないkeyはNoneを返す
assert_eq!(None, trie.get(&s));
}
#[test]
fn test_trie_2() {
let mut trie: Trie<u32> = Trie::new();
let s1 = String::from("abc");
let s2 = String::from("abd");
let s3 = String::from("zyx");
let s4 = String::from("zwx");
trie.set(&s1, 10);
trie.set(&s2, 11);
trie.set(&s3, 12);
trie.set(&s4, 13);
trie.set(&s1, 14);
// 登録されたkeyと値が一致している
assert_eq!(10, trie.get(&s1).unwrap()[0]);
assert_eq!(14, trie.get(&s1).unwrap()[1]);
assert_eq!(11, trie.get(&s2).unwrap()[0]);
assert_eq!(12, trie.get(&s3).unwrap()[0]);
assert_eq!(13, trie.get(&s4).unwrap()[0]);
}
#[test]
fn test_trie_3() {
let mut trie: Trie<u32> = Trie::new();
let s1 = String::from("あいうえお");
let s2 = String::from("あいえうお");
let s3 = String::from("漢字");
let s4 = String::from("平仮名");
let s5 = String::from("片仮名");
trie.set(&s1, 10);
trie.set(&s2, 11);
trie.set(&s3, 12);
trie.set(&s4, 13);
trie.set(&s5, 14);
// 登録されたkeyと値が一致している
assert_eq!(10, trie.get(&s1).unwrap()[0]);
assert_eq!(11, trie.get(&s2).unwrap()[0]);
assert_eq!(12, trie.get(&s3).unwrap()[0]);
assert_eq!(13, trie.get(&s4).unwrap()[0]);
assert_eq!(14, trie.get(&s5).unwrap()[0]);
}
#[test]
fn test_find_base_1() {
let nodes: Vec<Node<u32>> = vec![
Node::<u32> { key: 2 , values: vec![], nexts | > {
return None;
}
}
}
if node.values.is_empty() {
None
} else {
Some(&node.values)
}
}
/// Converts the trie into a double array
///
/// # Panics
/// Panics if the data cannot be converted into a byte sequence.
///
/// # Errors
/// | identifier_body |
trie.rs | usize) != 0 {
// 空じゃなかった場合はnew_baseを探すとこからやり直し
offset += 1;
continue 'outer;
}
}
return new_base;
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_trie_1() {
let mut trie: Trie<i32> = Trie::new();
let s = String::from("abc");
trie.set(&s, 0);
trie.set(&s, 1);
// 登録されたkeyと値が一致している
assert_eq!(0, trie.get(&s).unwrap()[0]);
assert_eq!(1, trie.get(&s).unwrap()[1]);
let s = String::from("cba");
// 登録されていないkeyはNoneを返す
assert_eq!(None, trie.get(&s));
}
#[test]
fn test_trie_2() {
let mut trie: Trie<u32> = Trie::new();
let s1 = String::from("abc");
let s2 = String::from("abd");
let s3 = String::from("zyx");
let s4 = String::from("zwx");
trie.set(&s1, 10);
trie.set(&s2, 11);
trie.set(&s3, 12);
trie.set(&s4, 13);
trie.set(&s1, 14);
// 登録されたkeyと値が一致している
assert_eq!(10, trie.get(&s1).unwrap()[0]);
assert_eq!(14, trie.get(&s1).unwrap()[1]);
assert_eq!(11, trie.get(&s2).unwrap()[0]);
assert_eq!(12, trie.get(&s3).unwrap()[0]);
assert_eq!(13, trie.get(&s4).unwrap()[0]);
}
#[test]
fn test_trie_3() {
let mut trie: Trie<u32> = Trie::new();
let s1 = String::from("あいうえお");
let s2 = String::from("あいえうお");
let s3 = String::from("漢字");
let s4 = String::from("平仮名");
let s5 = String::from("片仮名");
trie.set(&s1, 10);
trie.set(&s2, 11);
trie.set(&s3, 12);
trie.set(&s4, 13);
trie.set(&s5, 14);
// 登録されたkeyと値が一致している
assert_eq!(10, trie.get(&s1).unwrap()[0]);
assert_eq!(11, trie.get(&s2).unwrap()[0]);
assert_eq!(12, trie.get(&s3).unwrap()[0]);
assert_eq!(13, trie.get(&s4).unwrap()[0]);
assert_eq!(14, trie.get(&s5).unwrap()[0]);
}
#[test]
fn test_find_base_1() {
let nodes: Vec<Node<u32>> = vec![
Node::<u32> { key: 2 , values: vec![], nexts: vec![] },
Node::<u32> { key: 5 , values: vec![], nexts: vec![] },
Node::<u32> { key: 255, values: vec![], nexts: vec![] },
];
let mut bit_cache = BitCache::new();
// 探索開始位置 = 256。空きindex = 256
// base値 = 空きindex - 先頭ノードのkey = 256 - 2 = 254
assert_eq!(254, Trie::find_base(&nodes, &bit_cache));
// 0 ~ 399, 500 ~ 999 を埋める
(256..400).for_each(|i| bit_cache.set(i));
(500..1000).for_each(|i| bit_cache.set(i));
// 探索開始位置 = 256。空きindex = 1000
// base値 = 空きindex - 先頭ノードのkey = 1000 - 2 = 998
assert_eq!(998, Trie::find_base(&nodes, &bit_cache));
//1000..1002, 1003..1005, 1006..1255 を埋める
(1000..1002).for_each(|i| bit_cache.set(i));
(1003..1005).for_each(|i| bit_cache.set(i));
(1006..1255).for_each(|i| bit_cache.set(i));
// 探索開始位置 = 256。空きindex = 1002
// base値 = 空きindex - 先頭ノードのkey = 1002 - 2 = 1000
assert_eq!(1000, Trie::find_base(&nodes, &bit_cache));
// 400 ~ 500 を埋める
(400..500).for_each(|i| bit_cache.set(i));
// 探索開始位置=1216。空きindex = 1255
// base値 = 空きindex - 先頭ノードのkey = 1255 - 2 = 1253
bit_cache.update_start();
assert_eq!(1253, Trie::find_base(&nodes, &bit_cache));
}
#[test]
#[should_panic(expected = "探索すべきノードがありません")]
fn test_find_base_2() {
let nodes: Vec<Node<u32>> = vec![];
let bit_cache = BitCache::new();
// nodesが空でwith_zero=falseの場合は、base値を求められないのでpanic
Trie::find_base(&nodes, &bit_cache);
}
#[test]
fn test_to_double_array_1() {
let mut trie: Trie<u32> = Trie::new();
let s1 = String::from("abc");
let s2 = String::from("ac");
let s3 = String::from("b");
let s4 = String::from("bd");
let s5 = String::from("bdc");
trie.set(&s1, 1);
trie.set(&s1, 2);
trie.set(&s2, 3);
trie.set(&s3, 4);
trie.set(&s4, 5);
trie.set(&s5, 6);
let double_array = trie.to_double_array().ok().unwrap();
// 登録されていて、data_arrに値が存在するkeyは対応する値を返す
assert_eq!(vec![1, 2], double_array.get(&s1).unwrap());
assert_eq!(vec![3], double_array.get(&s2).unwrap());
assert_eq!(vec![4], double_array.get(&s3).unwrap());
assert_eq!(vec![5], double_array.get(&s4).unwrap());
assert_eq!(vec![6], double_array.get(&s5).unwrap());
// 登録されているが、data_arrに値が存在しないkeyはNoneを返す
assert_eq!(None, double_array.get("ab"));
}
#[test]
fn test_to_double_array_2() {
let trie: Trie<u32> = Trie::new();
let double_array = trie.to_double_array().ok().unwrap();
// 遷移できない場合はpanicする
assert_eq!(None, double_array.get("abc"));
}
#[test]
fn test_to_double_array_3() {
// マルチバイト文字のテスト
let mut trie: Trie<u32> = Trie::new();
let s1 = String::from("おすしとビール");
let s2 = String::from("お寿司とビール");
let s3 = String::from("🍣🍺");
trie.set(&s1, 1);
trie.set(&s1, 2);
trie.set(&s2, 3);
trie.set(&s3, 4);
let double_array = trie.to_double_array().ok().unwrap();
// 登録されていて、data_arrに値が存在するkeyは対応する値を返す
assert_eq!(vec![1, 2], double_array.get(&s1).unwrap());
assert_eq!(vec![3] , double_array.get(&s2).unwrap());
assert_eq!(vec![4] , double_array.get(&s3).unwrap());
// 登録されているが、data_arrに値が存在しないkeyはNoneを返す
assert_eq!(None, double_array.get("お寿"));
}
}
| identifier_name |
||
trie.rs | 個までの値を登録できる
/// Panics if the limit is exceeded
///
/// # Arguments
///
/// * `key` - the key to add
/// * `value` - the value associated with the key
pub fn set(&mut self, key: &str, value: T) {
let mut node = &mut self.root;
for &k in key.as_bytes() {
match node.nexts.binary_search_by(|probe| probe.key.cmp(&k)) {
Ok(i) => {
node = &mut node.nexts[i];
},
Err(i) => {
node.nexts.insert(i, Node { key: k, values: Vec::new(), nexts: Vec::new() });
node = &mut node.nexts[i];
}
}
}
self.len += 1;
node.values.push(value);
}
/// Searches the trie
/// Returns a slice of the values if the key is found
///
/// # Arguments
///
/// * `key` - the key to search for
pub fn get(&self, key: &str) -> Option<&[T]> {
let mut node = &self.root;
for &k in key.as_bytes() {
match node.nexts.binary_search_by(|probe| probe.key.cmp(&k)) {
Ok(i) => {
node = &node.nexts[i];
},
Err(_) => {
return None;
}
}
}
if node.values.is_empty() {
None
} else {
Some(&node.values)
}
}
/// Converts the trie into a double array
///
/// # Panics
/// Panics if the data cannot be converted into a byte sequence.
///
/// # Errors
///
///
/// # Arguments
///
/// * `len` - initial size of the double array
pub fn to_double_ar | o::Error> {
let max_key = u8::max_value() as usize + 1; // keyが取りうる値のパターン
let mut len = if max_key > (4 * self.len) { max_key } else { 4 * self.len };
let mut base_arr: Vec<u32> = vec![0; len];
let mut check_arr: Vec<u32> = vec![0; len];
let mut data_arr: Vec<u8> = Vec::with_capacity(self.len);
let mut bit_cache: BitCache = BitCache::new();
bit_cache.set(0);
bit_cache.set(1);
let mut stack: Vec<(usize, Node<T>)> = Vec::with_capacity(self.len);
if !self.root.nexts.is_empty() {
stack.push((1, self.root));
}
while !stack.is_empty() {
let (curr_idx, mut node) = stack.pop().unwrap();
bit_cache.update_start();
// base値を探索・セット
if !node.values.is_empty() {
// valuesが存在する場合はkey=255のノードとして計算する
node.nexts.push(Node { key: u8::max_value(), values: vec![], nexts: vec![] });
}
let base: usize = Self::find_base(&node.nexts, &bit_cache);
base_arr[curr_idx] = base as u32;
// 配列の長さが足りなければ配列を拡張
if base + max_key >= len {
len = len * 2;
base_arr.resize(len, 0);
check_arr.resize(len, 0);
}
// 新しいノードをダブル配列に登録
for n in node.nexts {
let i = base + (n.key as usize);
bit_cache.set(i);
check_arr[i] = curr_idx as u32;
if n.key == u8::max_value() {
// valueノードの登録
// base には data の開始 index を格納する
base_arr[i] = data_arr.len() as u32;
// data には末尾に values を追加する
let data = bincode::serialize(&node.values).unwrap();
data_arr.extend_from_slice(&data);
} else {
// 通常ノードの登録
stack.push((i, n));
}
}
}
// 配列のりサイズ
let new_len = match bit_cache.last_index_of_one() {
None => max_key,
Some(new_len) => new_len + max_key,
};
base_arr.resize(new_len, 0);
check_arr.resize(new_len, 0);
DoubleArray::from_arrays(&base_arr, &check_arr, &data_arr)
}
/// Searches for a new base value
///
/// # Arguments
///
/// * `nodes` - the nodes to be added
/// * `bit_cache` - a BitCache instance
/// * `with_zero` - also consider a node with key = 0 when searching for the base value
fn find_base(nodes: &[Node<T>], bit_cache: &BitCache) -> usize {
if nodes.is_empty() {
panic!("探索すべきノードがありません");
}
let first_key = nodes[0].key as usize;
let mut offset = 0;
'outer: loop {
let empty_idx = bit_cache.find_empty_idx(offset);
let new_base = empty_idx - first_key;
if empty_idx < 256 {
panic!("empty_idx={}, first_key={}", empty_idx, first_key);
}
// すべてのノードが重複せずに配置できるかをチェック
'inner: for next in nodes {
if bit_cache.get(new_base + next.key as usize) != 0 {
// 空じゃなかった場合はnew_baseを探すとこからやり直し
offset += 1;
continue 'outer;
}
}
return new_base;
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_trie_1() {
let mut trie: Trie<i32> = Trie::new();
let s = String::from("abc");
trie.set(&s, 0);
trie.set(&s, 1);
// 登録されたkeyと値が一致している
assert_eq!(0, trie.get(&s).unwrap()[0]);
assert_eq!(1, trie.get(&s).unwrap()[1]);
let s = String::from("cba");
// 登録されていないkeyはNoneを返す
assert_eq!(None, trie.get(&s));
}
#[test]
fn test_trie_2() {
let mut trie: Trie<u32> = Trie::new();
let s1 = String::from("abc");
let s2 = String::from("abd");
let s3 = String::from("zyx");
let s4 = String::from("zwx");
trie.set(&s1, 10);
trie.set(&s2, 11);
trie.set(&s3, 12);
trie.set(&s4, 13);
trie.set(&s1, 14);
// 登録されたkeyと値が一致している
assert_eq!(10, trie.get(&s1).unwrap()[0]);
assert_eq!(14, trie.get(&s1).unwrap()[1]);
assert_eq!(11, trie.get(&s2).unwrap()[0]);
assert_eq!(12, trie.get(&s3).unwrap()[0]);
assert_eq!(13, trie.get(&s4).unwrap()[0]);
}
#[test]
fn test_trie_3() {
let mut trie: Trie<u32> = Trie::new();
let s1 = String::from("あいうえお");
let s2 = String::from("あいえうお");
let s3 = String::from("漢字");
let s4 = String::from("平仮名");
let s5 = String::from("片仮名");
trie.set(&s1, 10);
trie.set(&s2, 11);
trie.set(&s3, 12);
trie.set(&s4, 13);
trie.set(&s5, 14);
// 登録されたkeyと値が一致している
assert_eq!(10, trie.get(&s1).unwrap()[0]);
assert_eq!(11, trie.get(&s2).unwrap()[0]);
assert_eq!(12, trie.get(&s3).unwrap()[0]);
assert_eq!(13, trie.get(&s4).unwrap()[0]);
assert_eq!(14, trie.get(&s5).unwrap()[0]);
}
#[test]
fn test_find_base_1() {
let nodes: Vec<Node<u32>> = vec![
Node::<u32> { key: 2 , values: vec![], nexts | ray(self) -> Result<DoubleArray<T>, std::i | conditional_block |
doctype.py | for t in list(set(table_fields)):
# Get child doc and its fields
table_doclist = webnotes.model.doc.get('DocType', t, 1)
table_doclist += self.get_custom_fields(t)
doclist += table_doclist
self.apply_property_setters(doclist)
if form:
self.load_select_options(doclist)
self.add_code(doclist[0])
self.load_print_formats(doclist)
self.insert_into_cache(doclist)
return doclist
def get_custom_fields(self, doc_type):
"""
Gets a list of custom field docs masked as type DocField
"""
custom_doclist = []
res = webnotes.conn.sql("""SELECT * FROM `tabCustom Field` | for r in res:
# Cheat! Mask Custom Field as DocField
custom_field = webnotes.model.doc.Document(fielddata=r)
self.mask_custom_field(custom_field, doc_type)
custom_doclist.append(custom_field)
return custom_doclist
def mask_custom_field(self, custom_field, doc_type):
"""
Masks doctype and parent related properties of Custom Field as that
of DocField
"""
custom_field.fields.update({
'doctype': 'DocField',
'parent': doc_type,
'parentfield': 'fields',
'parenttype': 'DocType',
})
def get_table_fields(self, doclist):
"""
Returns [[options, fieldname]] of fields of type 'Table'
"""
table_fields = []
for d in doclist:
if d.doctype=='DocField' and d.fieldtype == 'Table':
table_fields.append([d.options, d.fieldname])
return table_fields
def apply_property_setters(self, doclist):
"""
"""
property_dict, doc_type_list = self.get_property_setters(doclist)
for d in doclist:
self.update_field_properties(d, property_dict)
self.apply_previous_field_properties(doclist, property_dict,
doc_type_list)
def get_property_setters(self, doclist):
"""
Returns a dict of property setter lists and doc_type_list
"""
from webnotes.utils import cstr
property_dict = {}
# final property dict will be
# {
# doc_type: {
# fieldname: [list of property setter dicts]
# }
# }
doc_type_list = list(set(
d.doctype=='DocType' and d.name or d.parent
for d in doclist))
in_string = '", "'.join(doc_type_list)
for ps in webnotes.conn.sql("""\
SELECT doc_type, field_name, property, property_type, value
FROM `tabProperty Setter`
WHERE doc_type IN ("%s")""" % in_string, as_dict=1):
property_dict.setdefault(ps.get('doc_type'),
{}).setdefault(cstr(ps.get('field_name')), []).append(ps)
return property_dict, doc_type_list
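# Illustrative example (not from the original code) of the structure returned
# above; the doctype, field and property names are made up:
#
# property_dict = {
# 	'Sales Invoice': {
# 		'posting_date': [
# 			{'doc_type': 'Sales Invoice', 'field_name': 'posting_date',
# 			 'property': 'reqd', 'property_type': 'Check', 'value': '1'},
# 		],
# 	},
# }
# doc_type_list = ['Sales Invoice', 'Sales Invoice Item']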
def update_field_properties(self, d, property_dict):
"""
apply properties except previous_field ones
"""
from webnotes.utils import cstr
# get property setters for a given doctype's fields
doctype_property_dict = (d.doctype=='DocField' and property_dict.get(d.parent) or
property_dict.get(d.name))
if not (doctype_property_dict and doctype_property_dict.get(cstr(d.fieldname))): return
from webnotes.utils import cint
prop_updates = []
for prop in doctype_property_dict.get(cstr(d.fieldname)):
if prop.get('property')=='previous_field': continue
if prop.get('property_type') == 'Check' or \
prop.get('value') in ['0', '1']:
prop_updates.append([prop.get('property'), cint(prop.get('value'))])
else:
prop_updates.append([prop.get('property'), prop.get('value')])
prop_updates and d.fields.update(dict(prop_updates))
def apply_previous_field_properties(self, doclist, property_dict,
doc_type_list):
"""
"""
prev_field_dict = self.get_previous_field_properties(property_dict)
if not prev_field_dict: return
for doc_type in doc_type_list:
docfields = self.get_sorted_docfields(doclist, doc_type)
docfields = self.sort_docfields(doc_type, docfields, prev_field_dict)
if docfields: self.change_idx(doclist, docfields, doc_type)
def get_previous_field_properties(self, property_dict):
"""
Builds prev_field_dict, mapping doc_type to {previous_field: field_name}
"""
from webnotes.utils import cstr
doctype_prev_field_list = []
for doc_type in property_dict:
prev_field_list = []
for prop_list in property_dict.get(doc_type).values():
for prop in prop_list:
if prop.get('property') == 'previous_field':
prev_field_list.append([prop.get('value'),
prop.get('field_name')])
break
if not prev_field_list: continue
doctype_prev_field_list.append([doc_type, dict(prev_field_list)])
if not doctype_prev_field_list: return
return dict(doctype_prev_field_list)
def get_sorted_docfields(self, doclist, doc_type):
"""
get a sorted list of docfield names
"""
sorted_list = sorted([
d for d in doclist
if d.doctype == 'DocField'
and d.parent == doc_type
], key=lambda df: df.idx)
return [d.fieldname for d in sorted_list]
def sort_docfields(self, doc_type, docfields, prev_field_dict):
"""
"""
temp_dict = prev_field_dict.get(doc_type)
if not temp_dict: return
prev_field = 'None' in temp_dict and 'None' or docfields[0]
i = 0
while temp_dict:
get_next_docfield = True
cur_field = temp_dict.get(prev_field)
if cur_field and cur_field in docfields:
try:
del temp_dict[prev_field]
if prev_field in docfields:
docfields.remove(cur_field)
docfields.insert(docfields.index(prev_field) + 1,
cur_field)
elif prev_field == 'None':
docfields.remove(cur_field)
docfields.insert(0, cur_field)
except ValueError:
pass
if cur_field in temp_dict:
prev_field = cur_field
get_next_docfield = False
if get_next_docfield:
i += 1
if i>=len(docfields): break
prev_field = docfields[i]
keys, vals = temp_dict.keys(), temp_dict.values()
if prev_field in vals:
i -= 1
prev_field = keys[vals.index(prev_field)]
return docfields
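# Worked example with hypothetical field names (not from the original code):
# if docfields is ['a', 'b', 'c', 'x'] and a Property Setter gives field 'x'
# previous_field 'a', sort_docfields returns ['a', 'x', 'b', 'c'];
# change_idx below then rewrites idx to 1, 2, 3, 4 in that order.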
def change_idx(self, doclist, docfields, doc_type):
for d in doclist:
if d.fieldname and d.fieldname in docfields and d.parent == doc_type:
d.idx = docfields.index(d.fieldname) + 1
def add_code(self, doc):
"""add js, css code"""
import os
from webnotes.modules import scrub, get_module_path
import conf
modules_path = get_module_path(doc.module)
path = os.path.join(modules_path, 'doctype', scrub(doc.name))
def _add_code(fname, fieldname):
fpath = os.path.join(path, fname)
if os.path.exists(fpath):
with open(fpath, 'r') as f:
doc.fields[fieldname] = f.read()
_add_code(scrub(doc.name) + '.js', '__js')
_add_code(scrub(doc.name) + '.css', '__css')
_add_code('%s_list.js' % scrub(doc.name), '__listjs')
_add_code('help.md', 'description')
# embed all require files
import re
def _sub(match):
fpath = os.path.join(os.path.dirname(conf.modules_path), \
re.search('["\'][^"\']*["\']', match.group(0)).group(0)[1:-1])
if os.path.exists(fpath):
with open(fpath, 'r') as f:
return '\n' + f.read() + '\n'
else:
return '\n// no file "%s" found \n' % fpath
if doc.fields.get('__js'):
doc.fields['__js'] = re.sub('(wn.require\([^\)]*.)', _sub, doc.fields['__js'])
# custom script
from webnotes.model.code import get_custom_script
custom = get_custom_script(doc.name, 'Client') or ''
doc.fields['__js'] = doc.fields.setdefault('__js', '') + '\n' + custom
def load_select_options(self, doclist):
"""
Loads Select options for 'Select' fields
with link: as start of options
"""
for d in doclist:
if (d.doctype == 'DocField' and d.fieldtype == 'Select' and
d.options and d.options[:5].lower() == 'link:'):
# Get various options
opt_list | WHERE dt = %s AND docstatus < 2""", doc_type, as_dict=1) | random_line_split |
doctype.py | for t in list(set(table_fields)):
# Get child doc and its fields
table_doclist = webnotes.model.doc.get('DocType', t, 1)
table_doclist += self.get_custom_fields(t)
doclist += table_doclist
self.apply_property_setters(doclist)
if form:
self.load_select_options(doclist)
self.add_code(doclist[0])
self.load_print_formats(doclist)
self.insert_into_cache(doclist)
return doclist
def get_custom_fields(self, doc_type):
"""
Gets a list of custom field docs masked as type DocField
"""
custom_doclist = []
res = webnotes.conn.sql("""SELECT * FROM `tabCustom Field`
WHERE dt = %s AND docstatus < 2""", doc_type, as_dict=1)
for r in res:
# Cheat! Mask Custom Field as DocField
custom_field = webnotes.model.doc.Document(fielddata=r)
self.mask_custom_field(custom_field, doc_type)
custom_doclist.append(custom_field)
return custom_doclist
def mask_custom_field(self, custom_field, doc_type):
"""
Masks doctype and parent related properties of Custom Field as that
of DocField
"""
custom_field.fields.update({
'doctype': 'DocField',
'parent': doc_type,
'parentfield': 'fields',
'parenttype': 'DocType',
})
def get_table_fields(self, doclist):
"""
Returns [[options, fieldname]] of fields of type 'Table'
"""
table_fields = []
for d in doclist:
if d.doctype=='DocField' and d.fieldtype == 'Table':
table_fields.append([d.options, d.fieldname])
return table_fields
def apply_property_setters(self, doclist):
"""
"""
property_dict, doc_type_list = self.get_property_setters(doclist)
for d in doclist:
self.update_field_properties(d, property_dict)
self.apply_previous_field_properties(doclist, property_dict,
doc_type_list)
def get_property_setters(self, doclist):
"""
Returns a dict of property setter lists and doc_type_list
"""
from webnotes.utils import cstr
property_dict = {}
# final property dict will be
# {
# doc_type: {
# fieldname: [list of property setter dicts]
# }
# }
doc_type_list = list(set(
d.doctype=='DocType' and d.name or d.parent
for d in doclist))
in_string = '", "'.join(doc_type_list)
for ps in webnotes.conn.sql("""\
SELECT doc_type, field_name, property, property_type, value
FROM `tabProperty Setter`
WHERE doc_type IN ("%s")""" % in_string, as_dict=1):
property_dict.setdefault(ps.get('doc_type'),
{}).setdefault(cstr(ps.get('field_name')), []).append(ps)
return property_dict, doc_type_list
def update_field_properties(self, d, property_dict):
"""
apply properties except previous_field ones
"""
from webnotes.utils import cstr
# get property setters for a given doctype's fields
doctype_property_dict = (d.doctype=='DocField' and property_dict.get(d.parent) or
property_dict.get(d.name))
if not (doctype_property_dict and doctype_property_dict.get(cstr(d.fieldname))): return
from webnotes.utils import cint
prop_updates = []
for prop in doctype_property_dict.get(cstr(d.fieldname)):
if prop.get('property')=='previous_field': continue
if prop.get('property_type') == 'Check' or \
prop.get('value') in ['0', '1']:
prop_updates.append([prop.get('property'), cint(prop.get('value'))])
else:
prop_updates.append([prop.get('property'), prop.get('value')])
prop_updates and d.fields.update(dict(prop_updates))
def apply_previous_field_properties(self, doclist, property_dict,
doc_type_list):
"""
"""
prev_field_dict = self.get_previous_field_properties(property_dict)
if not prev_field_dict: return
for doc_type in doc_type_list:
docfields = self.get_sorted_docfields(doclist, doc_type)
docfields = self.sort_docfields(doc_type, docfields, prev_field_dict)
if docfields: self.change_idx(doclist, docfields, doc_type)
def get_previous_field_properties(self, property_dict):
"""
Builds prev_field_dict, mapping doc_type to {previous_field: field_name}
"""
from webnotes.utils import cstr
doctype_prev_field_list = []
for doc_type in property_dict:
prev_field_list = []
for prop_list in property_dict.get(doc_type).values():
for prop in prop_list:
if prop.get('property') == 'previous_field':
prev_field_list.append([prop.get('value'),
prop.get('field_name')])
break
if not prev_field_list: continue
doctype_prev_field_list.append([doc_type, dict(prev_field_list)])
if not doctype_prev_field_list: return
return dict(doctype_prev_field_list)
def get_sorted_docfields(self, doclist, doc_type):
"""
get a sorted list of docfield names
"""
sorted_list = sorted([
d for d in doclist
if d.doctype == 'DocField'
and d.parent == doc_type
], key=lambda df: df.idx)
return [d.fieldname for d in sorted_list]
def sort_docfields(self, doc_type, docfields, prev_field_dict):
| docfields.insert(0, cur_field)
except ValueError:
pass
if cur_field in temp_dict:
prev_field = cur_field
get_next_docfield = False
if get_next_docfield:
i += 1
if i>=len(docfields): break
prev_field = docfields[i]
keys, vals = temp_dict.keys(), temp_dict.values()
if prev_field in vals:
i -= 1
prev_field = keys[vals.index(prev_field)]
return docfields
def change_idx(self, doclist, docfields, doc_type):
for d in doclist:
if d.fieldname and d.fieldname in docfields and d.parent == doc_type:
d.idx = docfields.index(d.fieldname) + 1
def add_code(self, doc):
"""add js, css code"""
import os
from webnotes.modules import scrub, get_module_path
import conf
modules_path = get_module_path(doc.module)
path = os.path.join(modules_path, 'doctype', scrub(doc.name))
def _add_code(fname, fieldname):
fpath = os.path.join(path, fname)
if os.path.exists(fpath):
with open(fpath, 'r') as f:
doc.fields[fieldname] = f.read()
_add_code(scrub(doc.name) + '.js', '__js')
_add_code(scrub(doc.name) + '.css', '__css')
_add_code('%s_list.js' % scrub(doc.name), '__listjs')
_add_code('help.md', 'description')
# embed all require files
import re
def _sub(match):
fpath = os.path.join(os.path.dirname(conf.modules_path), \
re.search('["\'][^"\']*["\']', match.group(0)).group(0)[1:-1])
if os.path.exists(fpath):
with open(fpath, 'r') as f:
return '\n' + f.read() + '\n'
else:
return '\n// no file "%s" found \n' % fpath
if doc.fields.get('__js'):
doc.fields['__js'] = re.sub('(wn.require\([^\)]*.)', _sub, doc.fields['__js'])
# custom script
from webnotes.model.code import get_custom_script
custom = get_custom_script(doc.name, 'Client') or ''
doc.fields['__js'] = doc.fields.setdefault('__js', '') + '\n' + custom
def load_select_options(self, doclist):
"""
Loads Select options for 'Select' fields
with link: as start of options
"""
for d in doclist:
if (d.doctype == 'DocField' and d.fieldtype == 'Select' and
d.options and d.options[:5].lower() == 'link:'):
# Get the Select options from the linked DocType (options of the form link:DocType)
opt_list = | """
"""
temp_dict = prev_field_dict.get(doc_type)
if not temp_dict: return
prev_field = 'None' in temp_dict and 'None' or docfields[0]
i = 0
while temp_dict:
get_next_docfield = True
cur_field = temp_dict.get(prev_field)
if cur_field and cur_field in docfields:
try:
del temp_dict[prev_field]
if prev_field in docfields:
docfields.remove(cur_field)
docfields.insert(docfields.index(prev_field) + 1,
cur_field)
elif prev_field == 'None':
docfields.remove(cur_field) | identifier_body |
doctype.py | for t in list(set(table_fields)):
# Get child doc and its fields
table_doclist = webnotes.model.doc.get('DocType', t, 1)
table_doclist += self.get_custom_fields(t)
doclist += table_doclist
self.apply_property_setters(doclist)
if form:
self.load_select_options(doclist)
self.add_code(doclist[0])
self.load_print_formats(doclist)
self.insert_into_cache(doclist)
return doclist
def get_custom_fields(self, doc_type):
"""
Gets a list of custom field docs masked as type DocField
"""
custom_doclist = []
res = webnotes.conn.sql("""SELECT * FROM `tabCustom Field`
WHERE dt = %s AND docstatus < 2""", doc_type, as_dict=1)
for r in res:
# Cheat! Mask Custom Field as DocField
custom_field = webnotes.model.doc.Document(fielddata=r)
self.mask_custom_field(custom_field, doc_type)
custom_doclist.append(custom_field)
return custom_doclist
def mask_custom_field(self, custom_field, doc_type):
"""
Masks doctype and parent related properties of Custom Field as that
of DocField
"""
custom_field.fields.update({
'doctype': 'DocField',
'parent': doc_type,
'parentfield': 'fields',
'parenttype': 'DocType',
})
def get_table_fields(self, doclist):
"""
Returns [[options, fieldname]] of fields of type 'Table'
"""
table_fields = []
for d in doclist:
if d.doctype=='DocField' and d.fieldtype == 'Table':
table_fields.append([d.options, d.fieldname])
return table_fields
def apply_property_setters(self, doclist):
"""
"""
property_dict, doc_type_list = self.get_property_setters(doclist)
for d in doclist:
self.update_field_properties(d, property_dict)
self.apply_previous_field_properties(doclist, property_dict,
doc_type_list)
def get_property_setters(self, doclist):
"""
Returns a dict of property setter lists and doc_type_list
"""
from webnotes.utils import cstr
property_dict = {}
# final property dict will be
# {
# doc_type: {
# fieldname: [list of property setter dicts]
# }
# }
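# e.g. {'Sales Order': {'status': [{'property': 'default', 'value': 'Draft', ...}]}} (doctype and values here are illustrative only)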
doc_type_list = list(set(
d.doctype=='DocType' and d.name or d.parent
for d in doclist))
in_string = '", "'.join(doc_type_list)
for ps in webnotes.conn.sql("""\
SELECT doc_type, field_name, property, property_type, value
FROM `tabProperty Setter`
WHERE doc_type IN ("%s")""" % in_string, as_dict=1):
property_dict.setdefault(ps.get('doc_type'),
{}).setdefault(cstr(ps.get('field_name')), []).append(ps)
return property_dict, doc_type_list
def update_field_properties(self, d, property_dict):
"""
apply properties except previous_field ones
"""
from webnotes.utils import cstr
# get property setters for a given doctype's fields
doctype_property_dict = (d.doctype=='DocField' and property_dict.get(d.parent) or
property_dict.get(d.name))
if not (doctype_property_dict and doctype_property_dict.get(cstr(d.fieldname))): |
from webnotes.utils import cint
prop_updates = []
for prop in doctype_property_dict.get(cstr(d.fieldname)):
if prop.get('property')=='previous_field': continue
if prop.get('property_type') == 'Check' or \
prop.get('value') in ['0', '1']:
prop_updates.append([prop.get('property'), cint(prop.get('value'))])
else:
prop_updates.append([prop.get('property'), prop.get('value')])
prop_updates and d.fields.update(dict(prop_updates))
def apply_previous_field_properties(self, doclist, property_dict,
doc_type_list):
"""
"""
prev_field_dict = self.get_previous_field_properties(property_dict)
if not prev_field_dict: return
for doc_type in doc_type_list:
docfields = self.get_sorted_docfields(doclist, doc_type)
docfields = self.sort_docfields(doc_type, docfields, prev_field_dict)
if docfields: self.change_idx(doclist, docfields, doc_type)
def get_previous_field_properties(self, property_dict):
"""
setup prev_field_dict
"""
from webnotes.utils import cstr
doctype_prev_field_list = []
for doc_type in property_dict:
prev_field_list = []
for prop_list in property_dict.get(doc_type).values():
for prop in prop_list:
if prop.get('property') == 'previous_field':
prev_field_list.append([prop.get('value'),
prop.get('field_name')])
break
if not prev_field_list: continue
doctype_prev_field_list.append([doc_type, dict(prev_field_list)])
if not doctype_prev_field_list: return
return dict(doctype_prev_field_list)
def get_sorted_docfields(self, doclist, doc_type):
"""
get a sorted list of docfield names
"""
sorted_list = sorted([
d for d in doclist
if d.doctype == 'DocField'
and d.parent == doc_type
], key=lambda df: df.idx)
return [d.fieldname for d in sorted_list]
def sort_docfields(self, doc_type, docfields, prev_field_dict):
"""
"""
temp_dict = prev_field_dict.get(doc_type)
if not temp_dict: return
prev_field = 'None' in temp_dict and 'None' or docfields[0]
i = 0
while temp_dict:
get_next_docfield = True
cur_field = temp_dict.get(prev_field)
if cur_field and cur_field in docfields:
try:
del temp_dict[prev_field]
if prev_field in docfields:
docfields.remove(cur_field)
docfields.insert(docfields.index(prev_field) + 1,
cur_field)
elif prev_field == 'None':
docfields.remove(cur_field)
docfields.insert(0, cur_field)
except ValueError:
pass
if cur_field in temp_dict:
prev_field = cur_field
get_next_docfield = False
if get_next_docfield:
i += 1
if i>=len(docfields): break
prev_field = docfields[i]
keys, vals = temp_dict.keys(), temp_dict.values()
if prev_field in vals:
i -= 1
prev_field = keys[vals.index(prev_field)]
return docfields
def change_idx(self, doclist, docfields, doc_type):
for d in doclist:
if d.fieldname and d.fieldname in docfields and d.parent == doc_type:
d.idx = docfields.index(d.fieldname) + 1
def add_code(self, doc):
"""add js, css code"""
import os
from webnotes.modules import scrub, get_module_path
import conf
modules_path = get_module_path(doc.module)
path = os.path.join(modules_path, 'doctype', scrub(doc.name))
def _add_code(fname, fieldname):
fpath = os.path.join(path, fname)
if os.path.exists(fpath):
with open(fpath, 'r') as f:
doc.fields[fieldname] = f.read()
_add_code(scrub(doc.name) + '.js', '__js')
_add_code(scrub(doc.name) + '.css', '__css')
_add_code('%s_list.js' % scrub(doc.name), '__listjs')
_add_code('help.md', 'description')
# inline the contents of files referenced by wn.require() calls
import re
def _sub(match):
fpath = os.path.join(os.path.dirname(conf.modules_path), \
re.search('["\'][^"\']*["\']', match.group(0)).group(0)[1:-1])
if os.path.exists(fpath):
with open(fpath, 'r') as f:
return '\n' + f.read() + '\n'
else:
return '\n// no file "%s" found \n' % fpath
if doc.fields.get('__js'):
doc.fields['__js'] = re.sub('(wn.require\([^\)]*.)', _sub, doc.fields['__js'])
# custom script
from webnotes.model.code import get_custom_script
custom = get_custom_script(doc.name, 'Client') or ''
doc.fields['__js'] = doc.fields.setdefault('__js', '') + '\n' + custom
def load_select_options(self, doclist):
"""
Loads Select options for 'Select' fields
with link: as start of options
"""
for d in doclist:
if (d.doctype == 'DocField' and d.fieldtype == 'Select' and
d.options and d.options[:5].lower() == 'link:'):
# Get the Select options from the linked DocType (options of the form link:DocType)
opt | return | conditional_block |
doctype.py | for t in list(set(table_fields)):
# Get child doc and its fields
table_doclist = webnotes.model.doc.get('DocType', t, 1)
table_doclist += self.get_custom_fields(t)
doclist += table_doclist
self.apply_property_setters(doclist)
if form:
self.load_select_options(doclist)
self.add_code(doclist[0])
self.load_print_formats(doclist)
self.insert_into_cache(doclist)
return doclist
def | (self, doc_type):
"""
Gets a list of custom field docs masked as type DocField
"""
custom_doclist = []
res = webnotes.conn.sql("""SELECT * FROM `tabCustom Field`
WHERE dt = %s AND docstatus < 2""", doc_type, as_dict=1)
for r in res:
# Cheat! Mask Custom Field as DocField
custom_field = webnotes.model.doc.Document(fielddata=r)
self.mask_custom_field(custom_field, doc_type)
custom_doclist.append(custom_field)
return custom_doclist
def mask_custom_field(self, custom_field, doc_type):
"""
Masks doctype and parent related properties of Custom Field as that
of DocField
"""
custom_field.fields.update({
'doctype': 'DocField',
'parent': doc_type,
'parentfield': 'fields',
'parenttype': 'DocType',
})
def get_table_fields(self, doclist):
"""
Returns [[options, fieldname]] of fields of type 'Table'
"""
table_fields = []
for d in doclist:
if d.doctype=='DocField' and d.fieldtype == 'Table':
table_fields.append([d.options, d.fieldname])
return table_fields
def apply_property_setters(self, doclist):
"""
"""
property_dict, doc_type_list = self.get_property_setters(doclist)
for d in doclist:
self.update_field_properties(d, property_dict)
self.apply_previous_field_properties(doclist, property_dict,
doc_type_list)
def get_property_setters(self, doclist):
"""
Returns a dict of property setter lists and doc_type_list
"""
from webnotes.utils import cstr
property_dict = {}
# final property dict will be
# {
# doc_type: {
# fieldname: [list of property setter dicts]
# }
# }
doc_type_list = list(set(
d.doctype=='DocType' and d.name or d.parent
for d in doclist))
in_string = '", "'.join(doc_type_list)
for ps in webnotes.conn.sql("""\
SELECT doc_type, field_name, property, property_type, value
FROM `tabProperty Setter`
WHERE doc_type IN ("%s")""" % in_string, as_dict=1):
property_dict.setdefault(ps.get('doc_type'),
{}).setdefault(cstr(ps.get('field_name')), []).append(ps)
return property_dict, doc_type_list
def update_field_properties(self, d, property_dict):
"""
apply properties except previous_field ones
"""
from webnotes.utils import cstr
# get property setters for a given doctype's fields
doctype_property_dict = (d.doctype=='DocField' and property_dict.get(d.parent) or
property_dict.get(d.name))
if not (doctype_property_dict and doctype_property_dict.get(cstr(d.fieldname))): return
from webnotes.utils import cint
prop_updates = []
for prop in doctype_property_dict.get(cstr(d.fieldname)):
if prop.get('property')=='previous_field': continue
if prop.get('property_type') == 'Check' or \
prop.get('value') in ['0', '1']:
prop_updates.append([prop.get('property'), cint(prop.get('value'))])
else:
prop_updates.append([prop.get('property'), prop.get('value')])
prop_updates and d.fields.update(dict(prop_updates))
def apply_previous_field_properties(self, doclist, property_dict,
doc_type_list):
"""
"""
prev_field_dict = self.get_previous_field_properties(property_dict)
if not prev_field_dict: return
for doc_type in doc_type_list:
docfields = self.get_sorted_docfields(doclist, doc_type)
docfields = self.sort_docfields(doc_type, docfields, prev_field_dict)
if docfields: self.change_idx(doclist, docfields, doc_type)
def get_previous_field_properties(self, property_dict):
"""
setup prev_field_dict
"""
from webnotes.utils import cstr
doctype_prev_field_list = []
for doc_type in property_dict:
prev_field_list = []
for prop_list in property_dict.get(doc_type).values():
for prop in prop_list:
if prop.get('property') == 'previous_field':
prev_field_list.append([prop.get('value'),
prop.get('field_name')])
break
if not prev_field_list: continue
doctype_prev_field_list.append([doc_type, dict(prev_field_list)])
if not doctype_prev_field_list: return
return dict(doctype_prev_field_list)
def get_sorted_docfields(self, doclist, doc_type):
"""
get a sorted list of docfield names
"""
sorted_list = sorted([
d for d in doclist
if d.doctype == 'DocField'
and d.parent == doc_type
], key=lambda df: df.idx)
return [d.fieldname for d in sorted_list]
def sort_docfields(self, doc_type, docfields, prev_field_dict):
"""
"""
temp_dict = prev_field_dict.get(doc_type)
if not temp_dict: return
prev_field = 'None' in temp_dict and 'None' or docfields[0]
i = 0
while temp_dict:
get_next_docfield = True
cur_field = temp_dict.get(prev_field)
if cur_field and cur_field in docfields:
try:
del temp_dict[prev_field]
if prev_field in docfields:
docfields.remove(cur_field)
docfields.insert(docfields.index(prev_field) + 1,
cur_field)
elif prev_field == 'None':
docfields.remove(cur_field)
docfields.insert(0, cur_field)
except ValueError:
pass
if cur_field in temp_dict:
prev_field = cur_field
get_next_docfield = False
if get_next_docfield:
i += 1
if i>=len(docfields): break
prev_field = docfields[i]
keys, vals = temp_dict.keys(), temp_dict.values()
if prev_field in vals:
i -= 1
prev_field = keys[vals.index(prev_field)]
return docfields
def change_idx(self, doclist, docfields, doc_type):
for d in doclist:
if d.fieldname and d.fieldname in docfields and d.parent == doc_type:
d.idx = docfields.index(d.fieldname) + 1
def add_code(self, doc):
"""add js, css code"""
import os
from webnotes.modules import scrub, get_module_path
import conf
modules_path = get_module_path(doc.module)
path = os.path.join(modules_path, 'doctype', scrub(doc.name))
def _add_code(fname, fieldname):
fpath = os.path.join(path, fname)
if os.path.exists(fpath):
with open(fpath, 'r') as f:
doc.fields[fieldname] = f.read()
_add_code(scrub(doc.name) + '.js', '__js')
_add_code(scrub(doc.name) + '.css', '__css')
_add_code('%s_list.js' % scrub(doc.name), '__listjs')
_add_code('help.md', 'description')
# inline the contents of files referenced by wn.require() calls
import re
def _sub(match):
fpath = os.path.join(os.path.dirname(conf.modules_path), \
re.search('["\'][^"\']*["\']', match.group(0)).group(0)[1:-1])
if os.path.exists(fpath):
with open(fpath, 'r') as f:
return '\n' + f.read() + '\n'
else:
return '\n// no file "%s" found \n' % fpath
if doc.fields.get('__js'):
doc.fields['__js'] = re.sub('(wn.require\([^\)]*.)', _sub, doc.fields['__js'])
# custom script
from webnotes.model.code import get_custom_script
custom = get_custom_script(doc.name, 'Client') or ''
doc.fields['__js'] = doc.fields.setdefault('__js', '') + '\n' + custom
def load_select_options(self, doclist):
"""
Loads Select options for 'Select' fields
with link: as start of options
"""
for d in doclist:
if (d.doctype == 'DocField' and d.fieldtype == 'Select' and
d.options and d.options[:5].lower() == 'link:'):
# Get the Select options from the linked DocType (options of the form link:DocType)
opt | get_custom_fields | identifier_name |
MotionTrackingLK.py | (w, h, lx, ly):
devx = lx * w/2 + w/2
devy = ly * h/2 + h/2
return int(devx), int(devy)
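# logical coordinates lie in [-1, 1]; device coordinates are pixel indices (device_to_logical below is the inverse mapping)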
def device_to_logical(w, h, devx, devy):
lx = (devx - w/2)/(w/2)
ly = (devy - h/2)/(h/2)
return lx, ly
def display_tracks(imgs, batch_tracks):
imgs = np.copy(imgs)
_,_,h,w,c = imgs.shape
imgs_with_tracks = []
for img_seq,tracks in zip(imgs, batch_tracks):
img = img_seq[-1]
for track in tracks:
for t_seq in range(1, len(track)):
lx_p, ly_p = track[t_seq-1]
x_p, y_p = logical_to_device(w, h, lx_p, ly_p)
lx, ly = track[t_seq]
x, y = logical_to_device(w, h, lx, ly)
img = cv.arrowedLine(img, (x_p, y_p), (x, y), (255, 0, 0))
imgs_with_tracks.append(img)
return np.asfarray(imgs_with_tracks)
class MotionTrackingLK(tf.keras.layers.Layer):
def __init__(self, num_tracks, window_pixel_wh=21, sigma=2, iterations=5, **kwargs):
self.sigma = sigma
assert(num_tracks > 1)
assert(window_pixel_wh >= 3)
self.num_tracks = num_tracks
self.win_pixel_wh = window_pixel_wh
self.iterations = iterations
super(MotionTrackingLK, self).__init__(**kwargs)
def build(self, input_shape):
# grab the image dimensions here for later use; this also surfaces shape errors early at build time
self.seq_len = input_shape[1][1]
self.h = input_shape[1][2]
self.w = input_shape[1][3]
self.c = input_shape[1][4]
self.center_relative = tf.constant(
[self.w/self.win_pixel_wh, self.h/self.win_pixel_wh],
shape=[1,1,2,1]
)
# we scale to the smaller axis and then apply transforms to that resulting square
# originally was [0.0, 1.0], but this resulted in the model being unable to learn. not sure why. possibly because tanh learns better than sigmoid
x_t, y_t = tf.meshgrid(
tf.linspace(-1.0, 1.0, self.win_pixel_wh),
tf.linspace(-1.0, 1.0, self.win_pixel_wh),
)
self.sampling_grid = tf.stack([
tf.reshape(x_t, [self.win_pixel_wh*self.win_pixel_wh]),
tf.reshape(y_t, [self.win_pixel_wh*self.win_pixel_wh]),
])
self.sobel_x = tf.constant([
[-1., 0., 1.],
[-2., 0., 2.],
[-1., 0., 1.],
],
shape=[3, 3, 1, 1]
)
self.sobel_y = tf.constant([
[-1., -2., -1.],
[ 0., 0., 0.],
[ 1., 2., 1.],
],
shape=[3, 3, 1, 1]
)
self.scharr_x = tf.constant([
[-3., 0., 3.],
[-10., 0., 10.],
[-3., 0., 3.],
],
shape=[3, 3, 1, 1]
)
self.scharr_y = tf.constant([
[-3., -10., -3.],
[ 0., 0., 0.],
[ 3., 10., 3.],
],
shape=[3, 3, 1, 1]
)
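# build a Gaussian window: each pixel is weighted by its distance from the window centre, so central pixels dominate the least-squares fit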
weights = np.empty([self.win_pixel_wh, self.win_pixel_wh])
center = self.win_pixel_wh//2
for y in range(self.win_pixel_wh):
for x in range(self.win_pixel_wh):
weights[y, x] = (x-center)**2 + (y-center)**2
weights = gaussian(np.sqrt(weights), self.sigma)
self.win_weights = tf.constant(weights, shape=[1, 1, self.win_pixel_wh*self.win_pixel_wh, 1], dtype=tf.float32)
# print(weights)
# tf.print(weights)
# tf.print(tf.reduce_max(weights))
super(MotionTrackingLK, self).build(input_shape)
def sample_ntracks_from_2frames(self, sampler, frames):
x = ((sampler[:, :, 0]) * self.win_pixel_wh) * 0.5
y = ((sampler[:, :, 1]) * self.win_pixel_wh) * 0.5
x = tf.reshape(tf.tile(tf.expand_dims(x, axis=1), [1, 2, 1, 1]), [-1, self.num_tracks*self.win_pixel_wh**2])
y = tf.reshape(tf.tile(tf.expand_dims(y, axis=1), [1, 2, 1, 1]), [-1, self.num_tracks*self.win_pixel_wh**2])
x0 = tf.floor(x)
x1 = x0 + 1
y0 = tf.floor(y)
y1 = y0 + 1
x0 = tf.clip_by_value(x0, 0, self.w-1)
x1 = tf.clip_by_value(x1, 0, self.w-1)
y0 = tf.clip_by_value(y0, 0, self.h-1)
y1 = tf.clip_by_value(y1, 0, self.h-1)
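# bilinear interpolation weights for the four neighbouring pixels (wa: top-left, wb: top-right, wc: bottom-left, wd: bottom-right)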
wa = tf.expand_dims((y1-y) * (x1-x), axis=-1)
wb = tf.expand_dims((y1-y) * (x-x0), axis=-1)
wc = tf.expand_dims((y-y0) * (x1-x), axis=-1)
wd = tf.expand_dims((y-y0) * (x-x0), axis=-1)
x0 = tf.cast(x0, tf.int32)
x1 = tf.cast(x1, tf.int32)
y0 = tf.cast(y0, tf.int32)
y1 = tf.cast(y1, tf.int32)
# merge the batch and frame axes so the leading dimension matches the tiled sample coordinates (required by gather_nd with batch_dims=1)
tiled_imgs = tf.reshape(frames, [-1, self.h, self.w, self.c])
# the merged leading dimension iterates first frame then second frame within each batch element
Ia = tf.gather_nd(tiled_imgs, tf.stack([y0, x0], axis=-1), batch_dims=1)
Ib = tf.gather_nd(tiled_imgs, tf.stack([y0, x1], axis=-1), batch_dims=1)
Ic = tf.gather_nd(tiled_imgs, tf.stack([y1, x0], axis=-1), batch_dims=1)
Id = tf.gather_nd(tiled_imgs, tf.stack([y1, x1], axis=-1), batch_dims=1)
return tf.reshape(wa*Ia + wb*Ib + wc*Ic + wd*Id, [-1, 2, self.num_tracks, self.win_pixel_wh, self.win_pixel_wh, self.c])
def calc_velocity_2frames_ntracks_LK(self, first_frame, second_frame):
ff_comb = tf.reshape(first_frame, [-1, self.win_pixel_wh, self.win_pixel_wh, self.c])
Ix = tf.reshape(
tf.nn.convolution(ff_comb, self.sobel_x, padding="SAME"),
[-1, self.num_tracks, self.win_pixel_wh*self.win_pixel_wh, 1]
)
Iy = tf.reshape(
tf.nn.convolution(ff_comb, self.sobel_y, padding="SAME"),
[-1, self.num_tracks, self.win_pixel_wh*self.win_pixel_wh, 1]
)
A = tf.concat([Ix,Iy], axis=3)
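# Lucas-Kanade normal equations: solve (A^T W A) v = A^T W b per window, where A stacks the spatial gradients, b is the negated temporal difference and W the Gaussian window weights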
ATA = tf.matmul(A, A*self.win_weights, transpose_a=True)
# ATA_1 = tf.linalg.inv(ATA)
# tf.linalg.inv raised a cuSOLVER error here, so the 2x2 inverse is computed manually via the adjugate formula
a = ATA[:,:, 0,0]
b = ATA[:,:, 0,1]
c = ATA[:,:, 1,0]
d = ATA[:,:, 1,1]
ATA_1 = tf.reshape(1/(a*d - b*c + 1e-07), [-1, self.num_tracks, 1, 1])*tf.stack([tf.stack([d, -b], axis=-1), tf.stack([-c, a], axis=-1)], axis=3)
b = -1*tf.reshape(
second_frame-first_frame,
[-1, self.num_tracks, self.win_pixel_wh*self.win_pixel_wh, 1]
)*self.win_weights
ATb = tf.matmul(A, b, transpose_a=True)
VxVy = tf.matmul(ATA_1, ATb)
return VxVy | logical_to_device | identifier_name |
|
MotionTrackingLK.py | = iterations
super(MotionTrackingLK, self).__init__(**kwargs)
def build(self, input_shape):
# grab the image dimensions here for later use; this also surfaces shape errors early at build time
self.seq_len = input_shape[1][1]
self.h = input_shape[1][2]
self.w = input_shape[1][3]
self.c = input_shape[1][4]
self.center_relative = tf.constant(
[self.w/self.win_pixel_wh, self.h/self.win_pixel_wh],
shape=[1,1,2,1]
)
# we scale to the smaller axis and then apply transforms to that resulting square
# originally was [0.0, 1.0], but this resulted in the model being unable to learn. not sure why. possibly because tanh learns better than sigmoid
x_t, y_t = tf.meshgrid(
tf.linspace(-1.0, 1.0, self.win_pixel_wh),
tf.linspace(-1.0, 1.0, self.win_pixel_wh),
)
self.sampling_grid = tf.stack([
tf.reshape(x_t, [self.win_pixel_wh*self.win_pixel_wh]),
tf.reshape(y_t, [self.win_pixel_wh*self.win_pixel_wh]),
])
self.sobel_x = tf.constant([
[-1., 0., 1.],
[-2., 0., 2.],
[-1., 0., 1.],
],
shape=[3, 3, 1, 1]
)
self.sobel_y = tf.constant([
[-1., -2., -1.],
[ 0., 0., 0.],
[ 1., 2., 1.],
],
shape=[3, 3, 1, 1]
)
self.scharr_x = tf.constant([
[-3., 0., 3.],
[-10., 0., 10.],
[-3., 0., 3.],
],
shape=[3, 3, 1, 1]
)
self.scharr_y = tf.constant([
[-3., -10., -3.],
[ 0., 0., 0.],
[ 3., 10., 3.],
],
shape=[3, 3, 1, 1]
)
weights = np.empty([self.win_pixel_wh, self.win_pixel_wh])
center = self.win_pixel_wh//2
for y in range(self.win_pixel_wh):
for x in range(self.win_pixel_wh):
weights[y, x] = (x-center)**2 + (y-center)**2
weights = gaussian(np.sqrt(weights), self.sigma)
self.win_weights = tf.constant(weights, shape=[1, 1, self.win_pixel_wh*self.win_pixel_wh, 1], dtype=tf.float32)
# print(weights)
# tf.print(weights)
# tf.print(tf.reduce_max(weights))
super(MotionTrackingLK, self).build(input_shape)
def sample_ntracks_from_2frames(self, sampler, frames):
x = ((sampler[:, :, 0]) * self.win_pixel_wh) * 0.5
y = ((sampler[:, :, 1]) * self.win_pixel_wh) * 0.5
x = tf.reshape(tf.tile(tf.expand_dims(x, axis=1), [1, 2, 1, 1]), [-1, self.num_tracks*self.win_pixel_wh**2])
y = tf.reshape(tf.tile(tf.expand_dims(y, axis=1), [1, 2, 1, 1]), [-1, self.num_tracks*self.win_pixel_wh**2])
x0 = tf.floor(x)
x1 = x0 + 1
y0 = tf.floor(y)
y1 = y0 + 1
x0 = tf.clip_by_value(x0, 0, self.w-1)
x1 = tf.clip_by_value(x1, 0, self.w-1)
y0 = tf.clip_by_value(y0, 0, self.h-1)
y1 = tf.clip_by_value(y1, 0, self.h-1)
wa = tf.expand_dims((y1-y) * (x1-x), axis=-1)
wb = tf.expand_dims((y1-y) * (x-x0), axis=-1)
wc = tf.expand_dims((y-y0) * (x1-x), axis=-1)
wd = tf.expand_dims((y-y0) * (x-x0), axis=-1)
x0 = tf.cast(x0, tf.int32)
x1 = tf.cast(x1, tf.int32)
y0 = tf.cast(y0, tf.int32)
y1 = tf.cast(y1, tf.int32)
# merge the batch and frame axes so the leading dimension matches the tiled sample coordinates (required by gather_nd with batch_dims=1)
tiled_imgs = tf.reshape(frames, [-1, self.h, self.w, self.c])
# the merged leading dimension iterates first frame then second frame within each batch element
Ia = tf.gather_nd(tiled_imgs, tf.stack([y0, x0], axis=-1), batch_dims=1)
Ib = tf.gather_nd(tiled_imgs, tf.stack([y0, x1], axis=-1), batch_dims=1)
Ic = tf.gather_nd(tiled_imgs, tf.stack([y1, x0], axis=-1), batch_dims=1)
Id = tf.gather_nd(tiled_imgs, tf.stack([y1, x1], axis=-1), batch_dims=1)
return tf.reshape(wa*Ia + wb*Ib + wc*Ic + wd*Id, [-1, 2, self.num_tracks, self.win_pixel_wh, self.win_pixel_wh, self.c])
def calc_velocity_2frames_ntracks_LK(self, first_frame, second_frame):
ff_comb = tf.reshape(first_frame, [-1, self.win_pixel_wh, self.win_pixel_wh, self.c])
Ix = tf.reshape(
tf.nn.convolution(ff_comb, self.sobel_x, padding="SAME"),
[-1, self.num_tracks, self.win_pixel_wh*self.win_pixel_wh, 1]
)
Iy = tf.reshape(
tf.nn.convolution(ff_comb, self.sobel_y, padding="SAME"),
[-1, self.num_tracks, self.win_pixel_wh*self.win_pixel_wh, 1]
)
A = tf.concat([Ix,Iy], axis=3)
ATA = tf.matmul(A, A*self.win_weights, transpose_a=True)
# ATA_1 = tf.linalg.inv(ATA)
# tf.linalg.inv raised a cuSOLVER error here, so the 2x2 inverse is computed manually via the adjugate formula
a = ATA[:,:, 0,0]
b = ATA[:,:, 0,1]
c = ATA[:,:, 1,0]
d = ATA[:,:, 1,1]
ATA_1 = tf.reshape(1/(a*d - b*c + 1e-07), [-1, self.num_tracks, 1, 1])*tf.stack([tf.stack([d, -b], axis=-1), tf.stack([-c, a], axis=-1)], axis=3)
b = -1*tf.reshape(
second_frame-first_frame,
[-1, self.num_tracks, self.win_pixel_wh*self.win_pixel_wh, 1]
)*self.win_weights |
VxVy = tf.matmul(ATA_1, ATb)
return VxVy
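# iterative Lucas-Kanade: re-sample the second frame at the updated positions and accumulate the velocity estimates until the requested number of refinements is done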
def iterative_LK(self, sampler, frames, iterations):
out = self.sample_ntracks_from_2frames(sampler, frames)
first_frame = out[:, 0]
factor = 1.0
VxVy = self.calc_velocity_2frames_ntracks_LK(first_frame, out[:, 1])*factor
sampler += VxVy
sum_VxVy = VxVy
i = tf.constant(1)
cond = lambda i, s, f, sf, svv: tf.less(i, iterations)
def iterate(i, sampler, frames, first_frame, sum_VxVy):
out = self.sample_ntracks_from_2frames(sampler, frames)
VxVy = self.calc_velocity_2frames_ntracks_LK(first_frame, out[:, 1])*factor
sampler += VxVy
i += 1
sum_VxVy += VxVy
return i, sampler, frames, first_frame, sum_VxVy
_, sampler, _, _, sum_VxVy = tf.while_loop(cond, iterate, [i, sampler, frames, first_frame, sum_VxVy])
return sampler, tf.reshape(sum_VxVy, [-1, self.num_tracks, 2])
def call(self, inputs):
init_track_locs = tf.reshape(inputs[0], [-1, self.num_tracks, 2, 1]) * self.center_relative + self.center_relative
imgs = inputs[1]
sampler = tf.reshape(self.sampling_grid, [1, 1, 2, -1]) + init_track_locs
init_track_locs = tf.reshape(init_track_locs, [-1, self.num_tracks, | ATb = tf.matmul(A, b, transpose_a=True) | random_line_split |
MotionTrackingLK.py | _pixel_wh]),
tf.reshape(y_t, [self.win_pixel_wh*self.win_pixel_wh]),
])
self.sobel_x = tf.constant([
[-1., 0., 1.],
[-2., 0., 2.],
[-1., 0., 1.],
],
shape=[3, 3, 1, 1]
)
self.sobel_y = tf.constant([
[-1., -2., -1.],
[ 0., 0., 0.],
[ 1., 2., 1.],
],
shape=[3, 3, 1, 1]
)
self.scharr_x = tf.constant([
[-3., 0., 3.],
[-10., 0., 10.],
[-3., 0., 3.],
],
shape=[3, 3, 1, 1]
)
self.scharr_y = tf.constant([
[-3., -10., -3.],
[ 0., 0., 0.],
[ 3., 10., 3.],
],
shape=[3, 3, 1, 1]
)
weights = np.empty([self.win_pixel_wh, self.win_pixel_wh])
center = self.win_pixel_wh//2
for y in range(self.win_pixel_wh):
for x in range(self.win_pixel_wh):
weights[y, x] = (x-center)**2 + (y-center)**2
weights = gaussian(np.sqrt(weights), self.sigma)
self.win_weights = tf.constant(weights, shape=[1, 1, self.win_pixel_wh*self.win_pixel_wh, 1], dtype=tf.float32)
# print(weights)
# tf.print(weights)
# tf.print(tf.reduce_max(weights))
super(MotionTrackingLK, self).build(input_shape)
def sample_ntracks_from_2frames(self, sampler, frames):
x = ((sampler[:, :, 0]) * self.win_pixel_wh) * 0.5
y = ((sampler[:, :, 1]) * self.win_pixel_wh) * 0.5
x = tf.reshape(tf.tile(tf.expand_dims(x, axis=1), [1, 2, 1, 1]), [-1, self.num_tracks*self.win_pixel_wh**2])
y = tf.reshape(tf.tile(tf.expand_dims(y, axis=1), [1, 2, 1, 1]), [-1, self.num_tracks*self.win_pixel_wh**2])
x0 = tf.floor(x)
x1 = x0 + 1
y0 = tf.floor(y)
y1 = y0 + 1
x0 = tf.clip_by_value(x0, 0, self.w-1)
x1 = tf.clip_by_value(x1, 0, self.w-1)
y0 = tf.clip_by_value(y0, 0, self.h-1)
y1 = tf.clip_by_value(y1, 0, self.h-1)
wa = tf.expand_dims((y1-y) * (x1-x), axis=-1)
wb = tf.expand_dims((y1-y) * (x-x0), axis=-1)
wc = tf.expand_dims((y-y0) * (x1-x), axis=-1)
wd = tf.expand_dims((y-y0) * (x-x0), axis=-1)
x0 = tf.cast(x0, tf.int32)
x1 = tf.cast(x1, tf.int32)
y0 = tf.cast(y0, tf.int32)
y1 = tf.cast(y1, tf.int32)
# merge the batch and frame axes so the leading dimension matches the tiled sample coordinates (required by gather_nd with batch_dims=1)
tiled_imgs = tf.reshape(frames, [-1, self.h, self.w, self.c])
# the merged leading dimension iterates first frame then second frame within each batch element
Ia = tf.gather_nd(tiled_imgs, tf.stack([y0, x0], axis=-1), batch_dims=1)
Ib = tf.gather_nd(tiled_imgs, tf.stack([y0, x1], axis=-1), batch_dims=1)
Ic = tf.gather_nd(tiled_imgs, tf.stack([y1, x0], axis=-1), batch_dims=1)
Id = tf.gather_nd(tiled_imgs, tf.stack([y1, x1], axis=-1), batch_dims=1)
return tf.reshape(wa*Ia + wb*Ib + wc*Ic + wd*Id, [-1, 2, self.num_tracks, self.win_pixel_wh, self.win_pixel_wh, self.c])
def calc_velocity_2frames_ntracks_LK(self, first_frame, second_frame):
ff_comb = tf.reshape(first_frame, [-1, self.win_pixel_wh, self.win_pixel_wh, self.c])
Ix = tf.reshape(
tf.nn.convolution(ff_comb, self.sobel_x, padding="SAME"),
[-1, self.num_tracks, self.win_pixel_wh*self.win_pixel_wh, 1]
)
Iy = tf.reshape(
tf.nn.convolution(ff_comb, self.sobel_y, padding="SAME"),
[-1, self.num_tracks, self.win_pixel_wh*self.win_pixel_wh, 1]
)
A = tf.concat([Ix,Iy], axis=3)
ATA = tf.matmul(A, A*self.win_weights, transpose_a=True)
# ATA_1 = tf.linalg.inv(ATA)
# tf.linalg.inv raised a cuSOLVER error here, so the 2x2 inverse is computed manually via the adjugate formula
a = ATA[:,:, 0,0]
b = ATA[:,:, 0,1]
c = ATA[:,:, 1,0]
d = ATA[:,:, 1,1]
ATA_1 = tf.reshape(1/(a*d - b*c + 1e-07), [-1, self.num_tracks, 1, 1])*tf.stack([tf.stack([d, -b], axis=-1), tf.stack([-c, a], axis=-1)], axis=3)
b = -1*tf.reshape(
second_frame-first_frame,
[-1, self.num_tracks, self.win_pixel_wh*self.win_pixel_wh, 1]
)*self.win_weights
ATb = tf.matmul(A, b, transpose_a=True)
VxVy = tf.matmul(ATA_1, ATb)
return VxVy
def iterative_LK(self, sampler, frames, iterations):
out = self.sample_ntracks_from_2frames(sampler, frames)
first_frame = out[:, 0]
factor = 1.0
VxVy = self.calc_velocity_2frames_ntracks_LK(first_frame, out[:, 1])*factor
sampler += VxVy
sum_VxVy = VxVy
i = tf.constant(1)
cond = lambda i, s, f, sf, svv: tf.less(i, iterations)
def iterate(i, sampler, frames, first_frame, sum_VxVy):
out = self.sample_ntracks_from_2frames(sampler, frames)
VxVy = self.calc_velocity_2frames_ntracks_LK(first_frame, out[:, 1])*factor
sampler += VxVy
i += 1
sum_VxVy += VxVy
return i, sampler, frames, first_frame, sum_VxVy
_, sampler, _, _, sum_VxVy = tf.while_loop(cond, iterate, [i, sampler, frames, first_frame, sum_VxVy])
return sampler, tf.reshape(sum_VxVy, [-1, self.num_tracks, 2])
def call(self, inputs):
init_track_locs = tf.reshape(inputs[0], [-1, self.num_tracks, 2, 1]) * self.center_relative + self.center_relative
imgs = inputs[1]
sampler = tf.reshape(self.sampling_grid, [1, 1, 2, -1]) + init_track_locs
init_track_locs = tf.reshape(init_track_locs, [-1, self.num_tracks, 1, 2])
sampler, tot_VxVy = self.iterative_LK(sampler, imgs[:, 0:2], self.iterations)
tot_VxVy = tf.reshape(tot_VxVy, [-1, self.num_tracks, 1, 2])
tot_VxVy = tf.concat([init_track_locs, tot_VxVy + init_track_locs], axis=2)
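# tot_VxVy accumulates absolute track positions: the initial locations first, then the position after each processed frame pair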
i = tf.constant(1)
cond = lambda i, s, imgs, tot_VxVy: tf.less(i, self.seq_len-1)
def iterate(i, sampler, imgs, tot_VxVy):
| sampler, sum_VxVy = self.iterative_LK(sampler, imgs[:, i:i+2], self.iterations)
sum_VxVy = tf.reshape(sum_VxVy, [-1, self.num_tracks, 1, 2])
prev = tf.reshape(tot_VxVy[:, :, i], [-1, self.num_tracks, 1, 2])
tot_VxVy = tf.concat([tot_VxVy, sum_VxVy+prev], axis=2)
i += 1
return i, sampler, imgs, tot_VxVy | identifier_body |
|
MotionTrackingLK.py |
imgs_with_tracks.append(img)
return np.asfarray(imgs_with_tracks)
class MotionTrackingLK(tf.keras.layers.Layer):
def __init__(self, num_tracks, window_pixel_wh=21, sigma=2, iterations=5, **kwargs):
self.sigma = sigma
assert(num_tracks > 1)
assert(window_pixel_wh >= 3)
self.num_tracks = num_tracks
self.win_pixel_wh = window_pixel_wh
self.iterations = iterations
super(MotionTrackingLK, self).__init__(**kwargs)
def build(self, input_shape):
# grab the image dimensions here for later use; this also surfaces shape errors early at build time
self.seq_len = input_shape[1][1]
self.h = input_shape[1][2]
self.w = input_shape[1][3]
self.c = input_shape[1][4]
self.center_relative = tf.constant(
[self.w/self.win_pixel_wh, self.h/self.win_pixel_wh],
shape=[1,1,2,1]
)
# we scale to the smaller axis and then apply transforms to that resulting square
# originally was [0.0, 1.0], but this resulted in the model being unable to learn. not sure why. possibly because tanh learns better than sigmoid
x_t, y_t = tf.meshgrid(
tf.linspace(-1.0, 1.0, self.win_pixel_wh),
tf.linspace(-1.0, 1.0, self.win_pixel_wh),
)
self.sampling_grid = tf.stack([
tf.reshape(x_t, [self.win_pixel_wh*self.win_pixel_wh]),
tf.reshape(y_t, [self.win_pixel_wh*self.win_pixel_wh]),
])
self.sobel_x = tf.constant([
[-1., 0., 1.],
[-2., 0., 2.],
[-1., 0., 1.],
],
shape=[3, 3, 1, 1]
)
self.sobel_y = tf.constant([
[-1., -2., -1.],
[ 0., 0., 0.],
[ 1., 2., 1.],
],
shape=[3, 3, 1, 1]
)
self.scharr_x = tf.constant([
[-3., 0., 3.],
[-10., 0., 10.],
[-3., 0., 3.],
],
shape=[3, 3, 1, 1]
)
self.scharr_y = tf.constant([
[-3., -10., -3.],
[ 0., 0., 0.],
[ 3., 10., 3.],
],
shape=[3, 3, 1, 1]
)
weights = np.empty([self.win_pixel_wh, self.win_pixel_wh])
center = self.win_pixel_wh//2
for y in range(self.win_pixel_wh):
for x in range(self.win_pixel_wh):
weights[y, x] = (x-center)**2 + (y-center)**2
weights = gaussian(np.sqrt(weights), self.sigma)
self.win_weights = tf.constant(weights, shape=[1, 1, self.win_pixel_wh*self.win_pixel_wh, 1], dtype=tf.float32)
# print(weights)
# tf.print(weights)
# tf.print(tf.reduce_max(weights))
super(MotionTrackingLK, self).build(input_shape)
def sample_ntracks_from_2frames(self, sampler, frames):
x = ((sampler[:, :, 0]) * self.win_pixel_wh) * 0.5
y = ((sampler[:, :, 1]) * self.win_pixel_wh) * 0.5
x = tf.reshape(tf.tile(tf.expand_dims(x, axis=1), [1, 2, 1, 1]), [-1, self.num_tracks*self.win_pixel_wh**2])
y = tf.reshape(tf.tile(tf.expand_dims(y, axis=1), [1, 2, 1, 1]), [-1, self.num_tracks*self.win_pixel_wh**2])
x0 = tf.floor(x)
x1 = x0 + 1
y0 = tf.floor(y)
y1 = y0 + 1
x0 = tf.clip_by_value(x0, 0, self.w-1)
x1 = tf.clip_by_value(x1, 0, self.w-1)
y0 = tf.clip_by_value(y0, 0, self.h-1)
y1 = tf.clip_by_value(y1, 0, self.h-1)
wa = tf.expand_dims((y1-y) * (x1-x), axis=-1)
wb = tf.expand_dims((y1-y) * (x-x0), axis=-1)
wc = tf.expand_dims((y-y0) * (x1-x), axis=-1)
wd = tf.expand_dims((y-y0) * (x-x0), axis=-1)
x0 = tf.cast(x0, tf.int32)
x1 = tf.cast(x1, tf.int32)
y0 = tf.cast(y0, tf.int32)
y1 = tf.cast(y1, tf.int32)
# merge the batch and frame axes so the leading dimension matches the tiled sample coordinates (required by gather_nd with batch_dims=1)
tiled_imgs = tf.reshape(frames, [-1, self.h, self.w, self.c])
# the merged leading dimension iterates first frame then second frame within each batch element
Ia = tf.gather_nd(tiled_imgs, tf.stack([y0, x0], axis=-1), batch_dims=1)
Ib = tf.gather_nd(tiled_imgs, tf.stack([y0, x1], axis=-1), batch_dims=1)
Ic = tf.gather_nd(tiled_imgs, tf.stack([y1, x0], axis=-1), batch_dims=1)
Id = tf.gather_nd(tiled_imgs, tf.stack([y1, x1], axis=-1), batch_dims=1)
return tf.reshape(wa*Ia + wb*Ib + wc*Ic + wd*Id, [-1, 2, self.num_tracks, self.win_pixel_wh, self.win_pixel_wh, self.c])
def calc_velocity_2frames_ntracks_LK(self, first_frame, second_frame):
ff_comb = tf.reshape(first_frame, [-1, self.win_pixel_wh, self.win_pixel_wh, self.c])
Ix = tf.reshape(
tf.nn.convolution(ff_comb, self.sobel_x, padding="SAME"),
[-1, self.num_tracks, self.win_pixel_wh*self.win_pixel_wh, 1]
)
Iy = tf.reshape(
tf.nn.convolution(ff_comb, self.sobel_y, padding="SAME"),
[-1, self.num_tracks, self.win_pixel_wh*self.win_pixel_wh, 1]
)
A = tf.concat([Ix,Iy], axis=3)
ATA = tf.matmul(A, A*self.win_weights, transpose_a=True)
# ATA_1 = tf.linalg.inv(ATA)
# tf.linalg.inv raised a cuSOLVER error here, so the 2x2 inverse is computed manually via the adjugate formula
a = ATA[:,:, 0,0]
b = ATA[:,:, 0,1]
c = ATA[:,:, 1,0]
d = ATA[:,:, 1,1]
ATA_1 = tf.reshape(1/(a*d - b*c + 1e-07), [-1, self.num_tracks, 1, 1])*tf.stack([tf.stack([d, -b], axis=-1), tf.stack([-c, a], axis=-1)], axis=3)
b = -1*tf.reshape(
second_frame-first_frame,
[-1, self.num_tracks, self.win_pixel_wh*self.win_pixel_wh, 1]
)*self.win_weights
ATb = tf.matmul(A, b, transpose_a=True)
VxVy = tf.matmul(ATA_1, ATb)
return VxVy
def iterative_LK(self, sampler, frames, iterations):
out = self.sample_ntracks_from_2frames(sampler, frames)
first_frame = out[:, 0]
factor = 1.0
VxVy = self.calc_velocity_2frames_ntracks_LK(first_frame, out[:, 1])*factor
sampler += VxVy
sum_VxVy = VxVy
i = tf.constant(1)
cond = lambda i, s, f, sf, svv: tf.less(i, iterations)
def iterate(i, sampler, frames, first_frame, sum_VxVy):
out = self.sample_ntracks_from_2frames(sampler, frames)
VxVy = self.calc_velocity_ | for t_seq in range(1, len(track)):
lx_p, ly_p = track[t_seq-1]
x_p, y_p = logical_to_device(w, h, lx_p, ly_p)
lx, ly = track[t_seq]
x, y = logical_to_device(w, h, lx, ly)
img = cv.arrowedLine(img, (x_p, y_p), (x, y), (255, 0, 0)) | conditional_block |
|
main.rs | ::Jemalloc;
const WORK_BOUND: usize = 4000;
const MAX_COMPRESSED_BLOB_SIZE: i32 = 64 * 1024;
const MAX_DECOMPRESSED_BLOB_SIZE: i32 = 32 * 1024 * 1024;
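/// Aggregated statistics over all nodes, ways and relations in the file, plus the bounding box and timestamp range.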
#[derive(Debug)]
struct | {
timestamp_min: i64,
timestamp_max: i64,
nodes: u64,
ways: u64,
relations: u64,
lon_min: f64,
lon_max: f64,
lat_min: f64,
lat_max: f64,
}
fn main() {
let args: Vec<_> = std::env::args_os().collect();
let filename = &args[1];
let orig_handler = panic::take_hook();
panic::set_hook(Box::new(move |panic_info| {
let handler = &orig_handler;
handler(panic_info);
process::exit(1);
}));
match do_processing(filename, num_cpus::get()) {
Ok(result) => println!("{}", result),
Err(err) => println!("{}", err),
}
}
fn do_processing(filename: &std::ffi::OsStr, thread_count: usize) -> Result<String, String> {
let file_handle = File::open(filename).or(Err("unable to open file"))?;
let mmap = unsafe {
MmapOptions::new()
.map(&file_handle)
.or(Err("unable to mmap"))?
};
let bytes = &mmap[..];
let mut reader = BytesReader::from_bytes(&bytes);
let mut sent_messages = 0;
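// Fan out: OSMData blobs are pushed to worker threads over a bounded channel; each worker accumulates its own OsmStats and sends the partial result back when the channel closes.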
let (sender, receiver) = bounded::<Blob>(WORK_BOUND);
let (return_sender, return_received) = unbounded::<OsmStats>();
thread::scope(|s| {
for _ in 0..thread_count {
let cloned_receiver = receiver.clone();
let cloned_return_sender = return_sender.clone();
s.spawn(move |_| {
let mut buffer = Vec::with_capacity(MAX_DECOMPRESSED_BLOB_SIZE as usize);
let mut stats = empty_osm_stats();
loop {
match cloned_receiver.recv() {
Ok(blob) => {
handle_block(&mut stats, &blob, &mut buffer);
buffer.clear();
}
Err(_e) => break,
}
}
cloned_return_sender
.send(stats)
.expect("failed to send size result");
});
}
loop {
let header_size = match reader.read_sfixed32(bytes).map(|value| value.swap_bytes()) {
Ok(size) if size > MAX_COMPRESSED_BLOB_SIZE => {
return Err("invalid data, compressed blob too large".to_string())
}
Ok(size) => size,
Err(_e) => break,
} as usize;
let blob_header = reader
.read_message_by_len::<BlobHeader>(&bytes, header_size)
.expect("failed to read blob header");
let blob = reader
.read_message_by_len::<Blob>(bytes, blob_header.datasize as usize)
.expect("failed to read blob");
if blob.raw_size.unwrap_or(0) > MAX_DECOMPRESSED_BLOB_SIZE {
return Err("invalid data, uncompressed blob too large".to_string());
}
if blob_header.type_pb == "OSMData" {
sent_messages += 1;
sender.send(blob).expect("failed to send blob");
}
}
drop(sender);
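// Dropping the only sender closes the channel, so each worker's recv() loop ends and it reports its partial stats.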
let mut received_messages = 0;
let mut osm_stats = empty_osm_stats();
while received_messages < thread_count {
let worker_stats = return_received.recv().unwrap();
osm_stats.nodes += worker_stats.nodes;
osm_stats.ways += worker_stats.ways;
osm_stats.relations += worker_stats.relations;
osm_stats.timestamp_max = max(osm_stats.timestamp_max, worker_stats.timestamp_max);
osm_stats.timestamp_min = min(osm_stats.timestamp_min, worker_stats.timestamp_min);
if worker_stats.lat_max > osm_stats.lat_max {
osm_stats.lat_max = worker_stats.lat_max
}
if worker_stats.lat_min < osm_stats.lat_min {
osm_stats.lat_min = worker_stats.lat_min
}
if worker_stats.lon_max > osm_stats.lon_max {
osm_stats.lon_max = worker_stats.lon_max
}
if worker_stats.lon_min < osm_stats.lon_min {
osm_stats.lon_min = worker_stats.lon_min
}
received_messages += 1;
}
Ok(format!("{:#?}", osm_stats))
})
.unwrap()
}
fn handle_block(mut osm_stats: &mut OsmStats, blob: &Blob, buffer: &mut Vec<u8>) {
let zlib_data_ref = blob.zlib_data.as_ref();
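// A blob carries either raw bytes or zlib-compressed data; the compressed case is inflated into the reusable per-thread buffer.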
let tried_block = if blob.raw.is_some() {
let bytes = blob.raw.as_ref().unwrap();
let mut reader = BytesReader::from_bytes(&bytes);
Some(
PrimitiveBlock::from_reader(&mut reader, &bytes)
.expect("failed to read primitive block"),
)
} else if zlib_data_ref.is_some() {
use flate2::{Decompress, FlushDecompress};
let mut decompress = Decompress::new(true);
decompress
.decompress_vec(&zlib_data_ref.unwrap(), buffer, FlushDecompress::Finish)
.expect("error decompressing");
let mut reader = BytesReader::from_bytes(&buffer);
Some(
PrimitiveBlock::from_reader(&mut reader, &buffer)
.expect("failed to read gzipped primitive block"),
)
} else {
None
};
let block = tried_block.unwrap();
handle_primitive_block(&mut osm_stats, &block);
}
fn handle_primitive_block(mut osm_stats: &mut OsmStats, block: &PrimitiveBlock) {
for primitive in &block.primitivegroup {
if let Some(dense_nodes) = &primitive.dense {
handle_dense_nodes(&mut osm_stats, &dense_nodes, &block);
}
for node in &primitive.nodes {
handle_node(&mut osm_stats, &node, &block);
}
for way in &primitive.ways {
handle_way(&mut osm_stats, &way, &block);
}
for relation in &primitive.relations {
handle_relation(&mut osm_stats, &relation, &block);
}
}
}
fn handle_dense_nodes(
mut osm_stats: &mut OsmStats,
dense_nodes: &DenseNodes,
primitive: &PrimitiveBlock,
) {
osm_stats.nodes += dense_nodes.id.len() as u64;
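// DenseNodes store timestamps, latitudes and longitudes delta-encoded; running sums recover the absolute values.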
if let Some(dense_info) = &dense_nodes.denseinfo {
let mut last_timestamp = 0;
for delta_timestamp in &dense_info.timestamp {
let timestamp = last_timestamp + delta_timestamp;
handle_timestamp(&mut osm_stats, timestamp, primitive.date_granularity);
last_timestamp = timestamp;
}
}
let mut last_latitude = 0;
for delta_latitude in &dense_nodes.lat {
let latitude = last_latitude + delta_latitude;
handle_latitude(&mut osm_stats, latitude, &primitive);
last_latitude = latitude;
}
let mut last_longitude = 0;
for delta_longitude in &dense_nodes.lon {
let longitude = last_longitude + delta_longitude;
handle_longitude(&mut osm_stats, longitude, &primitive);
last_longitude = longitude;
}
}
fn handle_node(mut osm_stats: &mut OsmStats, node: &Node, primitive: &PrimitiveBlock) {
osm_stats.nodes += 1;
if let Some(info) = &node.info {
handle_info(&mut osm_stats, &info, primitive.date_granularity)
}
handle_latitude(&mut osm_stats, node.lat, &primitive);
handle_longitude(&mut osm_stats, node.lon, &primitive);
}
fn handle_way(mut osm_stats: &mut OsmStats, way: &Way, primitive: &PrimitiveBlock) {
osm_stats.ways += 1;
if let Some(info) = &way.info {
handle_info(&mut osm_stats, &info, primitive.date_granularity)
}
}
fn handle_relation(mut osm_stats: &mut OsmStats, relation: &Relation, primitive: &PrimitiveBlock) {
osm_stats.relations += 1;
if let Some(info) = &relation.info {
handle_info(&mut osm_stats, &info, primitive.date_granularity)
}
}
fn handle_info(mut osm_stats: &mut OsmStats, info: &Info, date_granularity: i32) {
if let Some(timestamp) = info.timestamp {
handle_timestamp(&mut osm_stats, timestamp, date_granularity);
}
}
fn handle_timestamp(osm_stats: &mut OsmStats, timestamp: i64, date_granularity: i32) {
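// timestamps are stored in units of date_granularity (milliseconds per unit); multiplying converts to milliseconds since the epoch.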
let millisec_stamp = timestamp * (date_granularity as i64);
if millisec_stamp < osm_stats.timestamp_min {
osm_stats.timestamp_min = millisec_stamp
}
if millisec_stamp > osm_stats.timestamp_max {
osm_stats.timestamp_max = millisec_stamp
}
}
fn handle_latitude(osm_stats: &mut OsmStats, latitude: i64, primitive: &PrimitiveBlock) {
let latitude_f =
| OsmStats | identifier_name |
main.rs | ::Jemalloc;
const WORK_BOUND: usize = 4000;
const MAX_COMPRESSED_BLOB_SIZE: i32 = 64 * 1024;
const MAX_DECOMPRESSED_BLOB_SIZE: i32 = 32 * 1024 * 1024;
#[derive(Debug)]
struct OsmStats {
timestamp_min: i64,
timestamp_max: i64,
nodes: u64,
ways: u64,
relations: u64,
lon_min: f64,
lon_max: f64,
lat_min: f64,
lat_max: f64,
}
fn main() {
let args: Vec<_> = std::env::args_os().collect();
let filename = &args[1];
let orig_handler = panic::take_hook();
panic::set_hook(Box::new(move |panic_info| {
let handler = &orig_handler;
handler(panic_info);
process::exit(1);
}));
match do_processing(filename, num_cpus::get()) {
Ok(result) => println!("{}", result),
Err(err) => println!("{}", err),
}
}
fn do_processing(filename: &std::ffi::OsStr, thread_count: usize) -> Result<String, String> {
let file_handle = File::open(filename).or(Err("unable to open file"))?;
let mmap = unsafe {
MmapOptions::new()
.map(&file_handle)
.or(Err("unable to mmap"))?
};
let bytes = &mmap[..];
let mut reader = BytesReader::from_bytes(&bytes);
let mut sent_messages = 0; | let (sender, receiver) = bounded::<Blob>(WORK_BOUND);
let (return_sender, return_received) = unbounded::<OsmStats>();
thread::scope(|s| {
for _ in 0..thread_count {
let cloned_receiver = receiver.clone();
let cloned_return_sender = return_sender.clone();
s.spawn(move |_| {
let mut buffer = Vec::with_capacity(MAX_DECOMPRESSED_BLOB_SIZE as usize);
let mut stats = empty_osm_stats();
loop {
match cloned_receiver.recv() {
Ok(blob) => {
handle_block(&mut stats, &blob, &mut buffer);
buffer.clear();
}
Err(_e) => break,
}
}
cloned_return_sender
.send(stats)
.expect("failed to send size result");
});
}
loop {
let header_size = match reader.read_sfixed32(bytes).map(|value| value.swap_bytes()) {
Ok(size) if size > MAX_COMPRESSED_BLOB_SIZE => {
return Err("invalid data, compressed blob too large".to_string())
}
Ok(size) => size,
Err(_e) => break,
} as usize;
let blob_header = reader
.read_message_by_len::<BlobHeader>(&bytes, header_size)
.expect("failed to read blob header");
let blob = reader
.read_message_by_len::<Blob>(bytes, blob_header.datasize as usize)
.expect("failed to read blob");
if blob.raw_size.unwrap_or(0) > MAX_DECOMPRESSED_BLOB_SIZE {
return Err("invalid data, uncompressed blob too large".to_string());
}
if blob_header.type_pb == "OSMData" {
sent_messages += 1;
sender.send(blob).expect("failed to send blob");
}
}
drop(sender);
let mut received_messages = 0;
let mut osm_stats = empty_osm_stats();
while received_messages < thread_count {
let worker_stats = return_received.recv().unwrap();
osm_stats.nodes += worker_stats.nodes;
osm_stats.ways += worker_stats.ways;
osm_stats.relations += worker_stats.relations;
osm_stats.timestamp_max = max(osm_stats.timestamp_max, worker_stats.timestamp_max);
osm_stats.timestamp_min = min(osm_stats.timestamp_min, worker_stats.timestamp_min);
if worker_stats.lat_max > osm_stats.lat_max {
osm_stats.lat_max = worker_stats.lat_max
}
if worker_stats.lat_min < osm_stats.lat_min {
osm_stats.lat_min = worker_stats.lat_min
}
if worker_stats.lon_max > osm_stats.lon_max {
osm_stats.lon_max = worker_stats.lon_max
}
if worker_stats.lon_min < osm_stats.lon_min {
osm_stats.lon_min = worker_stats.lon_min
}
received_messages += 1;
}
Ok(format!("{:#?}", osm_stats))
})
.unwrap()
}
fn handle_block(mut osm_stats: &mut OsmStats, blob: &Blob, buffer: &mut Vec<u8>) {
let zlib_data_ref = blob.zlib_data.as_ref();
let tried_block = if blob.raw.is_some() {
let bytes = blob.raw.as_ref().unwrap();
let mut reader = BytesReader::from_bytes(&bytes);
Some(
PrimitiveBlock::from_reader(&mut reader, &bytes)
.expect("failed to read primitive block"),
)
} else if zlib_data_ref.is_some() {
use flate2::{Decompress, FlushDecompress};
let mut decompress = Decompress::new(true);
decompress
.decompress_vec(&zlib_data_ref.unwrap(), buffer, FlushDecompress::Finish)
.expect("error decompressing");
let mut reader = BytesReader::from_bytes(&buffer);
Some(
PrimitiveBlock::from_reader(&mut reader, &buffer)
.expect("failed to read gzipped primitive block"),
)
} else {
None
};
let block = tried_block.unwrap();
handle_primitive_block(&mut osm_stats, &block);
}
fn handle_primitive_block(mut osm_stats: &mut OsmStats, block: &PrimitiveBlock) {
for primitive in &block.primitivegroup {
if let Some(dense_nodes) = &primitive.dense {
handle_dense_nodes(&mut osm_stats, &dense_nodes, &block);
}
for node in &primitive.nodes {
handle_node(&mut osm_stats, &node, &block);
}
for way in &primitive.ways {
handle_way(&mut osm_stats, &way, &block);
}
for relation in &primitive.relations {
handle_relation(&mut osm_stats, &relation, &block);
}
}
}
fn handle_dense_nodes(
mut osm_stats: &mut OsmStats,
dense_nodes: &DenseNodes,
primitive: &PrimitiveBlock,
) {
osm_stats.nodes += dense_nodes.id.len() as u64;
if let Some(dense_info) = &dense_nodes.denseinfo {
let mut last_timestamp = 0;
for delta_timestamp in &dense_info.timestamp {
let timestamp = last_timestamp + delta_timestamp;
handle_timestamp(&mut osm_stats, timestamp, primitive.date_granularity);
last_timestamp = timestamp;
}
}
let mut last_latitude = 0;
for delta_latitude in &dense_nodes.lat {
let latitude = last_latitude + delta_latitude;
handle_latitude(&mut osm_stats, latitude, &primitive);
last_latitude = latitude;
}
let mut last_longitude = 0;
for delta_longitude in &dense_nodes.lon {
let longitude = last_longitude + delta_longitude;
handle_longitude(&mut osm_stats, longitude, &primitive);
last_longitude = longitude;
}
}
fn handle_node(mut osm_stats: &mut OsmStats, node: &Node, primitive: &PrimitiveBlock) {
osm_stats.nodes += 1;
if let Some(info) = &node.info {
handle_info(&mut osm_stats, &info, primitive.date_granularity)
}
handle_latitude(&mut osm_stats, node.lat, &primitive);
handle_longitude(&mut osm_stats, node.lon, &primitive);
}
fn handle_way(mut osm_stats: &mut OsmStats, way: &Way, primitive: &PrimitiveBlock) {
osm_stats.ways += 1;
if let Some(info) = &way.info {
handle_info(&mut osm_stats, &info, primitive.date_granularity)
}
}
fn handle_relation(mut osm_stats: &mut OsmStats, relation: &Relation, primitive: &PrimitiveBlock) {
osm_stats.relations += 1;
if let Some(info) = &relation.info {
handle_info(&mut osm_stats, &info, primitive.date_granularity)
}
}
fn handle_info(mut osm_stats: &mut OsmStats, info: &Info, date_granularity: i32) {
if let Some(timestamp) = info.timestamp {
handle_timestamp(&mut osm_stats, timestamp, date_granularity);
}
}
fn handle_timestamp(osm_stats: &mut OsmStats, timestamp: i64, date_granularity: i32) {
let millisec_stamp = timestamp * (date_granularity as i64);
if millisec_stamp < osm_stats.timestamp_min {
osm_stats.timestamp_min = millisec_stamp
}
if millisec_stamp > osm_stats.timestamp_max {
osm_stats.timestamp_max = millisec_stamp
}
}
fn handle_latitude(osm_stats: &mut OsmStats, latitude: i64, primitive: &PrimitiveBlock) {
let latitude_f =
| random_line_split |
|
main.rs | Jemalloc;
const WORK_BOUND: usize = 4000;
const MAX_COMPRESSED_BLOB_SIZE: i32 = 64 * 1024;
const MAX_DECOMPRESSED_BLOB_SIZE: i32 = 32 * 1024 * 1024;
#[derive(Debug)]
struct OsmStats {
timestamp_min: i64,
timestamp_max: i64,
nodes: u64,
ways: u64,
relations: u64,
lon_min: f64,
lon_max: f64,
lat_min: f64,
lat_max: f64,
}
fn main() {
let args: Vec<_> = std::env::args_os().collect();
let filename = &args[1];
let orig_handler = panic::take_hook();
panic::set_hook(Box::new(move |panic_info| {
let handler = &orig_handler;
handler(panic_info);
process::exit(1);
}));
match do_processing(filename, num_cpus::get()) {
Ok(result) => println!("{}", result),
Err(err) => println!("{}", err),
}
}
fn do_processing(filename: &std::ffi::OsStr, thread_count: usize) -> Result<String, String> {
let file_handle = File::open(filename).or(Err("unable to open file"))?;
let mmap = unsafe {
MmapOptions::new()
.map(&file_handle)
.or(Err("unable to mmap"))?
};
let bytes = &mmap[..];
let mut reader = BytesReader::from_bytes(&bytes);
let mut sent_messages = 0;
let (sender, receiver) = bounded::<Blob>(WORK_BOUND);
let (return_sender, return_received) = unbounded::<OsmStats>();
thread::scope(|s| {
for _ in 0..thread_count {
let cloned_receiver = receiver.clone();
let cloned_return_sender = return_sender.clone();
s.spawn(move |_| {
let mut buffer = Vec::with_capacity(MAX_DECOMPRESSED_BLOB_SIZE as usize);
let mut stats = empty_osm_stats();
loop {
match cloned_receiver.recv() {
Ok(blob) => {
handle_block(&mut stats, &blob, &mut buffer);
buffer.clear();
}
Err(_e) => break,
}
}
cloned_return_sender
.send(stats)
.expect("failed to send size result");
});
}
loop {
let header_size = match reader.read_sfixed32(bytes).map(|value| value.swap_bytes()) {
Ok(size) if size > MAX_COMPRESSED_BLOB_SIZE => {
return Err("invalid data, compressed blob too large".to_string())
}
Ok(size) => size,
Err(_e) => break,
} as usize;
let blob_header = reader
.read_message_by_len::<BlobHeader>(&bytes, header_size)
.expect("failed to read blob header");
let blob = reader
.read_message_by_len::<Blob>(bytes, blob_header.datasize as usize)
.expect("failed to read blob");
if blob.raw_size.unwrap_or(0) > MAX_DECOMPRESSED_BLOB_SIZE {
return Err("invalid data, uncompressed blob too large".to_string());
}
if blob_header.type_pb == "OSMData" {
sent_messages += 1;
sender.send(blob).expect("failed to send blob");
}
}
drop(sender);
let mut received_messages = 0;
let mut osm_stats = empty_osm_stats();
while received_messages < thread_count {
let worker_stats = return_received.recv().unwrap();
osm_stats.nodes += worker_stats.nodes;
osm_stats.ways += worker_stats.ways;
osm_stats.relations += worker_stats.relations;
osm_stats.timestamp_max = max(osm_stats.timestamp_max, worker_stats.timestamp_max);
osm_stats.timestamp_min = min(osm_stats.timestamp_min, worker_stats.timestamp_min);
if worker_stats.lat_max > osm_stats.lat_max {
osm_stats.lat_max = worker_stats.lat_max
}
if worker_stats.lat_min < osm_stats.lat_min {
osm_stats.lat_min = worker_stats.lat_min
}
if worker_stats.lon_max > osm_stats.lon_max {
osm_stats.lon_max = worker_stats.lon_max
}
if worker_stats.lon_min < osm_stats.lon_min |
received_messages += 1;
}
Ok(format!("{:#?}", osm_stats))
})
.unwrap()
}
fn handle_block(mut osm_stats: &mut OsmStats, blob: &Blob, buffer: &mut Vec<u8>) {
let zlib_data_ref = blob.zlib_data.as_ref();
let tried_block = if blob.raw.is_some() {
let bytes = blob.raw.as_ref().unwrap();
let mut reader = BytesReader::from_bytes(&bytes);
Some(
PrimitiveBlock::from_reader(&mut reader, &bytes)
.expect("failed to read primitive block"),
)
} else if zlib_data_ref.is_some() {
use flate2::{Decompress, FlushDecompress};
let mut decompress = Decompress::new(true);
decompress
.decompress_vec(&zlib_data_ref.unwrap(), buffer, FlushDecompress::Finish)
.expect("error decompressing");
let mut reader = BytesReader::from_bytes(&buffer);
Some(
PrimitiveBlock::from_reader(&mut reader, &buffer)
.expect("failed to read gzipped primitive block"),
)
} else {
None
};
let block = tried_block.unwrap();
handle_primitive_block(&mut osm_stats, &block);
}
fn handle_primitive_block(mut osm_stats: &mut OsmStats, block: &PrimitiveBlock) {
for primitive in &block.primitivegroup {
if let Some(dense_nodes) = &primitive.dense {
handle_dense_nodes(&mut osm_stats, &dense_nodes, &block);
}
for node in &primitive.nodes {
handle_node(&mut osm_stats, &node, &block);
}
for way in &primitive.ways {
handle_way(&mut osm_stats, &way, &block);
}
for relation in &primitive.relations {
handle_relation(&mut osm_stats, &relation, &block);
}
}
}
fn handle_dense_nodes(
mut osm_stats: &mut OsmStats,
dense_nodes: &DenseNodes,
primitive: &PrimitiveBlock,
) {
osm_stats.nodes += dense_nodes.id.len() as u64;
if let Some(dense_info) = &dense_nodes.denseinfo {
let mut last_timestamp = 0;
for delta_timestamp in &dense_info.timestamp {
let timestamp = last_timestamp + delta_timestamp;
handle_timestamp(&mut osm_stats, timestamp, primitive.date_granularity);
last_timestamp = timestamp;
}
}
let mut last_latitude = 0;
for delta_latitude in &dense_nodes.lat {
let latitude = last_latitude + delta_latitude;
handle_latitude(&mut osm_stats, latitude, &primitive);
last_latitude = latitude;
}
let mut last_longitude = 0;
for delta_longitude in &dense_nodes.lon {
let longitude = last_longitude + delta_longitude;
handle_longitude(&mut osm_stats, longitude, &primitive);
last_longitude = longitude;
}
}
fn handle_node(mut osm_stats: &mut OsmStats, node: &Node, primitive: &PrimitiveBlock) {
osm_stats.nodes += 1;
if let Some(info) = &node.info {
handle_info(&mut osm_stats, &info, primitive.date_granularity)
}
handle_latitude(&mut osm_stats, node.lat, &primitive);
handle_longitude(&mut osm_stats, node.lon, &primitive);
}
fn handle_way(mut osm_stats: &mut OsmStats, way: &Way, primitive: &PrimitiveBlock) {
osm_stats.ways += 1;
if let Some(info) = &way.info {
handle_info(&mut osm_stats, &info, primitive.date_granularity)
}
}
fn handle_relation(mut osm_stats: &mut OsmStats, relation: &Relation, primitive: &PrimitiveBlock) {
osm_stats.relations += 1;
if let Some(info) = &relation.info {
handle_info(&mut osm_stats, &info, primitive.date_granularity)
}
}
fn handle_info(mut osm_stats: &mut OsmStats, info: &Info, date_granularity: i32) {
if let Some(timestamp) = info.timestamp {
handle_timestamp(&mut osm_stats, timestamp, date_granularity);
}
}
fn handle_timestamp(osm_stats: &mut OsmStats, timestamp: i64, date_granularity: i32) {
let millisec_stamp = timestamp * (date_granularity as i64);
if millisec_stamp < osm_stats.timestamp_min {
osm_stats.timestamp_min = millisec_stamp
}
if millisec_stamp > osm_stats.timestamp_max {
osm_stats.timestamp_max = millisec_stamp
}
}
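// Worked example for handle_timestamp above: with the format's usual date_granularity
// of 1000, a stored timestamp value of 1_600_000_000 becomes 1_600_000_000_000,
// i.e. milliseconds since the Unix epoch.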
fn handle_latitude(osm_stats: &mut OsmStats, latitude: i64, primitive: &PrimitiveBlock) {
let latitude_f =
| {
osm_stats.lon_min = worker_stats.lon_min
} | conditional_block |
main.rs | Jemalloc;
const WORK_BOUND: usize = 4000;
const MAX_COMPRESSED_BLOB_SIZE: i32 = 64 * 1024;
const MAX_DECOMPRESSED_BLOB_SIZE: i32 = 32 * 1024 * 1024;
#[derive(Debug)]
struct OsmStats {
timestamp_min: i64,
timestamp_max: i64,
nodes: u64,
ways: u64,
relations: u64,
lon_min: f64,
lon_max: f64,
lat_min: f64,
lat_max: f64,
}
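// empty_osm_stats() is used below but not shown in this excerpt; for the min/max folds
// to work it has to start the counters at 0 and the min/max fields at opposite extremes.
// A sketch consistent with that contract (an assumption, not the original body):
// fn empty_osm_stats() -> OsmStats {
//     OsmStats {
//         nodes: 0, ways: 0, relations: 0,
//         timestamp_min: i64::MAX, timestamp_max: i64::MIN,
//         lat_min: f64::MAX, lat_max: f64::MIN,
//         lon_min: f64::MAX, lon_max: f64::MIN,
//     }
// }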
fn main() {
let args: Vec<_> = std::env::args_os().collect();
let filename = &args[1];
let orig_handler = panic::take_hook();
panic::set_hook(Box::new(move |panic_info| {
let handler = &orig_handler;
handler(panic_info);
process::exit(1);
}));
match do_processing(filename, num_cpus::get()) {
Ok(result) => println!("{}", result),
Err(err) => println!("{}", err),
}
}
fn do_processing(filename: &std::ffi::OsStr, thread_count: usize) -> Result<String, String> {
let file_handle = File::open(filename).or(Err("unable to open file"))?;
let mmap = unsafe {
MmapOptions::new()
.map(&file_handle)
.or(Err("unable to mmap"))?
};
let bytes = &mmap[..];
let mut reader = BytesReader::from_bytes(&bytes);
let mut sent_messages = 0;
let (sender, receiver) = bounded::<Blob>(WORK_BOUND);
let (return_sender, return_received) = unbounded::<OsmStats>();
thread::scope(|s| {
for _ in 0..thread_count {
let cloned_receiver = receiver.clone();
let cloned_return_sender = return_sender.clone();
s.spawn(move |_| {
let mut buffer = Vec::with_capacity(MAX_DECOMPRESSED_BLOB_SIZE as usize);
let mut stats = empty_osm_stats();
loop {
match cloned_receiver.recv() {
Ok(blob) => {
handle_block(&mut stats, &blob, &mut buffer);
buffer.clear();
}
Err(_e) => break,
}
}
cloned_return_sender
.send(stats)
.expect("failed to send size result");
});
}
loop {
let header_size = match reader.read_sfixed32(bytes).map(|value| value.swap_bytes()) {
Ok(size) if size > MAX_COMPRESSED_BLOB_SIZE => {
return Err("invalid data, compressed blob too large".to_string())
}
Ok(size) => size,
Err(_e) => break,
} as usize;
let blob_header = reader
.read_message_by_len::<BlobHeader>(&bytes, header_size)
.expect("failed to read blob header");
let blob = reader
.read_message_by_len::<Blob>(bytes, blob_header.datasize as usize)
.expect("failed to read blob");
if blob.raw_size.unwrap_or(0) > MAX_DECOMPRESSED_BLOB_SIZE {
return Err("invalid data, uncompressed blob too large".to_string());
}
if blob_header.type_pb == "OSMData" {
sent_messages += 1;
sender.send(blob).expect("failed to send blob");
}
}
drop(sender);
let mut received_messages = 0;
let mut osm_stats = empty_osm_stats();
while received_messages < thread_count {
let worker_stats = return_received.recv().unwrap();
osm_stats.nodes += worker_stats.nodes;
osm_stats.ways += worker_stats.ways;
osm_stats.relations += worker_stats.relations;
osm_stats.timestamp_max = max(osm_stats.timestamp_max, worker_stats.timestamp_max);
osm_stats.timestamp_min = min(osm_stats.timestamp_min, worker_stats.timestamp_min);
if worker_stats.lat_max > osm_stats.lat_max {
osm_stats.lat_max = worker_stats.lat_max
}
if worker_stats.lat_min < osm_stats.lat_min {
osm_stats.lat_min = worker_stats.lat_min
}
if worker_stats.lon_max > osm_stats.lon_max {
osm_stats.lon_max = worker_stats.lon_max
}
if worker_stats.lon_min < osm_stats.lon_min {
osm_stats.lon_min = worker_stats.lon_min
}
received_messages += 1;
}
Ok(format!("{:#?}", osm_stats))
})
.unwrap()
}
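// Framing handled by the loop above: an .osm.pbf stream is a sequence of
//   [4-byte big-endian length][BlobHeader of that length][Blob of BlobHeader.datasize bytes]
// frames. Only frames whose header type is "OSMData" are fanned out to the worker
// threads over the bounded channel; other frame types (e.g. "OSMHeader") are skipped.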
fn handle_block(mut osm_stats: &mut OsmStats, blob: &Blob, buffer: &mut Vec<u8>) {
let zlib_data_ref = blob.zlib_data.as_ref();
let tried_block = if blob.raw.is_some() {
let bytes = blob.raw.as_ref().unwrap();
let mut reader = BytesReader::from_bytes(&bytes);
Some(
PrimitiveBlock::from_reader(&mut reader, &bytes)
.expect("failed to read primitive block"),
)
} else if zlib_data_ref.is_some() {
use flate2::{Decompress, FlushDecompress};
let mut decompress = Decompress::new(true);
decompress
.decompress_vec(&zlib_data_ref.unwrap(), buffer, FlushDecompress::Finish)
.expect("error decompressing");
let mut reader = BytesReader::from_bytes(&buffer);
Some(
PrimitiveBlock::from_reader(&mut reader, &buffer)
.expect("failed to read gzipped primitive block"),
)
} else {
None
};
let block = tried_block.unwrap();
handle_primitive_block(&mut osm_stats, &block);
}
fn handle_primitive_block(mut osm_stats: &mut OsmStats, block: &PrimitiveBlock) {
for primitive in &block.primitivegroup {
if let Some(dense_nodes) = &primitive.dense {
handle_dense_nodes(&mut osm_stats, &dense_nodes, &block);
}
for node in &primitive.nodes {
handle_node(&mut osm_stats, &node, &block);
}
for way in &primitive.ways {
handle_way(&mut osm_stats, &way, &block);
}
for relation in &primitive.relations {
handle_relation(&mut osm_stats, &relation, &block);
}
}
}
fn handle_dense_nodes(
mut osm_stats: &mut OsmStats,
dense_nodes: &DenseNodes,
primitive: &PrimitiveBlock,
) {
osm_stats.nodes += dense_nodes.id.len() as u64;
if let Some(dense_info) = &dense_nodes.denseinfo {
let mut last_timestamp = 0;
for delta_timestamp in &dense_info.timestamp {
let timestamp = last_timestamp + delta_timestamp;
handle_timestamp(&mut osm_stats, timestamp, primitive.date_granularity);
last_timestamp = timestamp;
}
}
let mut last_latitude = 0;
for delta_latitude in &dense_nodes.lat {
let latitude = last_latitude + delta_latitude;
handle_latitude(&mut osm_stats, latitude, &primitive);
last_latitude = latitude;
}
let mut last_longitude = 0;
for delta_longitude in &dense_nodes.lon {
let longitude = last_longitude + delta_longitude;
handle_longitude(&mut osm_stats, longitude, &primitive);
last_longitude = longitude;
}
}
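// Delta decoding illustration for the loops above: DenseNodes store coordinates and
// timestamps as deltas, so each decoded value is a running sum, e.g. deltas [500, 3, -7]
// decode to [500, 503, 496]. The same idea as a tiny helper (hypothetical, for clarity only):
// fn decode_deltas(deltas: &[i64]) -> Vec<i64> {
//     let mut last = 0i64;
//     deltas.iter().map(|d| { last += d; last }).collect()
// }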
fn handle_node(mut osm_stats: &mut OsmStats, node: &Node, primitive: &PrimitiveBlock) {
osm_stats.nodes += 1;
if let Some(info) = &node.info {
handle_info(&mut osm_stats, &info, primitive.date_granularity)
}
handle_latitude(&mut osm_stats, node.lat, &primitive);
handle_longitude(&mut osm_stats, node.lon, &primitive);
}
fn handle_way(mut osm_stats: &mut OsmStats, way: &Way, primitive: &PrimitiveBlock) {
osm_stats.ways += 1;
if let Some(info) = &way.info {
handle_info(&mut osm_stats, &info, primitive.date_granularity)
}
}
fn handle_relation(mut osm_stats: &mut OsmStats, relation: &Relation, primitive: &PrimitiveBlock) {
osm_stats.relations += 1;
if let Some(info) = &relation.info {
handle_info(&mut osm_stats, &info, primitive.date_granularity)
}
}
fn handle_info(mut osm_stats: &mut OsmStats, info: &Info, date_granularity: i32) {
if let Some(timestamp) = info.timestamp {
handle_timestamp(&mut osm_stats, timestamp, date_granularity);
}
}
fn handle_timestamp(osm_stats: &mut OsmStats, timestamp: i64, date_granularity: i32) |
fn handle_latitude(osm_stats: &mut OsmStats, latitude: i64, primitive: &PrimitiveBlock) {
let latitude_f =
| {
let millisec_stamp = timestamp * (date_granularity as i64);
if millisec_stamp < osm_stats.timestamp_min {
osm_stats.timestamp_min = millisec_stamp
}
if millisec_stamp > osm_stats.timestamp_max {
osm_stats.timestamp_max = millisec_stamp
}
} | identifier_body |
provider.go | old state.
oldState, err := plugin.UnmarshalProperties(req.GetProperties(), plugin.MarshalOptions{
Label: fmt.Sprintf("%s.olds", label), KeepUnknowns: true, SkipNulls: true, KeepSecrets: true,
})
if err != nil {
return nil, err
}
// Read the current state of the resource from the API.
newState, err := p.client.RequestWithTimeout("GET", uri, nil, 0)
if err != nil {
if reqErr, ok := err.(*googleapi.Error); ok && reqErr.Code == http.StatusNotFound {
// 404 means that the resource was deleted.
return &rpc.ReadResponse{Id: ""}, nil
}
return nil, fmt.Errorf("error sending request: %s", err)
}
// Extract old inputs from the `__inputs` field of the old state.
inputs := parseCheckpointObject(oldState)
newStateProps := resource.NewPropertyMapFromMap(newState)
if inputs == nil {
return nil, status.Error(codes.Unimplemented, "Import is not yet implemented")
} else {
// It's hard to infer the changes in the inputs shape based on the outputs without false positives.
// The current approach is complicated, but it is aimed at minimizing the noise while refreshing:
// 0. We have "old" inputs and outputs before refresh and "new" outputs read from API.
// 1. Project old outputs to their corresponding input shape (exclude read-only properties).
oldInputProjection := getInputsFromState(res, oldState)
// 2. Project new outputs to their corresponding input shape (exclude read-only properties).
newInputProjection := getInputsFromState(res, newStateProps)
// 3. Calculate the difference between two projections. This should give us actual significant changes
// that happened in Google Cloud between the last resource update and its current state.
diff := oldInputProjection.Diff(newInputProjection)
// 4. Apply this difference to the actual inputs (not a projection) that we have in state.
inputs = applyDiff(inputs, diff)
}
// Store both outputs and inputs into the state checkpoint.
checkpoint, err := plugin.MarshalProperties(
checkpointObject(inputs, newState),
plugin.MarshalOptions{Label: fmt.Sprintf("%s.checkpoint", label), KeepSecrets: true, SkipNulls: true},
)
if err != nil {
return nil, err
}
inputsRecord, err := plugin.MarshalProperties(
inputs,
plugin.MarshalOptions{Label: fmt.Sprintf("%s.inputs", label), KeepUnknowns: true, SkipNulls: true},
)
if err != nil {
return nil, err
}
return &rpc.ReadResponse{Id: id, Properties: checkpoint, Inputs: inputsRecord}, nil
}
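// Worked example of the projection/diff/apply refresh above (hypothetical values):
//   stored inputs:              {machineType: "n1-standard-1", labels: {env: "dev"}}
//   old outputs -> input shape: {machineType: "n1-standard-1", labels: {env: "dev"}}
//   new outputs -> input shape: {machineType: "n1-standard-2", labels: {env: "dev"}}
//   diff:                       machineType changed
//   refreshed inputs:           {machineType: "n1-standard-2", labels: {env: "dev"}}
// Read-only output properties never enter the projections, so they cannot add noise here.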
// Update updates an existing resource with new values.
func (p *googleCloudProvider) Update(_ context.Context, req *rpc.UpdateRequest) (*rpc.UpdateResponse, error) {
urn := resource.URN(req.GetUrn())
label := fmt.Sprintf("%s.Update(%s)", p.name, urn)
logging.V(9).Infof("%s executing", label)
// Deserialize RPC inputs
inputs, err := plugin.UnmarshalProperties(req.GetNews(), plugin.MarshalOptions{
Label: fmt.Sprintf("%s.properties", label), SkipNulls: true,
})
if err != nil {
return nil, err
}
// Deserialize the last known state.
oldState, err := plugin.UnmarshalProperties(req.GetOlds(), plugin.MarshalOptions{
Label: fmt.Sprintf("%s.oldState", label), SkipNulls: true,
})
if err != nil {
return nil, errors.Wrapf(err, "reading resource state")
}
resourceKey := string(urn.Type())
res, ok := p.resourceMap.Resources[resourceKey]
if !ok {
return nil, errors.Errorf("resource %q not found", resourceKey)
}
body := p.prepareAPIInputs(inputs, oldState, res.UpdateProperties)
uri := res.ResourceUrl(req.GetId())
if strings.HasSuffix(uri, ":getIamPolicy") {
uri = strings.ReplaceAll(uri, ":getIamPolicy", ":setIamPolicy")
}
op, err := p.client.RequestWithTimeout(res.UpdateVerb, uri, body, 0)
if err != nil {
return nil, fmt.Errorf("error sending request: %s: %q %+v", err, uri, body)
}
resp, err := p.waitForResourceOpCompletion(res.BaseUrl, op)
if err != nil {
return nil, errors.Wrapf(err, "waiting for completion")
}
// Read the inputs to persist them into state.
newInputs, err := plugin.UnmarshalProperties(req.GetNews(), plugin.MarshalOptions{
Label: fmt.Sprintf("%s.newInputs", label),
KeepUnknowns: true,
KeepSecrets: true,
})
if err != nil {
return nil, errors.Wrapf(err, "diff failed because malformed resource inputs")
}
// Store both outputs and inputs into the state and return RPC checkpoint.
outputs, err := plugin.MarshalProperties(
checkpointObject(newInputs, resp),
plugin.MarshalOptions{Label: fmt.Sprintf("%s.response", label), KeepSecrets: true, SkipNulls: true},
)
return &rpc.UpdateResponse{
Properties: outputs,
}, nil
}
// Delete tears down an existing resource with the given ID. If it fails, the resource is assumed
// to still exist.
func (p *googleCloudProvider) Delete(_ context.Context, req *rpc.DeleteRequest) (*empty.Empty, error) {
urn := resource.URN(req.GetUrn())
resourceKey := string(urn.Type())
res, ok := p.resourceMap.Resources[resourceKey]
if !ok {
return nil, errors.Errorf("resource %q not found", resourceKey)
}
uri := res.ResourceUrl(req.GetId())
if res.NoDelete {
// At the time of writing, the classic GCP provider has the same behavior and warning for 10 resources.
logging.V(1).Infof("%q resources"+
" cannot be deleted from Google Cloud. The resource %s will be removed from Pulumi"+
" state, but will still be present on Google Cloud.", resourceKey, req.GetId())
return &empty.Empty{}, nil
}
resp, err := p.client.RequestWithTimeout("DELETE", uri, nil, 0)
if err != nil {
return nil, fmt.Errorf("error sending request: %s", err)
}
_, err = p.waitForResourceOpCompletion(res.BaseUrl, resp)
if err != nil {
return nil, errors.Wrapf(err, "waiting for completion")
}
return &empty.Empty{}, nil
}
// Construct creates a new component resource.
func (p *googleCloudProvider) Construct(_ context.Context, _ *rpc.ConstructRequest) (*rpc.ConstructResponse, error) {
return nil, status.Error(codes.Unimplemented, "Construct is not yet implemented")
}
// Call dynamically executes a method in the provider associated with a component resource.
func (p *googleCloudProvider) Call(_ context.Context, _ *rpc.CallRequest) (*rpc.CallResponse, error) {
return nil, status.Error(codes.Unimplemented, "Call is not yet implemented")
}
// GetPluginInfo returns generic information about this plugin, like its version.
func (p *googleCloudProvider) GetPluginInfo(context.Context, *empty.Empty) (*rpc.PluginInfo, error) {
return &rpc.PluginInfo{
Version: p.version,
}, nil
}
// Cancel signals the provider to gracefully shut down and abort any ongoing resource operations.
// Operations aborted in this way will return an error (e.g., `Update` and `Create` will either return a
// creation error or an initialization error). Since Cancel is advisory and non-blocking, it is up
// to the host to decide how long to wait after Cancel is called before (e.g.)
// hard-closing any gRPC connection.
func (p *googleCloudProvider) Cancel(context.Context, *empty.Empty) (*empty.Empty, error) {
return &empty.Empty{}, nil
}
func (p *googleCloudProvider) setLoggingContext(ctx context.Context) {
log.SetOutput(&LogRedirector{
writers: map[string]func(string) error{
tfTracePrefix: func(msg string) error { return p.host.Log(ctx, diag.Debug, "", msg) },
tfDebugPrefix: func(msg string) error { return p.host.Log(ctx, diag.Debug, "", msg) },
tfInfoPrefix: func(msg string) error { return p.host.Log(ctx, diag.Info, "", msg) },
tfWarnPrefix: func(msg string) error { return p.host.Log(ctx, diag.Warning, "", msg) },
tfErrorPrefix: func(msg string) error { return p.host.Log(ctx, diag.Error, "", msg) },
},
})
}
func (p *googleCloudProvider) getConfig(configName, envName string) string {
if val, ok := p.config[configName]; ok {
return val
}
return os.Getenv(envName)
}
func (p *googleCloudProvider) getPartnerName() string | {
result := p.getConfig("partnerName", "GOOGLE_PARTNER_NAME")
if result != "" {
return result
} else {
disablePartner := p.getConfig("disablePartnerName", "GOOGLE_DISABLE_PARTNER_NAME")
if disablePartner == "true" {
return ""
}
}
return "Pulumi"
} | identifier_body |
|
provider.go | has := resp["statusMessage"]; has {
err = errors.Errorf("operation failed with %q", statusMessage)
}
// Extract the resource response, if any.
// A partial error could happen, so both response and error could be available.
if response, has := resp["response"].(map[string]interface{}); has {
return response, err
}
if operationType, has := resp["operationType"].(string); has && strings.Contains(strings.ToLower(operationType), "delete") {
return resp, err
}
// Check if there's a target link.
if targetLink, has := resp["targetLink"].(string); has {
// Try reading resource state.
state, getErr := p.client.RequestWithTimeout("GET", targetLink, nil, 0)
if getErr != nil {
if err != nil {
// Return the original creation error if resource read failed.
return nil, err
}
return nil, getErr
}
// A partial error could happen, so both response and error could be available.
return state, err
}
// At this point, we assume either a complete failure or a clean response.
if err != nil {
return nil, err
}
return resp, nil
}
var pollUri string
if selfLink, has := resp["selfLink"].(string); has && hasStatus {
pollUri = selfLink
} else {
if name, has := resp["name"].(string); has && strings.HasPrefix(name, "operations/") {
pollUri = fmt.Sprintf("%s/v1/%s", baseUrl, name)
}
}
if pollUri == "" {
return resp, nil
}
time.Sleep(retryPolicy.Duration())
op, err := p.client.RequestWithTimeout("GET", pollUri, nil, 0)
if err != nil {
return nil, errors.Wrapf(err, "polling operation status")
}
resp = op
}
}
// Read the current live state associated with a resource.
func (p *googleCloudProvider) Read(_ context.Context, req *rpc.ReadRequest) (*rpc.ReadResponse, error) {
urn := resource.URN(req.GetUrn())
label := fmt.Sprintf("%s.Read(%s)", p.name, urn)
resourceKey := string(urn.Type())
res, ok := p.resourceMap.Resources[resourceKey]
if !ok {
return nil, errors.Errorf("resource %q not found", resourceKey)
}
id := req.GetId()
uri := res.ResourceUrl(id)
// Retrieve the old state.
oldState, err := plugin.UnmarshalProperties(req.GetProperties(), plugin.MarshalOptions{
Label: fmt.Sprintf("%s.olds", label), KeepUnknowns: true, SkipNulls: true, KeepSecrets: true,
})
if err != nil {
return nil, err
}
// Read the current state of the resource from the API.
newState, err := p.client.RequestWithTimeout("GET", uri, nil, 0)
if err != nil {
if reqErr, ok := err.(*googleapi.Error); ok && reqErr.Code == http.StatusNotFound {
// 404 means that the resource was deleted.
return &rpc.ReadResponse{Id: ""}, nil
}
return nil, fmt.Errorf("error sending request: %s", err)
}
// Extract old inputs from the `__inputs` field of the old state.
inputs := parseCheckpointObject(oldState)
newStateProps := resource.NewPropertyMapFromMap(newState)
if inputs == nil {
return nil, status.Error(codes.Unimplemented, "Import is not yet implemented")
} else {
// It's hard to infer the changes in the inputs shape based on the outputs without false positives.
// The current approach is complicated, but it is aimed at minimizing the noise while refreshing:
// 0. We have "old" inputs and outputs before refresh and "new" outputs read from API.
// 1. Project old outputs to their corresponding input shape (exclude read-only properties).
oldInputProjection := getInputsFromState(res, oldState)
// 2. Project new outputs to their corresponding input shape (exclude read-only properties).
newInputProjection := getInputsFromState(res, newStateProps)
// 3. Calculate the difference between two projections. This should give us actual significant changes
// that happened in Google Cloud between the last resource update and its current state.
diff := oldInputProjection.Diff(newInputProjection)
// 4. Apply this difference to the actual inputs (not a projection) that we have in state.
inputs = applyDiff(inputs, diff)
}
// Store both outputs and inputs into the state checkpoint.
checkpoint, err := plugin.MarshalProperties(
checkpointObject(inputs, newState),
plugin.MarshalOptions{Label: fmt.Sprintf("%s.checkpoint", label), KeepSecrets: true, SkipNulls: true},
)
if err != nil {
return nil, err
}
inputsRecord, err := plugin.MarshalProperties(
inputs,
plugin.MarshalOptions{Label: fmt.Sprintf("%s.inputs", label), KeepUnknowns: true, SkipNulls: true},
)
if err != nil {
return nil, err
}
return &rpc.ReadResponse{Id: id, Properties: checkpoint, Inputs: inputsRecord}, nil
}
// Update updates an existing resource with new values.
func (p *googleCloudProvider) Update(_ context.Context, req *rpc.UpdateRequest) (*rpc.UpdateResponse, error) {
urn := resource.URN(req.GetUrn())
label := fmt.Sprintf("%s.Update(%s)", p.name, urn)
logging.V(9).Infof("%s executing", label)
// Deserialize RPC inputs
inputs, err := plugin.UnmarshalProperties(req.GetNews(), plugin.MarshalOptions{
Label: fmt.Sprintf("%s.properties", label), SkipNulls: true,
})
if err != nil {
return nil, err
}
// Deserialize the last known state.
oldState, err := plugin.UnmarshalProperties(req.GetOlds(), plugin.MarshalOptions{
Label: fmt.Sprintf("%s.oldState", label), SkipNulls: true,
})
if err != nil {
return nil, errors.Wrapf(err, "reading resource state")
}
resourceKey := string(urn.Type())
res, ok := p.resourceMap.Resources[resourceKey]
if !ok {
return nil, errors.Errorf("resource %q not found", resourceKey)
}
body := p.prepareAPIInputs(inputs, oldState, res.UpdateProperties)
uri := res.ResourceUrl(req.GetId())
if strings.HasSuffix(uri, ":getIamPolicy") {
uri = strings.ReplaceAll(uri, ":getIamPolicy", ":setIamPolicy")
}
op, err := p.client.RequestWithTimeout(res.UpdateVerb, uri, body, 0)
if err != nil {
return nil, fmt.Errorf("error sending request: %s: %q %+v", err, uri, body)
}
resp, err := p.waitForResourceOpCompletion(res.BaseUrl, op)
if err != nil {
return nil, errors.Wrapf(err, "waiting for completion")
}
// Read the inputs to persist them into state.
newInputs, err := plugin.UnmarshalProperties(req.GetNews(), plugin.MarshalOptions{
Label: fmt.Sprintf("%s.newInputs", label),
KeepUnknowns: true,
KeepSecrets: true,
})
if err != nil {
return nil, errors.Wrapf(err, "diff failed because malformed resource inputs")
}
// Store both outputs and inputs into the state and return RPC checkpoint.
outputs, err := plugin.MarshalProperties(
checkpointObject(newInputs, resp),
plugin.MarshalOptions{Label: fmt.Sprintf("%s.response", label), KeepSecrets: true, SkipNulls: true},
)
return &rpc.UpdateResponse{
Properties: outputs,
}, nil
}
// Delete tears down an existing resource with the given ID. If it fails, the resource is assumed
// to still exist.
func (p *googleCloudProvider) Delete(_ context.Context, req *rpc.DeleteRequest) (*empty.Empty, error) {
urn := resource.URN(req.GetUrn())
resourceKey := string(urn.Type())
res, ok := p.resourceMap.Resources[resourceKey]
if !ok {
return nil, errors.Errorf("resource %q not found", resourceKey)
}
uri := res.ResourceUrl(req.GetId())
if res.NoDelete {
// At the time of writing, the classic GCP provider has the same behavior and warning for 10 resources.
logging.V(1).Infof("%q resources"+
" cannot be deleted from Google Cloud. The resource %s will be removed from Pulumi"+
" state, but will still be present on Google Cloud.", resourceKey, req.GetId())
return &empty.Empty{}, nil
}
resp, err := p.client.RequestWithTimeout("DELETE", uri, nil, 0)
if err != nil {
return nil, fmt.Errorf("error sending request: %s", err)
}
_, err = p.waitForResourceOpCompletion(res.BaseUrl, resp)
if err != nil {
return nil, errors.Wrapf(err, "waiting for completion")
}
return &empty.Empty{}, nil
}
// Construct creates a new component resource.
func (p *googleCloudProvider) | Construct | identifier_name |
|
provider.go | p.waitForResourceOpCompletion(res.BaseUrl, op)
if err != nil {
if resp == nil {
return nil, errors.Wrapf(err, "waiting for completion")
}
// A partial failure may have occurred because we got an error and a response.
// Try reading the resource state and return a partial error if there is some.
id, idErr := calculateResourceId(res, inputsMap, resp)
if idErr != nil {
return nil, errors.Wrapf(err, "waiting for completion / calculate ID %s", idErr)
}
readResp, getErr := p.client.RequestWithTimeout("GET", id, nil, 0)
if getErr != nil {
return nil, errors.Wrapf(err, "waiting for completion / read state %s", getErr)
}
checkpoint, cpErr := plugin.MarshalProperties(
checkpointObject(inputs, readResp),
plugin.MarshalOptions{Label: fmt.Sprintf("%s.partialCheckpoint", label), KeepSecrets: true, SkipNulls: true},
)
if cpErr != nil {
return nil, errors.Wrapf(err, "waiting for completion / checkpoint %s", cpErr)
}
return nil, partialError(id, err, checkpoint, req.GetProperties())
}
// Store both outputs and inputs into the state.
checkpoint, err := plugin.MarshalProperties(
checkpointObject(inputs, resp),
plugin.MarshalOptions{Label: fmt.Sprintf("%s.checkpoint", label), KeepSecrets: true, SkipNulls: true},
)
id, err := calculateResourceId(res, inputsMap, resp)
if err != nil {
return nil, errors.Wrapf(err, "calculating resource ID")
}
return &rpc.CreateResponse{
Id: id,
Properties: checkpoint,
}, nil
}
func (p *googleCloudProvider) prepareAPIInputs(
inputs, state resource.PropertyMap,
properties map[string]resources.CloudAPIProperty) map[string]interface{} {
inputsMap := inputs.Mappable()
stateMap := state.Mappable()
return p.converter.SdkPropertiesToRequestBody(properties, inputsMap, stateMap)
}
// waitForResourceOpCompletion keeps polling the resource or operation URL until it gets
// a success or a failure of provisioning.
// Note that both a response and an error can be returned in case of a partially-failed deployment
// (e.g., the resource is created but fails to initialize to completion).
func (p *googleCloudProvider) waitForResourceOpCompletion(baseUrl string, resp map[string]interface{}) (map[string]interface{}, error) {
retryPolicy := backoff.Backoff{
Min: 1 * time.Second,
Max: 15 * time.Second,
Factor: 1.5,
Jitter: true,
}
for {
logging.V(9).Infof("waiting for completion: %+v", resp)
// There are two styles of operations: one returns a 'done' boolean flag, another one returns status='DONE'.
done, hasDone := resp["done"].(bool)
status, hasStatus := resp["status"].(string)
if completed := (hasDone && done) || (hasStatus && status == "DONE"); completed {
// Extract an error message from the response, if any.
var err error
if failure, has := resp["error"]; has {
err = errors.Errorf("operation errored with %+v", failure)
} else if statusMessage, has := resp["statusMessage"]; has {
err = errors.Errorf("operation failed with %q", statusMessage)
}
// Extract the resource response, if any.
// A partial error could happen, so both response and error could be available.
if response, has := resp["response"].(map[string]interface{}); has {
return response, err
}
if operationType, has := resp["operationType"].(string); has && strings.Contains(strings.ToLower(operationType), "delete") {
return resp, err
}
// Check if there's a target link.
if targetLink, has := resp["targetLink"].(string); has {
// Try reading resource state.
state, getErr := p.client.RequestWithTimeout("GET", targetLink, nil, 0)
if getErr != nil {
if err != nil {
// Return the original creation error if resource read failed.
return nil, err
}
return nil, getErr
}
// A partial error could happen, so both response and error could be available.
return state, err
}
// At this point, we assume either a complete failure or a clean response.
if err != nil {
return nil, err
}
return resp, nil
}
var pollUri string
if selfLink, has := resp["selfLink"].(string); has && hasStatus {
pollUri = selfLink
} else {
if name, has := resp["name"].(string); has && strings.HasPrefix(name, "operations/") {
pollUri = fmt.Sprintf("%s/v1/%s", baseUrl, name)
}
}
if pollUri == "" {
return resp, nil
}
time.Sleep(retryPolicy.Duration())
op, err := p.client.RequestWithTimeout("GET", pollUri, nil, 0)
if err != nil {
return nil, errors.Wrapf(err, "polling operation status")
}
resp = op
}
}
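// Polling cadence implied by the backoff above (Min 1s, Max 15s, Factor 1.5), ignoring
// jitter: roughly 1s, 1.5s, 2.25s, 3.4s, ... between status checks, capped at 15s,
// until the operation reports done/DONE or the request fails.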
// Read the current live state associated with a resource.
func (p *googleCloudProvider) Read(_ context.Context, req *rpc.ReadRequest) (*rpc.ReadResponse, error) {
urn := resource.URN(req.GetUrn())
label := fmt.Sprintf("%s.Read(%s)", p.name, urn)
resourceKey := string(urn.Type())
res, ok := p.resourceMap.Resources[resourceKey]
if !ok {
return nil, errors.Errorf("resource %q not found", resourceKey)
}
id := req.GetId()
uri := res.ResourceUrl(id)
// Retrieve the old state.
oldState, err := plugin.UnmarshalProperties(req.GetProperties(), plugin.MarshalOptions{
Label: fmt.Sprintf("%s.olds", label), KeepUnknowns: true, SkipNulls: true, KeepSecrets: true,
})
if err != nil {
return nil, err
}
// Read the current state of the resource from the API.
newState, err := p.client.RequestWithTimeout("GET", uri, nil, 0)
if err != nil {
if reqErr, ok := err.(*googleapi.Error); ok && reqErr.Code == http.StatusNotFound {
// 404 means that the resource was deleted.
return &rpc.ReadResponse{Id: ""}, nil
}
return nil, fmt.Errorf("error sending request: %s", err)
}
// Extract old inputs from the `__inputs` field of the old state.
inputs := parseCheckpointObject(oldState)
newStateProps := resource.NewPropertyMapFromMap(newState)
if inputs == nil {
return nil, status.Error(codes.Unimplemented, "Import is not yet implemented")
} else {
// It's hard to infer the changes in the inputs shape based on the outputs without false positives.
// The current approach is complicated, but it is aimed at minimizing the noise while refreshing:
// 0. We have "old" inputs and outputs before refresh and "new" outputs read from API.
// 1. Project old outputs to their corresponding input shape (exclude read-only properties).
oldInputProjection := getInputsFromState(res, oldState)
// 2. Project new outputs to their corresponding input shape (exclude read-only properties).
newInputProjection := getInputsFromState(res, newStateProps)
// 3. Calculate the difference between two projections. This should give us actual significant changes
// that happened in Google Cloud between the last resource update and its current state.
diff := oldInputProjection.Diff(newInputProjection)
// 4. Apply this difference to the actual inputs (not a projection) that we have in state.
inputs = applyDiff(inputs, diff)
}
// Store both outputs and inputs into the state checkpoint.
checkpoint, err := plugin.MarshalProperties(
checkpointObject(inputs, newState),
plugin.MarshalOptions{Label: fmt.Sprintf("%s.checkpoint", label), KeepSecrets: true, SkipNulls: true},
)
if err != nil {
return nil, err
}
inputsRecord, err := plugin.MarshalProperties(
inputs,
plugin.MarshalOptions{Label: fmt.Sprintf("%s.inputs", label), KeepUnknowns: true, SkipNulls: true},
)
if err != nil {
return nil, err
}
return &rpc.ReadResponse{Id: id, Properties: checkpoint, Inputs: inputsRecord}, nil
}
// Update updates an existing resource with new values.
func (p *googleCloudProvider) Update(_ context.Context, req *rpc.UpdateRequest) (*rpc.UpdateResponse, error) {
urn := resource.URN(req.GetUrn())
label := fmt.Sprintf("%s.Update(%s)", p.name, urn)
logging.V(9).Infof("%s executing", label)
// Deserialize RPC inputs | inputs, err := plugin.UnmarshalProperties(req.GetNews(), plugin.MarshalOptions{
Label: fmt.Sprintf("%s.properties", label), SkipNulls: true,
})
if err != nil { | random_line_split |
|
provider.go |
if err = uncompressed.Close(); err != nil {
return nil, errors.Wrap(err, "closing uncompress stream for metadata")
}
return &resourceMap, nil
}
// Configure configures the resource provider with "globals" that control its behavior.
func (p *googleCloudProvider) Configure(ctx context.Context,
req *rpc.ConfigureRequest) (*rpc.ConfigureResponse, error) {
for key, val := range req.GetVariables() {
p.config[strings.TrimPrefix(key, "google-native:config:")] = val
}
p.setLoggingContext(ctx)
impersonateServiceAccountDelegatesString := p.getConfig("impersonateServiceAccountDelegates", "")
var impersonateServiceAccountDelegates []string
if impersonateServiceAccountDelegatesString != "" {
err := json.Unmarshal([]byte(impersonateServiceAccountDelegatesString), &impersonateServiceAccountDelegates)
if err != nil {
return nil, errors.Wrapf(err, "failed to unmarshal %q as Impersonate Service Account Delegates", impersonateServiceAccountDelegatesString)
}
}
scopesString := p.getConfig("scopes", "")
var scopes []string
if scopesString != "" {
err := json.Unmarshal([]byte(scopesString), &scopes)
if err != nil {
return nil, errors.Wrapf(err, "failed to unmarshal %q as Scopes", scopesString)
}
}
appendUserAgent := p.getConfig("appendUserAgent", "GOOGLE_APPEND_USER_AGENT")
config := googleclient.Config{
Credentials: p.getConfig("credentials", "GOOGLE_CREDENTIALS"),
AccessToken: p.getConfig("accessToken", "GOOGLE_OAUTH_ACCESS_TOKEN"),
ImpersonateServiceAccount: p.getConfig("impersonateServiceAccount", "GOOGLE_IMPERSONATE_SERVICE_ACCOUNT"),
ImpersonateServiceAccountDelegates: impersonateServiceAccountDelegates,
Scopes: scopes,
PulumiVersion: getPulumiVersion(),
ProviderVersion: version.Version,
PartnerName: p.getPartnerName(),
AppendUserAgent: appendUserAgent,
}
client, err := googleclient.New(ctx, config)
if err != nil {
return nil, err
}
p.client = client
return &rpc.ConfigureResponse{
AcceptSecrets: true,
}, nil
}
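// Example of the JSON-encoded settings parsed above (hypothetical values):
//   pulumi config set google-native:scopes '["https://www.googleapis.com/auth/cloud-platform"]'
//   pulumi config set google-native:impersonateServiceAccountDelegates '["sa-a@my-proj.iam.gserviceaccount.com"]'
// Plain string settings such as credentials/accessToken fall back to the corresponding
// environment variables when unset.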
// Invoke dynamically executes a built-in function in the provider.
func (p *googleCloudProvider) Invoke(_ context.Context, req *rpc.InvokeRequest) (*rpc.InvokeResponse, error) {
label := fmt.Sprintf("%s.Invoke(%s)", p.name, req.Tok)
inv, ok := p.resourceMap.Functions[req.Tok]
if !ok {
return nil, errors.Errorf("invoke %q not found", req.Tok)
}
args, err := plugin.UnmarshalProperties(req.GetArgs(), plugin.MarshalOptions{
Label: fmt.Sprintf("%s.args", label), SkipNulls: true,
})
if err != nil {
return nil, err
}
// Apply default config values.
for _, param := range inv.Params {
sdkName := param.Name
if param.SdkName != "" {
sdkName = param.SdkName
}
switch sdkName {
case "project":
key := resource.PropertyKey(sdkName)
if value, ok := p.getDefaultValue(key, sdkName, args); ok {
args[key] = *value
}
}
}
uri, err := buildFunctionUrl(inv, args)
if err != nil {
return nil, err
}
resp, err := p.client.RequestWithTimeout(inv.Verb, uri, nil, 0)
if err != nil {
return nil, fmt.Errorf("error sending request: %s", err)
}
// Serialize and return outputs.
result, err := plugin.MarshalProperties(
resource.NewPropertyMapFromMap(resp),
plugin.MarshalOptions{Label: fmt.Sprintf("%s.response", label), SkipNulls: true},
)
if err != nil {
return nil, err
}
return &rpc.InvokeResponse{Return: result}, nil
}
// StreamInvoke dynamically executes a built-in function in the provider. The result is streamed
// back as a series of messages.
func (p *googleCloudProvider) StreamInvoke(_ *rpc.InvokeRequest, _ rpc.ResourceProvider_StreamInvokeServer) error {
return status.Error(codes.Unimplemented, "StreamInvoke is not yet implemented")
}
// Check validates that the given property bag is valid for a resource of the given type and returns
// the inputs that should be passed to successive calls to Diff, Create, or Update for this
// resource. As a rule, the provider inputs returned by a call to Check should preserve the original
// representation of the properties as present in the program inputs. Though this rule is not
// required for correctness, violations thereof can negatively impact the end-user experience, as
// the provider inputs are using for detecting and rendering diffs.
func (p *googleCloudProvider) Check(_ context.Context, req *rpc.CheckRequest) (*rpc.CheckResponse, error) {
urn := resource.URN(req.GetUrn())
label := fmt.Sprintf("%s.Check(%s)", p.name, urn)
logging.V(9).Infof("%s executing", label)
// Deserialize RPC inputs.
olds, err := plugin.UnmarshalProperties(req.GetOlds(), plugin.MarshalOptions{
Label: fmt.Sprintf("%s.olds", label), SkipNulls: true,
})
if err != nil {
return nil, err
}
news, err := plugin.UnmarshalProperties(req.GetNews(), plugin.MarshalOptions{
Label: fmt.Sprintf("%s.news", label), KeepUnknowns: true, SkipNulls: true,
})
if err != nil {
return nil, err
}
resourceKey := string(urn.Type())
res, ok := p.resourceMap.Resources[resourceKey]
if !ok {
return nil, errors.Errorf("resource %q not found", resourceKey)
}
// Apply default config values.
var failures []*rpc.CheckFailure
for _, param := range res.CreateParams {
sdkName := param.Name
if param.SdkName != "" {
sdkName = param.SdkName
}
switch sdkName {
case "project", "location", "zone":
key := resource.PropertyKey(sdkName)
configName := sdkName
if sdkName == "location" {
configName = "region"
}
if _, has := news[key]; has {
continue
}
if value, ok := p.getDefaultValue(key, configName, olds); ok {
news[key] = *value
} else {
reason := fmt.Sprintf("missing required property '%s'. Either set it explicitly or configure it with 'pulumi config set google-native:%s <value>'.", sdkName, configName)
failures = append(failures, &rpc.CheckFailure{
Reason: reason,
})
}
}
}
// Auto-naming.
nameKey := resource.PropertyKey("name")
if res.AutoNamePattern != "" && !news.HasValue(nameKey) {
news[nameKey] = getDefaultName(urn, res.AutoNamePattern, olds, news)
}
// Apply property patterns.
for name, prop := range res.CreateProperties {
key := resource.PropertyKey(name)
if prop.SdkName != "" {
key = resource.PropertyKey(prop.SdkName)
}
if value, ok := applyPropertyPattern(key, prop, news); ok {
news[key] = *value
}
}
resInputs, err := plugin.MarshalProperties(news, plugin.MarshalOptions{
Label: fmt.Sprintf("%s.resInputs", label), KeepUnknowns: true})
if err != nil {
return nil, err
}
return &rpc.CheckResponse{Inputs: resInputs, Failures: failures}, nil
}
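// Defaulting example for Check above (hypothetical project name): with
// `pulumi config set google-native:project my-proj` and no explicit `project` input,
// the property is filled from provider config; if neither the old inputs nor the config
// supply it, a CheckFailure asking the user to set it is returned instead.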
// Get a default project name for the given inputs.
func (p *googleCloudProvider) getDefaultValue(key resource.PropertyKey, configName string, olds resource.PropertyMap) (*resource.PropertyValue, bool) {
// 1. Check if old inputs define the value.
if v, ok := olds[key]; ok {
return &v, true
}
// 2. Check if the config has a corresponding value.
if cv, ok := p.config[configName]; ok {
v := resource.NewStringProperty(cv)
return &v, true
}
return nil, false
}
func (p *googleCloudProvider) GetSchema(_ context.Context, req *rpc.GetSchemaRequest) (*rpc.GetSchemaResponse, error) {
if v := req.GetVersion(); v != 0 {
return nil, fmt.Errorf("unsupported schema version %d", v)
}
return &rpc.GetSchemaResponse{Schema: string(p.schemaBytes)}, nil
}
// CheckConfig validates the configuration for this provider.
func (p *googleCloudProvider) CheckConfig(_ context.Context, req *rpc.CheckRequest) (*rpc.CheckResponse, error) {
return &rpc.CheckResponse{Inputs: req.GetNews()}, nil
}
// DiffConfig diffs the configuration for this provider.
func (p *googleCloudProvider) DiffConfig(context.Context, *rpc.DiffRequest) (*rpc.DiffResponse, error) {
return &rpc.DiffResponse{
Changes: 0,
Replaces: []string{},
Stables: []string{},
DeleteBeforeReplace: false,
}, nil
}
// Diff checks | {
return nil, errors.Wrap(err, "unmarshalling resource map")
} | conditional_block |
|
fetcher_default.go | _ Fetcher = new(FetcherDefault)
type fetcherRegistry interface {
x.RegistryLogger
RuleRepository() Repository
}
type FetcherDefault struct {
config configuration.Provider
registry fetcherRegistry
hc *http.Client
mux *blob.URLMux
cache map[string][]Rule
cancelWatchers map[string]context.CancelFunc
events chan watcherx.Event
lock sync.Mutex
}
func NewFetcherDefault(
config configuration.Provider,
registry fetcherRegistry,
) *FetcherDefault {
return &FetcherDefault{
registry: registry,
config: config,
mux: cloudstorage.NewURLMux(),
hc: httpx.NewResilientClient(httpx.ResilientClientWithConnectionTimeout(15 * time.Second)).StandardClient(),
cache: make(map[string][]Rule),
cancelWatchers: make(map[string]context.CancelFunc),
events: make(chan watcherx.Event),
}
}
func (f *FetcherDefault) SetURLMux(mux *blob.URLMux) {
f.mux = mux
}
func splitLocalRemoteRepos(ruleRepos []url.URL) (files []string, nonFiles []url.URL) {
files = make([]string, 0, len(ruleRepos))
nonFiles = make([]url.URL, 0, len(ruleRepos))
for _, repo := range ruleRepos {
if repo.Scheme == "file" || repo.Scheme == "" {
files = append(files,
filepath.Clean(
urlx.GetURLFilePath(&repo)))
} else {
nonFiles = append(nonFiles, repo)
}
}
return files, nonFiles
}
// watchLocalFiles watches all files that are configured in the config and are not watched already.
// It also cancels watchers for files that are no longer configured. This function is idempotent.
func (f *FetcherDefault) watchLocalFiles(ctx context.Context) {
f.lock.Lock()
repoChanged := false
cancelWatchers := make(map[string]context.CancelFunc, len(f.cancelWatchers))
localFiles, _ := splitLocalRemoteRepos(f.config.AccessRuleRepositories())
for _, fp := range localFiles {
if cancel, ok := f.cancelWatchers[fp]; !ok {
// watch all files we are not yet watching
repoChanged = true
ctx, cancelWatchers[fp] = context.WithCancel(ctx)
w, err := watcherx.WatchFile(ctx, fp, f.events)
if err != nil { | // we force reading the files
done, err := w.DispatchNow()
if err != nil {
f.registry.Logger().WithError(err).WithField("file", fp).Error("Unable to read file, ignoring it.")
continue
}
go func() { <-done }() // we do not need to wait here, but we need to clear the channel
} else {
// keep watching files we are already watching
cancelWatchers[fp] = cancel
}
}
// cancel watchers for files we are no longer watching
for fp, cancel := range f.cancelWatchers {
if _, ok := cancelWatchers[fp]; !ok {
f.registry.Logger().WithField("file", fp).Info("Stopped watching access rule file.")
repoChanged = true
cancel()
delete(f.cache, fp)
}
}
f.cancelWatchers = cancelWatchers
f.lock.Unlock() // release lock before processing events
if repoChanged {
f.registry.Logger().WithField("repos", f.config.Get(configuration.AccessRuleRepositories)).Info("Detected access rule repository change, processing updates.")
if err := f.updateRulesFromCache(ctx); err != nil {
f.registry.Logger().WithError(err).WithField("event_source", "local repo change").Error("Unable to update access rules.")
}
}
}
func (f *FetcherDefault) Watch(ctx context.Context) error {
f.watchLocalFiles(ctx)
getRemoteRepos := func() map[url.URL]struct{} {
_, remoteRepos := splitLocalRemoteRepos(f.config.AccessRuleRepositories())
repos := make(map[url.URL]struct{}, len(remoteRepos))
for _, repo := range remoteRepos {
repos[repo] = struct{}{}
}
return repos
}
// capture the previous config values to detect changes, and trigger initial processing
strategy := f.config.AccessRuleMatchingStrategy()
if err := f.processStrategyUpdate(ctx, strategy); err != nil {
return err
}
remoteRepos := getRemoteRepos()
if err := f.processRemoteRepoUpdate(ctx, nil, remoteRepos); err != nil {
return err
}
f.config.AddWatcher(func(_ watcherx.Event, err error) {
if err != nil {
return
}
// watch files that need to be watched
f.watchLocalFiles(ctx)
// update the matching strategy if it changed
if newStrategy := f.config.AccessRuleMatchingStrategy(); newStrategy != strategy {
f.registry.Logger().WithField("strategy", newStrategy).Info("Detected access rule matching strategy change, processing updates.")
if err := f.processStrategyUpdate(ctx, newStrategy); err != nil {
f.registry.Logger().WithError(err).Error("Unable to update access rule matching strategy.")
} else {
strategy = newStrategy
}
}
// update & fetch the remote repos if they changed
newRemoteRepos := getRemoteRepos()
if err := f.processRemoteRepoUpdate(ctx, remoteRepos, newRemoteRepos); err != nil {
f.registry.Logger().WithError(err).Error("Unable to update remote access rule repository config.")
}
remoteRepos = newRemoteRepos
})
go f.processLocalUpdates(ctx)
return nil
}
func (f *FetcherDefault) processStrategyUpdate(ctx context.Context, newValue configuration.MatchingStrategy) error {
if err := f.registry.RuleRepository().SetMatchingStrategy(ctx, newValue); err != nil {
return err
}
return nil
}
func (f *FetcherDefault) processRemoteRepoUpdate(ctx context.Context, oldRepos, newRepos map[url.URL]struct{}) error {
repoChanged := false
for repo := range newRepos {
if _, ok := f.cache[repo.String()]; !ok {
repoChanged = true
f.registry.Logger().WithField("repo", repo.String()).Info("New repo detected, fetching access rules.")
rules, err := f.fetch(repo)
if err != nil {
f.registry.Logger().WithError(err).WithField("repo", repo.String()).Error("Unable to fetch access rules.")
return err
}
f.cacheRules(repo.String(), rules)
}
}
for repo := range oldRepos {
if _, ok := newRepos[repo]; !ok {
repoChanged = true
f.registry.Logger().WithField("repo", repo.String()).Info("Repo was removed, removing access rules.")
f.lock.Lock()
delete(f.cache, repo.String())
f.lock.Unlock()
}
}
if repoChanged {
if err := f.updateRulesFromCache(ctx); err != nil {
f.registry.Logger().WithError(err).WithField("event_source", "remote change").Error("Unable to update access rules.")
return err
}
}
return nil
}
func (f *FetcherDefault) processLocalUpdates(ctx context.Context) {
for {
select {
case <-ctx.Done():
return
case e, ok := <-f.events:
if !ok {
// channel was closed
return
}
f.registry.Logger().
WithField("event", "fsnotify").
WithField("file", e.Source()).
Info("Detected file change for access rules. Triggering a reload.")
if e.Reader() == nil {
f.registry.Logger().WithField("file", e.Source()).Error("Unable to read access rules probably because they were deleted, skipping those.")
continue
}
rules, err := f.decode(e.Reader())
if err != nil {
f.registry.Logger().WithField("file", e.Source()).WithError(err).Error("Unable to decode access rules, skipping those.")
continue
}
f.cacheRules(e.Source(), rules)
if err := f.updateRulesFromCache(ctx); err != nil {
f.registry.Logger().WithError(err).WithField("event_source", "local change").Error("Unable to update access rules.")
}
}
}
}
func (f *FetcherDefault) cacheRules(source string, rules []Rule) {
f.lock.Lock()
defer f.lock.Unlock()
f.cache[source] = rules
}
func (f *FetcherDefault) updateRulesFromCache(ctx context.Context) error {
f.lock.Lock()
defer f.lock.Unlock()
allRules := make([]Rule, 0)
for _, rules := range f.cache {
allRules = append(allRules, rules...)
}
return f.registry.RuleRepository().Set(ctx, allRules)
}
func (f *FetcherDefault) fetch(source url.URL) ([]Rule, error) {
f.registry.Logger().
WithField("location", source.String()).
Debugf("Fetching access rules from given location because something changed.")
switch source.Scheme {
case "azblob", "gs", "s3":
return f.fetch |
f.registry.Logger().WithError(err).WithField("file", fp).Error("Unable to watch file, ignoring it.")
continue
}
| conditional_block |
fetcher_default.go | _ Fetcher = new(FetcherDefault)
type fetcherRegistry interface {
x.RegistryLogger
RuleRepository() Repository
}
type FetcherDefault struct {
config configuration.Provider
registry fetcherRegistry
hc *http.Client
mux *blob.URLMux
cache map[string][]Rule
cancelWatchers map[string]context.CancelFunc
events chan watcherx.Event
lock sync.Mutex
}
func NewFetcherDefault(
config configuration.Provider,
registry fetcherRegistry,
) *FetcherDefault {
return &FetcherDefault{
registry: registry,
config: config,
mux: cloudstorage.NewURLMux(),
hc: httpx.NewResilientClient(httpx.ResilientClientWithConnectionTimeout(15 * time.Second)).StandardClient(),
cache: make(map[string][]Rule),
cancelWatchers: make(map[string]context.CancelFunc),
events: make(chan watcherx.Event),
}
}
func (f *FetcherDefault) SetURLMux(mux *blob.URLMux) {
f.mux = mux
}
func splitLocalRemoteRepos(ruleRepos []url.URL) (files []string, nonFiles []url.URL) {
files = make([]string, 0, len(ruleRepos))
nonFiles = make([]url.URL, 0, len(ruleRepos))
for _, repo := range ruleRepos {
if repo.Scheme == "file" || repo.Scheme == "" {
files = append(files,
filepath.Clean(
urlx.GetURLFilePath(&repo)))
} else {
nonFiles = append(nonFiles, repo)
}
}
return files, nonFiles
}
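// Example split (hypothetical repositories, Unix-style paths):
//   file:///etc/rules/a.json                                   -> files    == ["/etc/rules/a.json"]
//   https://example.org/rules.json and s3://bucket/rules.json  -> nonFiles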
// watchLocalFiles watches all files that are configured in the config and are not watched already.
// It also cancels watchers for files that are no longer configured. This function is idempotent.
func (f *FetcherDefault) watchLocalFiles(ctx context.Context) {
f.lock.Lock()
repoChanged := false
cancelWatchers := make(map[string]context.CancelFunc, len(f.cancelWatchers))
localFiles, _ := splitLocalRemoteRepos(f.config.AccessRuleRepositories())
for _, fp := range localFiles {
if cancel, ok := f.cancelWatchers[fp]; !ok {
// watch all files we are not yet watching
repoChanged = true
ctx, cancelWatchers[fp] = context.WithCancel(ctx)
w, err := watcherx.WatchFile(ctx, fp, f.events)
if err != nil {
f.registry.Logger().WithError(err).WithField("file", fp).Error("Unable to watch file, ignoring it.")
continue
}
// we force reading the files
done, err := w.DispatchNow()
if err != nil {
f.registry.Logger().WithError(err).WithField("file", fp).Error("Unable to read file, ignoring it.")
continue
}
go func() { <-done }() // we do not need to wait here, but we need to clear the channel
} else {
// keep watching files we are already watching
cancelWatchers[fp] = cancel
}
}
// cancel watchers for files we are no longer watching
for fp, cancel := range f.cancelWatchers {
if _, ok := cancelWatchers[fp]; !ok {
f.registry.Logger().WithField("file", fp).Info("Stopped watching access rule file.")
repoChanged = true
cancel()
delete(f.cache, fp)
}
}
f.cancelWatchers = cancelWatchers
f.lock.Unlock() // release lock before processing events
if repoChanged { | if err := f.updateRulesFromCache(ctx); err != nil {
f.registry.Logger().WithError(err).WithField("event_source", "local repo change").Error("Unable to update access rules.")
}
}
}
func (f *FetcherDefault) Watch(ctx context.Context) error {
f.watchLocalFiles(ctx)
getRemoteRepos := func() map[url.URL]struct{} {
_, remoteRepos := splitLocalRemoteRepos(f.config.AccessRuleRepositories())
repos := make(map[url.URL]struct{}, len(remoteRepos))
for _, repo := range remoteRepos {
repos[repo] = struct{}{}
}
return repos
}
// capture the previous config values to detect changes, and trigger initial processing
strategy := f.config.AccessRuleMatchingStrategy()
if err := f.processStrategyUpdate(ctx, strategy); err != nil {
return err
}
remoteRepos := getRemoteRepos()
if err := f.processRemoteRepoUpdate(ctx, nil, remoteRepos); err != nil {
return err
}
f.config.AddWatcher(func(_ watcherx.Event, err error) {
if err != nil {
return
}
// watch files that need to be watched
f.watchLocalFiles(ctx)
// update the matching strategy if it changed
if newStrategy := f.config.AccessRuleMatchingStrategy(); newStrategy != strategy {
f.registry.Logger().WithField("strategy", newStrategy).Info("Detected access rule matching strategy change, processing updates.")
if err := f.processStrategyUpdate(ctx, newStrategy); err != nil {
f.registry.Logger().WithError(err).Error("Unable to update access rule matching strategy.")
} else {
strategy = newStrategy
}
}
// update & fetch the remote repos if they changed
newRemoteRepos := getRemoteRepos()
if err := f.processRemoteRepoUpdate(ctx, remoteRepos, newRemoteRepos); err != nil {
f.registry.Logger().WithError(err).Error("Unable to update remote access rule repository config.")
}
remoteRepos = newRemoteRepos
})
go f.processLocalUpdates(ctx)
return nil
}
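// Overall flow wired up by Watch above: a config change re-runs watchLocalFiles and
// re-fetches any added remote repos; file events arrive on f.events and are consumed by
// processLocalUpdates; every path ends in updateRulesFromCache, which merges the cached
// per-source rules and hands them to the RuleRepository.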
func (f *FetcherDefault) processStrategyUpdate(ctx context.Context, newValue configuration.MatchingStrategy) error {
if err := f.registry.RuleRepository().SetMatchingStrategy(ctx, newValue); err != nil {
return err
}
return nil
}
func (f *FetcherDefault) processRemoteRepoUpdate(ctx context.Context, oldRepos, newRepos map[url.URL]struct{}) error {
repoChanged := false
for repo := range newRepos {
if _, ok := f.cache[repo.String()]; !ok {
repoChanged = true
f.registry.Logger().WithField("repo", repo.String()).Info("New repo detected, fetching access rules.")
rules, err := f.fetch(repo)
if err != nil {
f.registry.Logger().WithError(err).WithField("repo", repo.String()).Error("Unable to fetch access rules.")
return err
}
f.cacheRules(repo.String(), rules)
}
}
for repo := range oldRepos {
if _, ok := newRepos[repo]; !ok {
repoChanged = true
f.registry.Logger().WithField("repo", repo.String()).Info("Repo was removed, removing access rules.")
f.lock.Lock()
delete(f.cache, repo.String())
f.lock.Unlock()
}
}
if repoChanged {
if err := f.updateRulesFromCache(ctx); err != nil {
f.registry.Logger().WithError(err).WithField("event_source", "remote change").Error("Unable to update access rules.")
return err
}
}
return nil
}
func (f *FetcherDefault) processLocalUpdates(ctx context.Context) {
for {
select {
case <-ctx.Done():
return
case e, ok := <-f.events:
if !ok {
// channel was closed
return
}
f.registry.Logger().
WithField("event", "fsnotify").
WithField("file", e.Source()).
Info("Detected file change for access rules. Triggering a reload.")
if e.Reader() == nil {
f.registry.Logger().WithField("file", e.Source()).Error("Unable to read access rules probably because they were deleted, skipping those.")
continue
}
rules, err := f.decode(e.Reader())
if err != nil {
f.registry.Logger().WithField("file", e.Source()).WithError(err).Error("Unable to decode access rules, skipping those.")
continue
}
f.cacheRules(e.Source(), rules)
if err := f.updateRulesFromCache(ctx); err != nil {
f.registry.Logger().WithError(err).WithField("event_source", "local change").Error("Unable to update access rules.")
}
}
}
}
func (f *FetcherDefault) cacheRules(source string, rules []Rule) {
f.lock.Lock()
defer f.lock.Unlock()
f.cache[source] = rules
}
func (f *FetcherDefault) updateRulesFromCache(ctx context.Context) error {
f.lock.Lock()
defer f.lock.Unlock()
allRules := make([]Rule, 0)
for _, rules := range f.cache {
allRules = append(allRules, rules...)
}
return f.registry.RuleRepository().Set(ctx, allRules)
}
func (f *FetcherDefault) fetch(source url.URL) ([]Rule, error) {
f.registry.Logger().
WithField("location", source.String()).
Debugf("Fetching access rules from given location because something changed.")
switch source.Scheme {
case "azblob", "gs", "s3":
return f.fetchFrom | f.registry.Logger().WithField("repos", f.config.Get(configuration.AccessRuleRepositories)).Info("Detected access rule repository change, processing updates.") | random_line_split |
fetcher_default.go | = append(nonFiles, repo)
}
}
return files, nonFiles
}
// watchLocalFiles watches all files that are configured in the config and are not watched already.
// It also cancels watchers for files that are no longer configured. This function is idempotent.
func (f *FetcherDefault) watchLocalFiles(ctx context.Context) {
f.lock.Lock()
repoChanged := false
cancelWatchers := make(map[string]context.CancelFunc, len(f.cancelWatchers))
localFiles, _ := splitLocalRemoteRepos(f.config.AccessRuleRepositories())
for _, fp := range localFiles {
if cancel, ok := f.cancelWatchers[fp]; !ok {
// watch all files we are not yet watching
repoChanged = true
ctx, cancelWatchers[fp] = context.WithCancel(ctx)
w, err := watcherx.WatchFile(ctx, fp, f.events)
if err != nil {
f.registry.Logger().WithError(err).WithField("file", fp).Error("Unable to watch file, ignoring it.")
continue
}
// we force reading the files
done, err := w.DispatchNow()
if err != nil {
f.registry.Logger().WithError(err).WithField("file", fp).Error("Unable to read file, ignoring it.")
continue
}
go func() { <-done }() // we do not need to wait here, but we need to clear the channel
} else {
// keep watching files we are already watching
cancelWatchers[fp] = cancel
}
}
// cancel watchers for files we are no longer watching
for fp, cancel := range f.cancelWatchers {
if _, ok := cancelWatchers[fp]; !ok {
f.registry.Logger().WithField("file", fp).Info("Stopped watching access rule file.")
repoChanged = true
cancel()
delete(f.cache, fp)
}
}
f.cancelWatchers = cancelWatchers
f.lock.Unlock() // release lock before processing events
if repoChanged {
f.registry.Logger().WithField("repos", f.config.Get(configuration.AccessRuleRepositories)).Info("Detected access rule repository change, processing updates.")
if err := f.updateRulesFromCache(ctx); err != nil {
f.registry.Logger().WithError(err).WithField("event_source", "local repo change").Error("Unable to update access rules.")
}
}
}
func (f *FetcherDefault) Watch(ctx context.Context) error {
f.watchLocalFiles(ctx)
getRemoteRepos := func() map[url.URL]struct{} {
_, remoteRepos := splitLocalRemoteRepos(f.config.AccessRuleRepositories())
repos := make(map[url.URL]struct{}, len(remoteRepos))
for _, repo := range remoteRepos {
repos[repo] = struct{}{}
}
return repos
}
// capture the previous config values to detect changes, and trigger initial processing
strategy := f.config.AccessRuleMatchingStrategy()
if err := f.processStrategyUpdate(ctx, strategy); err != nil {
return err
}
remoteRepos := getRemoteRepos()
if err := f.processRemoteRepoUpdate(ctx, nil, remoteRepos); err != nil {
return err
}
f.config.AddWatcher(func(_ watcherx.Event, err error) {
if err != nil {
return
}
// watch files that need to be watched
f.watchLocalFiles(ctx)
// update the matching strategy if it changed
if newStrategy := f.config.AccessRuleMatchingStrategy(); newStrategy != strategy {
f.registry.Logger().WithField("strategy", newStrategy).Info("Detected access rule matching strategy change, processing updates.")
if err := f.processStrategyUpdate(ctx, newStrategy); err != nil {
f.registry.Logger().WithError(err).Error("Unable to update access rule matching strategy.")
} else {
strategy = newStrategy
}
}
// update & fetch the remote repos if they changed
newRemoteRepos := getRemoteRepos()
if err := f.processRemoteRepoUpdate(ctx, remoteRepos, newRemoteRepos); err != nil {
f.registry.Logger().WithError(err).Error("Unable to update remote access rule repository config.")
}
remoteRepos = newRemoteRepos
})
go f.processLocalUpdates(ctx)
return nil
}
func (f *FetcherDefault) processStrategyUpdate(ctx context.Context, newValue configuration.MatchingStrategy) error {
if err := f.registry.RuleRepository().SetMatchingStrategy(ctx, newValue); err != nil {
return err
}
return nil
}
func (f *FetcherDefault) processRemoteRepoUpdate(ctx context.Context, oldRepos, newRepos map[url.URL]struct{}) error {
repoChanged := false
for repo := range newRepos {
if _, ok := f.cache[repo.String()]; !ok {
repoChanged = true
f.registry.Logger().WithField("repo", repo.String()).Info("New repo detected, fetching access rules.")
rules, err := f.fetch(repo)
if err != nil {
f.registry.Logger().WithError(err).WithField("repo", repo.String()).Error("Unable to fetch access rules.")
return err
}
f.cacheRules(repo.String(), rules)
}
}
for repo := range oldRepos {
if _, ok := newRepos[repo]; !ok {
repoChanged = true
f.registry.Logger().WithField("repo", repo.String()).Info("Repo was removed, removing access rules.")
f.lock.Lock()
delete(f.cache, repo.String())
f.lock.Unlock()
}
}
if repoChanged {
if err := f.updateRulesFromCache(ctx); err != nil {
f.registry.Logger().WithError(err).WithField("event_source", "remote change").Error("Unable to update access rules.")
return err
}
}
return nil
}
func (f *FetcherDefault) processLocalUpdates(ctx context.Context) {
for {
select {
case <-ctx.Done():
return
case e, ok := <-f.events:
if !ok {
// channel was closed
return
}
f.registry.Logger().
WithField("event", "fsnotify").
WithField("file", e.Source()).
Info("Detected file change for access rules. Triggering a reload.")
if e.Reader() == nil {
f.registry.Logger().WithField("file", e.Source()).Error("Unable to read access rules probably because they were deleted, skipping those.")
continue
}
rules, err := f.decode(e.Reader())
if err != nil {
f.registry.Logger().WithField("file", e.Source()).WithError(err).Error("Unable to decode access rules, skipping those.")
continue
}
f.cacheRules(e.Source(), rules)
if err := f.updateRulesFromCache(ctx); err != nil {
f.registry.Logger().WithError(err).WithField("event_source", "local change").Error("Unable to update access rules.")
}
}
}
}
func (f *FetcherDefault) cacheRules(source string, rules []Rule) {
f.lock.Lock()
defer f.lock.Unlock()
f.cache[source] = rules
}
func (f *FetcherDefault) updateRulesFromCache(ctx context.Context) error {
f.lock.Lock()
defer f.lock.Unlock()
allRules := make([]Rule, 0)
for _, rules := range f.cache {
allRules = append(allRules, rules...)
}
return f.registry.RuleRepository().Set(ctx, allRules)
}
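// Note on the update path above: the cache maps each source (file path or remote
// repo URL) to its rules, so whenever any single source changes the repository is
// rebuilt from the union of all cached sources rather than patched incrementally.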
func (f *FetcherDefault) fetch(source url.URL) ([]Rule, error) {
f.registry.Logger().
WithField("location", source.String()).
Debugf("Fetching access rules from given location because something changed.")
switch source.Scheme {
case "azblob", "gs", "s3":
return f.fetchFromStorage(source)
case "http", "https":
return f.fetchRemote(source.String())
case "inline":
src, err := base64.StdEncoding.DecodeString(strings.Replace(source.String(), "inline://", "", 1))
if err != nil {
return nil, errors.Wrapf(err, "rule: %s", source.String())
}
return f.decode(bytes.NewBuffer(src))
}
return nil, errors.Errorf("rule: source url uses an unknown scheme: %s", source.String())
}
func (f *FetcherDefault) fetchRemote(source string) ([]Rule, error) {
res, err := f.hc.Get(source)
if err != nil {
return nil, errors.Wrapf(err, "rule: %s", source)
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
return nil, errors.Errorf("rule: expected http response status code 200 but got %d when fetching: %s", res.StatusCode, source)
}
return f.decode(res.Body)
}
func (f *FetcherDefault) decode(r io.Reader) ([]Rule, error) { |
b, err := io.ReadAll(r)
if err != nil {
return nil, errors.WithStack(err)
}
var ks []Rule
if json.Valid(b) {
d := json.NewDecoder(bytes.NewReader(b))
d.DisallowUnknownFields()
if err := d.Decode(&ks); err != nil {
return nil, errors.WithStack(err)
}
return ks, nil
}
if err := yaml.Unmarshal(b, &ks); err != nil {
return nil, errors.WithStack(err)
} | identifier_body |
|
fetcher_default.go | Fetcher = new(FetcherDefault)
type fetcherRegistry interface {
x.RegistryLogger
RuleRepository() Repository
}
type FetcherDefault struct {
config configuration.Provider
registry fetcherRegistry
hc *http.Client
mux *blob.URLMux
cache map[string][]Rule
cancelWatchers map[string]context.CancelFunc
events chan watcherx.Event
lock sync.Mutex
}
func NewFetcherDefault(
config configuration.Provider,
registry fetcherRegistry,
) *FetcherDefault {
return &FetcherDefault{
registry: registry,
config: config,
mux: cloudstorage.NewURLMux(),
hc: httpx.NewResilientClient(httpx.ResilientClientWithConnectionTimeout(15 * time.Second)).StandardClient(),
cache: make(map[string][]Rule),
cancelWatchers: make(map[string]context.CancelFunc),
events: make(chan watcherx.Event),
}
}
func (f *FetcherDefault) SetURLMux(mux *blob.URLMux) {
f.mux = mux
}
func splitLocalRemoteRepos(ruleRepos []url.URL) (files []string, nonFiles []url.URL) {
files = make([]string, 0, len(ruleRepos))
nonFiles = make([]url.URL, 0, len(ruleRepos))
for _, repo := range ruleRepos {
if repo.Scheme == "file" || repo.Scheme == "" {
files = append(files,
filepath.Clean(
urlx.GetURLFilePath(&repo)))
} else {
nonFiles = append(nonFiles, repo)
}
}
return files, nonFiles
}
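// Illustrative example (not part of the original source): given the repositories
// file:///rules/a.json and https://example.com/rules.json, splitLocalRemoteRepos
// returns files == []string{"/rules/a.json"} and nonFiles holding only the HTTPS
// URL; URLs with an empty scheme are also treated as local file paths.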
// watchLocalFiles watches all files that are configured in the config and are not watched already.
// It also cancels watchers for files that are no longer configured. This function is idempotent.
func (f *FetcherDefault) watchLocalFiles(ctx context.Context) {
f.lock.Lock()
repoChanged := false
cancelWatchers := make(map[string]context.CancelFunc, len(f.cancelWatchers))
localFiles, _ := splitLocalRemoteRepos(f.config.AccessRuleRepositories())
for _, fp := range localFiles {
if cancel, ok := f.cancelWatchers[fp]; !ok {
// watch all files we are not yet watching
repoChanged = true
ctx, cancelWatchers[fp] = context.WithCancel(ctx)
w, err := watcherx.WatchFile(ctx, fp, f.events)
if err != nil {
f.registry.Logger().WithError(err).WithField("file", fp).Error("Unable to watch file, ignoring it.")
continue
}
// we force reading the files
done, err := w.DispatchNow()
if err != nil {
f.registry.Logger().WithError(err).WithField("file", fp).Error("Unable to read file, ignoring it.")
continue
}
go func() { <-done }() // we do not need to wait here, but we need to clear the channel
} else {
// keep watching files we are already watching
cancelWatchers[fp] = cancel
}
}
// cancel watchers for files we are no longer watching
for fp, cancel := range f.cancelWatchers {
if _, ok := cancelWatchers[fp]; !ok {
f.registry.Logger().WithField("file", fp).Info("Stopped watching access rule file.")
repoChanged = true
cancel()
delete(f.cache, fp)
}
}
f.cancelWatchers = cancelWatchers
f.lock.Unlock() // release lock before processing events
if repoChanged {
f.registry.Logger().WithField("repos", f.config.Get(configuration.AccessRuleRepositories)).Info("Detected access rule repository change, processing updates.")
if err := f.updateRulesFromCache(ctx); err != nil {
f.registry.Logger().WithError(err).WithField("event_source", "local repo change").Error("Unable to update access rules.")
}
}
}
func (f *FetcherDefault) Watch(ctx context.Context) error {
f.watchLocalFiles(ctx)
getRemoteRepos := func() map[url.URL]struct{} {
_, remoteRepos := splitLocalRemoteRepos(f.config.AccessRuleRepositories())
repos := make(map[url.URL]struct{}, len(remoteRepos))
for _, repo := range remoteRepos {
repos[repo] = struct{}{}
}
return repos
}
// capture the previous config values to detect changes, and trigger initial processing
strategy := f.config.AccessRuleMatchingStrategy()
if err := f.processStrategyUpdate(ctx, strategy); err != nil {
return err
}
remoteRepos := getRemoteRepos()
if err := f.processRemoteRepoUpdate(ctx, nil, remoteRepos); err != nil {
return err
}
f.config.AddWatcher(func(_ watcherx.Event, err error) {
if err != nil {
return
}
// watch files that need to be watched
f.watchLocalFiles(ctx)
// update the matching strategy if it changed
if newStrategy := f.config.AccessRuleMatchingStrategy(); newStrategy != strategy {
f.registry.Logger().WithField("strategy", newStrategy).Info("Detected access rule matching strategy change, processing updates.")
if err := f.processStrategyUpdate(ctx, newStrategy); err != nil {
f.registry.Logger().WithError(err).Error("Unable to update access rule matching strategy.")
} else {
strategy = newStrategy
}
}
// update & fetch the remote repos if they changed
newRemoteRepos := getRemoteRepos()
if err := f.processRemoteRepoUpdate(ctx, remoteRepos, newRemoteRepos); err != nil {
f.registry.Logger().WithError(err).Error("Unable to update remote access rule repository config.")
}
remoteRepos = newRemoteRepos
})
go f.processLocalUpdates(ctx)
return nil
}
func (f *FetcherDefault) processStrategyUpdate(ctx context.Context, newValue configuration.MatchingStrategy) error {
if err := f.registry.RuleRepository().SetMatchingStrategy(ctx, newValue); err != nil {
return err
}
return nil
}
func (f *FetcherDefault) p | ctx context.Context, oldRepos, newRepos map[url.URL]struct{}) error {
repoChanged := false
for repo := range newRepos {
if _, ok := f.cache[repo.String()]; !ok {
repoChanged = true
f.registry.Logger().WithField("repo", repo.String()).Info("New repo detected, fetching access rules.")
rules, err := f.fetch(repo)
if err != nil {
f.registry.Logger().WithError(err).WithField("repo", repo.String()).Error("Unable to fetch access rules.")
return err
}
f.cacheRules(repo.String(), rules)
}
}
for repo := range oldRepos {
if _, ok := newRepos[repo]; !ok {
repoChanged = true
f.registry.Logger().WithField("repo", repo.String()).Info("Repo was removed, removing access rules.")
f.lock.Lock()
delete(f.cache, repo.String())
f.lock.Unlock()
}
}
if repoChanged {
if err := f.updateRulesFromCache(ctx); err != nil {
f.registry.Logger().WithError(err).WithField("event_source", "remote change").Error("Unable to update access rules.")
return err
}
}
return nil
}
func (f *FetcherDefault) processLocalUpdates(ctx context.Context) {
for {
select {
case <-ctx.Done():
return
case e, ok := <-f.events:
if !ok {
// channel was closed
return
}
f.registry.Logger().
WithField("event", "fsnotify").
WithField("file", e.Source()).
Info("Detected file change for access rules. Triggering a reload.")
if e.Reader() == nil {
f.registry.Logger().WithField("file", e.Source()).Error("Unable to read access rules probably because they were deleted, skipping those.")
continue
}
rules, err := f.decode(e.Reader())
if err != nil {
f.registry.Logger().WithField("file", e.Source()).WithError(err).Error("Unable to decode access rules, skipping those.")
continue
}
f.cacheRules(e.Source(), rules)
if err := f.updateRulesFromCache(ctx); err != nil {
f.registry.Logger().WithError(err).WithField("event_source", "local change").Error("Unable to update access rules.")
}
}
}
}
func (f *FetcherDefault) cacheRules(source string, rules []Rule) {
f.lock.Lock()
defer f.lock.Unlock()
f.cache[source] = rules
}
func (f *FetcherDefault) updateRulesFromCache(ctx context.Context) error {
f.lock.Lock()
defer f.lock.Unlock()
allRules := make([]Rule, 0)
for _, rules := range f.cache {
allRules = append(allRules, rules...)
}
return f.registry.RuleRepository().Set(ctx, allRules)
}
func (f *FetcherDefault) fetch(source url.URL) ([]Rule, error) {
f.registry.Logger().
WithField("location", source.String()).
Debugf("Fetching access rules from given location because something changed.")
switch source.Scheme {
case "azblob", "gs", "s3":
return f.fetch | rocessRemoteRepoUpdate( | identifier_name |
tunnel.py | except (StandardError, socket.error) as e:
self.logger.error("Could not connect to %s at %d:: %s" %
(self.switch, self.port, str(e)))
return None
def wakeup(self):
"""
Wake up the event loop, presumably from another thread.
"""
self.waker.notify()
def sockets(self):
"""
Return list of sockets to select on.
"""
socs = [self.listen_socket, self.bridge_socket, self.waker]
return [x for x in socs if x]
def run(self):
"""
Activity function for class
Assumes connection to switch already exists. Listens on
switch_socket for messages until an error (or zero len pkt)
occurs.
When there is a message on the socket, check for handlers; queue the
packet if no one handles the packet.
See note for controller describing the limitation of a single
connection for now.
"""
self.dbg_state = "running"
while self.active:
try:
sel_in, sel_out, sel_err = \
select.select(self.sockets(), [], self.sockets(), 1)
except:
print( sys.exc_info())
self.logger.error("Select error, disconnecting")
self.disconnect()
for s in sel_err:
self.logger.error("Got socket error on: " + str(s) + ", disconnecting")
self.disconnect()
for s in sel_in:
if self._socket_ready_handle(s) == -1:
self.disconnect()
# End of main loop
self.dbg_state = "closing"
self.logger.info("Exiting controller thread")
self.shutdown()
def connect(self, timeout=-1):
"""
Connect to the switch
@param timeout Block for up to timeout seconds. Pass -1 for the default.
@return Boolean, True if connected
"""
pass
def disconnect(self, timeout=-1):
"""
If connected to a switch, disconnect.
"""
if self.switch_socket:
self.switch_socket.close()
self.switch_socket = None
self.switch_addr = None
with self.packets_cv:
self.packets = []
with self.connect_cv:
self.connect_cv.notifyAll()
if self.bridge_socket:
self.bridge_socket.close()
def wait_disconnected(self, timeout=-1):
"""
@param timeout Block for up to timeout seconds. Pass -1 for the default.
@return Boolean, True if disconnected
"""
with self.connect_cv:
util.timed_wait(self.connect_cv,
lambda: True if not self.switch_socket else None,
timeout=timeout)
return self.switch_socket is None
def kill(self):
"""
Force the controller thread to quit
"""
self.active = False
self.wakeup()
self.join()
def shutdown(self):
"""
Shutdown the controller closing all sockets
@todo Might want to synchronize shutdown with self.sync...
"""
self.active = False
try:
self.listen_socket.shutdown(socket.SHUT_RDWR)
except:
self.logger.info("Ignoring listen soc shutdown error")
self.listen_socket = None
with self.connect_cv:
self.connect_cv.notifyAll()
self.wakeup()
self.dbg_state = "down"
def register(self, msg_type, handler):
"""
Register a callback to receive a specific message type.
Only one handler may be registered for a given message type.
WARNING: A lock is held during the handler call back, so
the handler should not make any blocking calls
@param msg_type The type of message to receive. May be DEFAULT
for all non-handled packets. The special type, the string "all"
will send all packets to the handler.
@param handler The function to call when a message of the given
type is received.
"""
# Should check type is valid
if not handler and msg_type in self.handlers.keys():
del self.handlers[msg_type]
return
self.handlers[msg_type] = handler
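# Usage sketch (illustrative, not part of the original file): a catch-all handler
# receives (controller, msg, rawmsg) and should return a truthy value once it has
# fully handled the message; returning False lets the message be enqueued for poll().
#
#   def log_everything(ctrl, msg, rawmsg):
#       print "saw", type(msg).__name__
#       return False
#
#   ctrl.register("all", log_everything)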
def poll(self, exp_msg=None, timeout=-1):
"""
Wait for the next OF message received from the switch.
@param exp_msg If set, return only when this type of message
is received (unless timeout occurs).
@param timeout Maximum number of seconds to wait for the message.
Pass -1 for the default timeout.
@retval A pair (msg, pkt) where msg is a message object and pkt
the string representing the packet as received from the socket.
This allows additional parsing by the receiver if necessary.
The data members in the message are in host endian order.
If an error occurs, (None, None) is returned
"""
if exp_msg is None:
self.logger.warn("DEPRECATED polling for any message class")
klass = None
else:
raise ValueError("Unexpected exp_msg argument %r" % exp_msg)
# Take the packet from the queue
def grab():
for i, (msg, pkt) in enumerate(self.packets):
if klass is None or isinstance(msg, klass):
self.logger.debug("Got %s message", msg.__class__.__name__)
return self.packets.pop(i)
# Not found
return None
with self.packets_cv:
ret = util.timed_wait(self.packets_cv, grab, timeout=timeout)
if ret != None:
(msg, pkt) = ret
return (msg, pkt)
else:
return (None, None)
def transact(self, msg, timeout=-1):
"""
Run a message transaction with the switch
Send the message in msg and wait for a reply with a matching
transaction id. Transactions have the highest priority in
received message handling.
@param msg The message object to send; must not be a string
@param timeout The timeout in seconds; if -1 use default.
"""
if msg.xid == None:
msg.xid = util.gen_xid()
self.logger.debug("Running transaction %d" % msg.xid)
with self.xid_cv:
if self.xid:
self.logger.error("Can only run one transaction at a time")
return (None, None)
self.xid = msg.xid
self.xid_response = None
self.message_send(msg)
self.logger.debug("Waiting for transaction %d" % msg.xid)
util.timed_wait(self.xid_cv, lambda: self.xid_response, timeout=timeout)
if self.xid_response:
(resp, pkt) = self.xid_response
self.xid_response = None
else:
(resp, pkt) = (None, None)
if resp is None:
self.logger.warning("No response for xid " + str(self.xid))
return (resp, pkt)
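# Usage sketch (illustrative; echo_request stands in for any message class that
# carries an xid): transact() sends the request and blocks until a reply with a
# matching xid arrives or the timeout expires, returning (None, None) on timeout.
#
#   resp, raw = self.transact(echo_request(), timeout=5)
#   if resp is None:
#       self.logger.warning("transaction timed out")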
def message_send(self, msg):
"""
Send the message to the switch
@param msg A string or OpenFlow message object to be forwarded to
the switch.
"""
if not self.switch_socket:
# Sending a string indicates the message is ready to go
raise Exception("no socket")
if msg.xid == None:
msg.xid = util.gen_xid()
outpkt = msg.pack()
self.logger.debug("Msg out: version %d class %s len %d xid %d",
msg.version, type(msg).__name__, len(outpkt), msg.xid)
with self.tx_lock:
if self.switch_socket.sendall(outpkt) is not None:
raise AssertionError("failed to send message to switch")
return 0 # for backwards compatibility
def clear_queue(self):
"""
Clear the input queue and report the number of messages
that were in it
"""
enqueued_pkt_count = len(self.packets)
with self.packets_cv:
self.packets = []
return enqueued_pkt_count
def __str__(self):
string = "Controller:\n"
string += " state " + self.dbg_state + "\n"
string += " switch_addr " + str(self.switch_addr) + "\n"
string += " pending pkts " + str(len(self.packets)) + "\n"
string += " total pkts " + str(self.packets_total) + "\n"
string += " expired pkts " + str(self.packets_expired) + "\n"
string += " handled pkts " + str(self.packets_handled) + "\n"
string += " poll discards " + str(self.poll_discards) + "\n"
string += " parse errors " + str(self.parse_errors) + "\n"
string += " sock errrors " + str(self.socket_errors) + "\n"
string += " max pkts " + str(self.max_pkts) + "\n"
string += " target switch " + str(self.switch) + "\n"
string += " host " + str(self.host) + "\n"
string += " port " + str(self.port) + "\n"
string += " keep_alive " + str(self.keep_alive) + "\n"
string += " pkt_in_run " + str(self.pkt_in_run) + "\n"
string += " pkt_in_dropped " + str(self.pkt_in_dropped) + "\n"
return string
def | show | identifier_name |
|
tunnel.py | )
return soc
except (StandardError, socket.error) as e:
self.logger.error("Could not connect to %s at %d:: %s" %
(self.switch, self.port, str(e)))
return None
def wakeup(self):
"""
Wake up the event loop, presumably from another thread.
"""
self.waker.notify()
def sockets(self):
"""
Return list of sockets to select on.
"""
socs = [self.listen_socket, self.bridge_socket, self.waker]
return [x for x in socs if x]
def run(self):
"""
Activity function for class
Assumes connection to switch already exists. Listens on
switch_socket for messages until an error (or zero len pkt)
occurs.
When there is a message on the socket, check for handlers; queue the
packet if no one handles the packet.
See note for controller describing the limitation of a single
connection for now.
"""
self.dbg_state = "running"
while self.active:
try:
sel_in, sel_out, sel_err = \
select.select(self.sockets(), [], self.sockets(), 1)
except:
print( sys.exc_info())
self.logger.error("Select error, disconnecting")
self.disconnect()
for s in sel_err:
self.logger.error("Got socket error on: " + str(s) + ", disconnecting")
self.disconnect()
for s in sel_in:
if self._socket_ready_handle(s) == -1:
self.disconnect()
# End of main loop
self.dbg_state = "closing"
self.logger.info("Exiting controller thread")
self.shutdown()
def connect(self, timeout=-1):
"""
Connect to the switch
@param timeout Block for up to timeout seconds. Pass -1 for the default.
@return Boolean, True if connected
"""
pass
def disconnect(self, timeout=-1):
"""
If connected to a switch, disconnect.
"""
if self.switch_socket:
self.switch_socket.close()
self.switch_socket = None
self.switch_addr = None
with self.packets_cv:
self.packets = []
with self.connect_cv:
self.connect_cv.notifyAll()
if self.bridge_socket:
self.bridge_socket.close()
def wait_disconnected(self, timeout=-1):
"""
@param timeout Block for up to timeout seconds. Pass -1 for the default.
@return Boolean, True if disconnected
"""
with self.connect_cv:
util.timed_wait(self.connect_cv,
lambda: True if not self.switch_socket else None,
timeout=timeout)
return self.switch_socket is None
def kill(self):
"""
Force the controller thread to quit
"""
self.active = False
self.wakeup()
self.join()
def shutdown(self):
"""
Shutdown the controller closing all sockets
@todo Might want to synchronize shutdown with self.sync...
"""
self.active = False
try:
self.listen_socket.shutdown(socket.SHUT_RDWR)
except:
self.logger.info("Ignoring listen soc shutdown error")
self.listen_socket = None
with self.connect_cv:
self.connect_cv.notifyAll()
self.wakeup()
self.dbg_state = "down"
def register(self, msg_type, handler):
"""
Register a callback to receive a specific message type.
Only one handler may be registered for a given message type.
WARNING: A lock is held during the handler call back, so
the handler should not make any blocking calls
@param msg_type The type of message to receive. May be DEFAULT
for all non-handled packets. The special type, the string "all"
will send all packets to the handler.
@param handler The function to call when a message of the given
type is received.
"""
# Should check type is valid
if not handler and msg_type in self.handlers.keys():
del self.handlers[msg_type]
return
self.handlers[msg_type] = handler
def poll(self, exp_msg=None, timeout=-1):
"""
Wait for the next OF message received from the switch.
@param exp_msg If set, return only when this type of message
is received (unless timeout occurs).
@param timeout Maximum number of seconds to wait for the message.
Pass -1 for the default timeout.
@retval A pair (msg, pkt) where msg is a message object and pkt
the string representing the packet as received from the socket.
This allows additional parsing by the receiver if necessary.
The data members in the message are in host endian order.
If an error occurs, (None, None) is returned
"""
if exp_msg is None:
self.logger.warn("DEPRECATED polling for any message class")
klass = None
else:
raise ValueError("Unexpected exp_msg argument %r" % exp_msg)
# Take the packet from the queue
def grab():
for i, (msg, pkt) in enumerate(self.packets):
if klass is None or isinstance(msg, klass):
self.logger.debug("Got %s message", msg.__class__.__name__)
return self.packets.pop(i)
# Not found
return None
with self.packets_cv:
ret = util.timed_wait(self.packets_cv, grab, timeout=timeout)
if ret != None:
(msg, pkt) = ret
return (msg, pkt)
else:
return (None, None)
def transact(self, msg, timeout=-1):
"""
Run a message transaction with the switch
Send the message in msg and wait for a reply with a matching
transaction id. Transactions have the highest priority in
received message handling.
@param msg The message object to send; must not be a string
@param timeout The timeout in seconds; if -1 use default.
"""
if msg.xid == None:
msg.xid = util.gen_xid()
self.logger.debug("Running transaction %d" % msg.xid)
with self.xid_cv:
if self.xid:
self.logger.error("Can only run one transaction at a time")
return (None, None)
self.xid = msg.xid
self.xid_response = None
self.message_send(msg)
self.logger.debug("Waiting for transaction %d" % msg.xid)
util.timed_wait(self.xid_cv, lambda: self.xid_response, timeout=timeout)
if self.xid_response:
(resp, pkt) = self.xid_response
self.xid_response = None
else:
(resp, pkt) = (None, None)
if resp is None:
self.logger.warning("No response for xid " + str(self.xid))
return (resp, pkt)
def message_send(self, msg):
"""
Send the message to the switch
@param msg A string or OpenFlow message object to be forwarded to
the switch.
"""
if not self.switch_socket:
# Sending a string indicates the message is ready to go
raise Exception("no socket")
if msg.xid == None:
msg.xid = util.gen_xid()
outpkt = msg.pack()
self.logger.debug("Msg out: version %d class %s len %d xid %d",
msg.version, type(msg).__name__, len(outpkt), msg.xid)
with self.tx_lock:
if self.switch_socket.sendall(outpkt) is not None:
raise AssertionError("failed to send message to switch")
return 0 # for backwards compatibility
def clear_queue(self):
"""
Clear the input queue and report the number of messages
that were in it
"""
enqueued_pkt_count = len(self.packets)
with self.packets_cv:
self.packets = []
return enqueued_pkt_count
def __str__(self):
| string = "Controller:\n"
string += " state " + self.dbg_state + "\n"
string += " switch_addr " + str(self.switch_addr) + "\n"
string += " pending pkts " + str(len(self.packets)) + "\n"
string += " total pkts " + str(self.packets_total) + "\n"
string += " expired pkts " + str(self.packets_expired) + "\n"
string += " handled pkts " + str(self.packets_handled) + "\n"
string += " poll discards " + str(self.poll_discards) + "\n"
string += " parse errors " + str(self.parse_errors) + "\n"
string += " sock errrors " + str(self.socket_errors) + "\n"
string += " max pkts " + str(self.max_pkts) + "\n"
string += " target switch " + str(self.switch) + "\n"
string += " host " + str(self.host) + "\n"
string += " port " + str(self.port) + "\n"
string += " keep_alive " + str(self.keep_alive) + "\n"
string += " pkt_in_run " + str(self.pkt_in_run) + "\n"
string += " pkt_in_dropped " + str(self.pkt_in_dropped) + "\n"
return string | identifier_body |
|
tunnel.py | ][offset : offset + payload_len]
if self.debug:
print(pkt[1])
print(util.hex_dump_buffer(rawmsg))
# Now check for message handlers; preference is given to
# handlers for a specific packet
handled = False
# Send to bridge socket
if self.bdg_unix_addr:
self.bridge_socket.sendto(rawmsg,self.bdg_unix_addr)
handled = True
if subtype in self.handlers.keys():
handled = self.handlers[subtype](self, nxp_sniffer, rawmsg)
if not handled and ("all" in self.handlers.keys()):
handled = self.handlers["all"](self, nxp_sniffer, rawmsg)
if not handled: # Not handled, enqueue
with self.packets_cv:
if len(self.packets) >= self.max_pkts:
self.packets.pop(0)
self.packets_expired += 1
self.packets.append((nxp_sniffer, rawmsg))
self.packets_cv.notify_all()
self.packets_total += 1
else:
self.packets_handled += 1
self.logger.debug("Message handled by callback")
def _socket_ready_handle(self, s):
"""
Handle an input-ready socket
@param s The socket object that is ready
@returns 0 on success, -1 on error
"""
if s and s == self.switch_socket:
for idx in range(3): # debug: try a couple of times
try:
pkt = self.switch_socket.recvfrom(self.rcv_size)
except:
self.logger.warning("Error on switch read")
return -1
if not self.active:
return 0
if len(pkt) == 0:
self.logger.warning("Zero-length switch read, %d" % idx)
else:
break
if len(pkt) == 0: # Still no packet
self.logger.warning("Zero-length switch read; closing cxn")
self.logger.info(str(self))
return -1
self._pkt_handle(pkt)
elif s and s == self.waker:
self.waker.wait()
else:
self.logger.error("Unknown socket ready: " + str(s))
return -1
return 0
def active_connect(self):
"""
Actively connect to a switch IP addr
"""
try:
self.logger.info("Trying active connection to %s" % self.switch)
soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
soc.connect((self.switch, self.port))
self.logger.info("Connected to " + self.switch + " on " +
str(self.port))
soc.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, True)
self.switch_addr = (self.switch, self.port)
return soc
except (StandardError, socket.error) as e:
self.logger.error("Could not connect to %s at %d:: %s" %
(self.switch, self.port, str(e)))
return None
def wakeup(self):
"""
Wake up the event loop, presumably from another thread.
"""
self.waker.notify()
def sockets(self):
"""
Return list of sockets to select on.
"""
socs = [self.listen_socket, self.bridge_socket, self.waker]
return [x for x in socs if x]
def run(self):
"""
Activity function for class
Assumes connection to switch already exists. Listens on
switch_socket for messages until an error (or zero len pkt)
occurs.
When there is a message on the socket, check for handlers; queue the
packet if no one handles the packet.
See note for controller describing the limitation of a single
connection for now.
"""
self.dbg_state = "running"
while self.active:
try:
sel_in, sel_out, sel_err = \
select.select(self.sockets(), [], self.sockets(), 1)
except:
print( sys.exc_info())
self.logger.error("Select error, disconnecting")
self.disconnect()
for s in sel_err:
self.logger.error("Got socket error on: " + str(s) + ", disconnecting")
self.disconnect()
for s in sel_in:
if self._socket_ready_handle(s) == -1:
self.disconnect()
# End of main loop
self.dbg_state = "closing"
self.logger.info("Exiting controller thread")
self.shutdown()
def connect(self, timeout=-1):
"""
Connect to the switch
@param timeout Block for up to timeout seconds. Pass -1 for the default.
@return Boolean, True if connected
"""
pass
def disconnect(self, timeout=-1):
"""
If connected to a switch, disconnect.
"""
if self.switch_socket:
self.switch_socket.close()
self.switch_socket = None
self.switch_addr = None
with self.packets_cv:
self.packets = []
with self.connect_cv:
self.connect_cv.notifyAll()
if self.bridge_socket:
self.bridge_socket.close()
def wait_disconnected(self, timeout=-1):
"""
@param timeout Block for up to timeout seconds. Pass -1 for the default.
@return Boolean, True if disconnected
"""
with self.connect_cv:
util.timed_wait(self.connect_cv,
lambda: True if not self.switch_socket else None,
timeout=timeout)
return self.switch_socket is None
def kill(self):
"""
Force the controller thread to quit
"""
self.active = False
self.wakeup()
self.join()
def shutdown(self):
"""
Shutdown the controller closing all sockets
@todo Might want to synchronize shutdown with self.sync...
"""
self.active = False
try:
self.listen_socket.shutdown(socket.SHUT_RDWR)
except:
self.logger.info("Ignoring listen soc shutdown error")
self.listen_socket = None
with self.connect_cv:
self.connect_cv.notifyAll()
self.wakeup()
self.dbg_state = "down"
def register(self, msg_type, handler):
"""
Register a callback to receive a specific message type.
Only one handler may be registered for a given message type.
WARNING: A lock is held during the handler call back, so
the handler should not make any blocking calls
@param msg_type The type of message to receive. May be DEFAULT
for all non-handled packets. The special type, the string "all"
will send all packets to the handler.
@param handler The function to call when a message of the given
type is received.
"""
# Should check type is valid
if not handler and msg_type in self.handlers.keys():
del self.handlers[msg_type]
return
self.handlers[msg_type] = handler
def poll(self, exp_msg=None, timeout=-1):
"""
Wait for the next OF message received from the switch.
@param exp_msg If set, return only when this type of message
is received (unless timeout occurs).
@param timeout Maximum number of seconds to wait for the message.
Pass -1 for the default timeout.
@retval A pair (msg, pkt) where msg is a message object and pkt
the string representing the packet as received from the socket.
This allows additional parsing by the receiver if necessary.
The data members in the message are in host endian order.
If an error occurs, (None, None) is returned
"""
if exp_msg is None:
self.logger.warn("DEPRECATED polling for any message class")
klass = None
else:
raise ValueError("Unexpected exp_msg argument %r" % exp_msg)
# Take the packet from the queue
def grab():
for i, (msg, pkt) in enumerate(self.packets):
if klass is None or isinstance(msg, klass):
self.logger.debug("Got %s message", msg.__class__.__name__)
return self.packets.pop(i)
# Not found
return None
with self.packets_cv:
ret = util.timed_wait(self.packets_cv, grab, timeout=timeout)
if ret != None:
(msg, pkt) = ret
return (msg, pkt)
else:
return (None, None)
def transact(self, msg, timeout=-1):
"""
Run a message transaction with the switch
Send the message in msg and wait for a reply with a matching
transaction id. Transactions have the highest priority in
received message handling.
@param msg The message object to send; must not be a string
@param timeout The timeout in seconds; if -1 use default.
"""
if msg.xid == None:
msg.xid = util.gen_xid()
self.logger.debug("Running transaction %d" % msg.xid)
with self.xid_cv:
if self.xid:
self.logger.error("Can only run one transaction at a time")
return (None, None)
self.xid = msg.xid
self.xid_response = None
self.message_send(msg)
self.logger.debug("Waiting for transaction %d" % msg.xid)
util.timed_wait(self.xid_cv, lambda: self.xid_response, timeout=timeout)
if self.xid_response:
(resp, pkt) = self.xid_response
self.xid_response = None
else:
| (resp, pkt) = (None, None) | conditional_block |
|
tunnel.py | Class abstracting the control interface to the switch.
For receiving messages, two mechanisms will be implemented. First,
query the interface with poll. Second, register to have a
function called by message type. The callback is passed the
message type as well as the raw packet (or message object)
One of the main purposes of this object is to translate between network
and host byte order. 'Above' this object, things should be in host
byte order.
@todo Consider using SocketServer for listening socket
@todo Test transaction code
@var rcv_size The receive size to use for receive calls
@var max_pkts The max size of the receive queue
@var keep_alive If true, listen for echo requests and respond w/
echo replies
@var initial_hello If true, will send a hello message immediately
upon connecting to the switch
@var switch If not None, do an active connection to the switch
@var host The host to use for connect
@var port The port to connect on
@var packets_total Total number of packets received
@var packets_expired Number of packets popped from queue as queue full
@var packets_handled Number of packets handled by something
@var dbg_state Debug indication of state
"""
def __init__(self, bdg_unix_addr=None, tun_unix_addr='uds_tunnel', host='192.168.2.200', port=1024, max_pkts=1024):
Thread.__init__(self)
# Socket related
self.rcv_size = RCV_SIZE_DEFAULT
self.listen_socket = None
self.switch_socket = None
self.switch_addr = None
self.connect_cv = Condition()
self.message_cv = Condition()
self.tx_lock = Lock()
# Used to wake up the event loop from another thread
self.waker = util.EventDescriptor()
# Counters
self.socket_errors = 0
self.parse_errors = 0
self.packets_total = 0
self.packets_expired = 0
self.packets_handled = 0
self.poll_discards = 0
# State
self.sync = Lock()
self.handlers = {}
self.keep_alive = False
self.active = True
self.initial_hello = True
# OpenFlow message/packet queue
# Protected by the packets_cv lock / condition variable
self.packets = []
self.packets_cv = Condition()
self.packet_in_count = 0
# Settings
self.max_pkts = max_pkts
self.bdg_unix_addr = bdg_unix_addr
self.tun_unix_addr = tun_unix_addr
self.host = host
self.port = port
self.dbg_state = "init"
self.logger = logging.getLogger("VirtualTunnel")
self.filter_packet_in = False # Drop "excessive" packet ins
self.pkt_in_run = 0 # Count on run of packet ins
self.pkt_in_filter_limit = 50 # Count on run of packet ins
self.pkt_in_dropped = 0 # Total dropped packet ins
self.transact_to = 15 # Transact timeout default value; add to config
# Transaction and message type waiting variables
# xid_cv: Condition variable (semaphore) for packet waiters
# xid: Transaction ID being waited on
# xid_response: Transaction response message
self.xid_cv = Condition()
self.xid = None
self.xid_response = None
self.debug = False
self.buffered_input = ""
# Create listen socket
self.logger.info("Create/listen at " + self.host + ":" +
str(self.port))
ai = socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC,
socket.SOCK_DGRAM, 0, socket.AI_PASSIVE)
# Use first returned addrinfo
(family, socktype, proto, name, sockaddr) = ai[0]
self.listen_socket = socket.socket(family, socktype)
self.listen_socket.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
self.listen_socket.bind(sockaddr)
self.switch_socket = self.listen_socket
# Make sure the socket does not already exist
try:
os.unlink(self.tun_unix_addr)
except OSError:
if os.path.exists(self.tun_unix_addr):
raise
self.bridge_socket = socket.socket(socket.AF_UNIX,socket.SOCK_DGRAM)
# Bind the socket to the port
self.logger.info("Create/listen at " + str(self.tun_unix_addr))
self.bridge_socket.bind(self.tun_unix_addr)
def filter_packet(self, rawmsg, hdr):
"""
Check if packet should be filtered
Currently filters packet in messages
@return Boolean, True if packet should be dropped
"""
# XXX didn't actually check for packet-in...
return False
# Add check for packet in and rate limit
if self.filter_packet_in:
# If we were dropping packets, report number dropped
# TODO dont drop expected packet ins
if self.pkt_in_run > self.pkt_in_filter_limit:
self.logger.debug("Dropped %d packet ins (%d total)"
% ((self.pkt_in_run -
self.pkt_in_filter_limit),
self.pkt_in_dropped))
self.pkt_in_run = 0
return False
def _pkt_handle(self, pkt):
"""
Check for all packet handling conditions
Parse and verify message
Check if XID matches something waiting
Check if message is being expected for a poll operation
Check if keep alive is on and message is an echo request
Check if any registered handler wants the packet
Enqueue the message if none of those conditions is met; echo requests
(when keep_alive is true) and any registered message handlers are
given a chance to consume it first.
@param pkt The raw packet (string) which may contain multiple OF msgs
"""
# snag any left over data from last read()
# Parse the header to get type
offset, payload_len, subtype, nxp_sniffer = lowpan.message.parse_header(pkt[0])
# Extract the raw message bytes
rawmsg = pkt[0][offset : offset + payload_len]
if self.debug:
print(pkt[1])
print(util.hex_dump_buffer(rawmsg))
# Now check for message handlers; preference is given to
# handlers for a specific packet
handled = False
# Send to bridge socket
if self.bdg_unix_addr:
self.bridge_socket.sendto(rawmsg,self.bdg_unix_addr)
handled = True
if subtype in self.handlers.keys():
handled = self.handlers[subtype](self, nxp_sniffer, rawmsg)
if not handled and ("all" in self.handlers.keys()):
handled = self.handlers["all"](self, nxp_sniffer, rawmsg)
if not handled: # Not handled, enqueue
with self.packets_cv:
if len(self.packets) >= self.max_pkts:
self.packets.pop(0)
self.packets_expired += 1
self.packets.append((nxp_sniffer, rawmsg))
self.packets_cv.notify_all()
self.packets_total += 1
else:
self.packets_handled += 1
self.logger.debug("Message handled by callback")
def _socket_ready_handle(self, s):
"""
Handle an input-ready socket
@param s The socket object that is ready
@returns 0 on success, -1 on error
"""
if s and s == self.switch_socket:
for idx in range(3): # debug: try a couple of times
try:
pkt = self.switch_socket.recvfrom(self.rcv_size)
except:
self.logger.warning("Error on switch read")
return -1
if not self.active:
return 0
if len(pkt) == 0:
self.logger.warning("Zero-length switch read, %d" % idx)
else:
break
if len(pkt) == 0: # Still no packet
self.logger.warning("Zero-length switch read; closing cxn")
self.logger.info(str(self))
return -1
self._pkt_handle(pkt)
elif s and s == self.waker:
self.waker.wait()
else:
self.logger.error("Unknown socket ready: " + str(s))
return -1
return 0
def active_connect(self):
"""
Actively connect to a switch IP addr
"""
try:
self.logger.info("Trying active connection to %s" % self.switch)
soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
soc.connect((self.switch, self.port))
self.logger.info("Connected to " + self.switch + " on " +
str(self.port))
soc.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, True)
self.switch_addr = (self.switch, self.port)
return soc
except (StandardError, socket.error) as e:
self.logger.error("Could not connect to %s at %d:: %s" %
(self.switch, self.port, str(e)))
return None
def wakeup(self):
"""
Wake up the event loop, presumably from another | class VirtualTunnel(Thread):
""" | random_line_split |
|
data.py | eld_out = int(len(lines)*.1)
held_out = lines[:n_held_out]
data = lines[n_held_out:]
else:
data = lines
info('Tokenizing...')
data_tokens = [['<s>'] + tokenize(l) + ['</s>'] for l in data]
if self.interpolate:
held_out_tokens = [['<s>'] + tokenize(l) + ['</s>'] for l in held_out]
# Build the lexicon from words that appear more than once; grow it until it covers at least 99% of the observed word types
info('Building lexicon...')
lexicon = set()
unk = set()
counts = defaultdict(int)
for l in data_tokens:
for w in l:
if w[0] == '<':
continue
counts[w] += 1
if counts[w] == 1:
unk.add(w)
elif counts[w] == 2:
lexicon.add(w)
unk.remove(w)
del counts
while len(lexicon) < 0.99*(len(lexicon) + len(unk)):
for w in unk:
unk.remove(w)
lexicon.add(w)
break
del unk
info('Replacing OOV words with <UNK>...')
for l in data_tokens:
for i in xrange(len(l)):
if l[i][0] != '<' and l[i] not in lexicon:
l[i] = '<UNK>'
del lexicon
info('Counting ngrams...')
tokens, grams, voc, types_after, num_tokens_after = self._count_grams(data_tokens)
self.set_voc(voc)
num_types_after = defaultdict(int, ((x, len(types_after[x])) for x in types_after))
num_types_after[tuple()] = self.voc_size
num_tokens_after[tuple()] = tokens
info('Calculating probabilities...')
self.set_prob(1, ('<OTHER>',), LOGZERO)
for l in xrange(1, self.n+1):
for gram, gram_count in grams[l].iteritems():
prev = gram[:-1]
log_prob = self._calculate_log_prob(l, grams, gram_count, grams[l-1][prev],
num_types_after[prev], num_tokens_after[prev])
self.set_prob(l, gram, log_prob)
# Calculate probabilities for unseen ngrams (after smoothing they get a nonzero value...)
for l in xrange(2, self.n+1):
for base_gram, base_gram_count in grams[l-1].iteritems():
log_prob = self._calculate_log_prob(l, grams, 0, base_gram_count, num_types_after[base_gram],
num_tokens_after[base_gram])
self.set_prob(l, base_gram + ('<OTHER>',), log_prob)
# Pr(W_n | W_{n-1}) when C(W_n)=0, C(W_{n-1})=0
self.prob_no_information = self._calculate_log_prob(2, grams, 0, 0, 0, 0)
if not self.interpolate:
return
info('Interpolating...')
_, held_out_grams, _, held_out_types_after, held_out_num_tokens_after = \
self._count_grams(held_out_tokens)
for base_gram in grams[self.n-1]:
# Use EM to calculate lambda-values which provide max log-likelihood on held-out data
# Based on: https://www.cs.cmu.edu/~roni/11761/Presentations/degenerateEM.pdf
if held_out_num_tokens_after[base_gram] == 0:
# This base gram is so rare that it doesn't appear in the held-out data -- interpolation
# won't matter much in this case anyway!
continue
log_lmbds = [log(1) - log(self.n)]*self.n
prev_loglikelihood = self._calculate_lmbds_loglikelihood(base_gram, log_lmbds, held_out_grams,
held_out_types_after, held_out_num_tokens_after)
for t in count(1):
# E-step
# log_denoms[w] = lg(denominator for word w)
log_denoms = dict()
for w in held_out_types_after[base_gram]:
gram = base_gram + (w,)
log_denoms[w] = self._calculate_interpolated_prob(gram, log_lmbds)
# M-step
for j in xrange(self.n):
new_log_lmbd = LOGZERO
for w in held_out_types_after[base_gram]:
gram = base_gram + (w,)
val = log_lmbds[j] + self.get_prob(gram[-j-1:]) \
+ log(held_out_grams[self.n][gram]) - log_denoms[w]
new_log_lmbd = add_log(new_log_lmbd, val)
log_lmbds[j] = new_log_lmbd - log(held_out_num_tokens_after[base_gram])
# Check for convergence
loglikelihood = self._calculate_lmbds_loglikelihood(base_gram, log_lmbds, held_out_grams,
held_out_types_after, held_out_num_tokens_after)
assert loglikelihood >= prev_loglikelihood
if loglikelihood - prev_loglikelihood <= EM_EPSILON:
break
prev_loglikelihood = loglikelihood
# Calculate the new interpolated probabilities
total = LOGZERO
for w in types_after[base_gram]:
gram = base_gram + (w,)
new_prob = self._calculate_interpolated_prob(gram, log_lmbds)
self.set_prob(self.n, gram, new_prob)
total = add_log(total, new_prob)
# All other unseen probabilities - (1-Sum_w(Pr(w|base))) / Z(base)
new_other_prob = log(1.0 - exp(total)) - log(self.voc_size - num_types_after[base_gram])
self.set_prob(self.n, base_gram + ('<OTHER>',), new_other_prob)
# Verify probabilities sum to 1
info('Testing...')
l = self.n
for base_gram in grams[l-1].keys():
if base_gram[-1] == '</s>':
continue
total = LOGZERO
for w in types_after[base_gram]:
gram = base_gram + (w,)
total = add_log(total, self.get_prob(gram))
total = add_log(total, self.get_prob(base_gram + ('<OTHER>',)) + \
log(self.voc_size - num_types_after[base_gram]))
if abs(total-0.0) > EPSILON:
raise Exception('Bad total for %s: %.32f' % (base_gram, exp(total)))
def _calculate_log_prob(self, l, grams, gram_count, prev_gram_count, num_types_after, num_tokens_after):
# No need to smooth 1-grams obviously...
if l == 1 or self.smoothing == 'none' or (self.smoothing == 'ls' and self.lmbd == 0):
if prev_gram_count == 0:
# No prior information, assume uniform distribution
log_prob = -log(self.voc_size)
elif gram_count == 0:
log_prob = LOGZERO
else:
log_prob = log(gram_count) - log(prev_gram_count)
elif self.smoothing == 'ls':
log_prob = log(gram_count + self.lmbd) - log(prev_gram_count + self.lmbd * self.voc_size)
elif self.smoothing == 'wb':
z = self.voc_size - num_types_after
if gram_count == 0:
if num_types_after == 0:
log_prob = LOGZERO
else:
log_prob = log(num_types_after) - (log(z) + log(num_tokens_after + num_types_after))
else:
log_prob = log(gram_count) - log(num_tokens_after + num_types_after)
else:
raise Exception('Invalid smoothing %s' % self.smoothing)
return log_prob
def _calculate_interpolated_prob(self, gram, log_lmbds):
return reduce(add_log, (log_lmbds[k] + self.get_prob(gram[-k-1:]) for k in xrange(self.n)))
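# The one-liner above computes, in log space,
# Pr_interp(w | h) = sum_{k=0..n-1} lambda_k * Pr(w | last k words of h),
# where k = 0 is the unigram model; add_log accumulates the sum without underflow.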
def _calculate_lmbds_loglikelihood(self, base, log_lmbds, grams, types_after, num_tokens_after):
if num_tokens_after[base] == 0:
return 0
log_likelihood = 0
for w in types_after[base]:
gram = base + (w,)
val = self._calculate_interpolated_prob(gram, log_lmbds)
log_likelihood += grams[self.n][gram] * val
log_likelihood /= num_tokens_after[base]
return log_likelihood
def load_test_file(n, lines):
n_grams = []
for line in lines:
try:
l = ['<s>'] + tokenize(line) + ['</s>']
except:
continue
n_grams.extend((l[max(0, x-n+1):x+1] for x in xrange(len(l))))
return n_grams
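# Illustrative example: with n=3 and the single line "a b c", load_test_file
# returns [['<s>'], ['<s>', 'a'], ['<s>', 'a', 'b'], ['a', 'b', 'c'], ['b', 'c', '</s>']],
# i.e. each history is truncated to at most n tokens.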
def normalize(a):
| s = float(sum(a))
for i in a:
a[i] /= s | identifier_body |
|
data.py | = int(l[1])
else: # data line
assert current_ngram != 0, 'Invalid n-gram'
log_prob, words = l.split('\t', 1)
log_prob = float(log_prob)
words = tuple(words.split(' '))
lm.set_prob(current_ngram, words, log_prob)
if current_ngram == 1:
voc.add(words[0])
lm.set_voc(voc)
return lm
def _count_grams(self, lines_tokens):
''' Count ngrams in a given list of tokenized lines '''
# T(w) in Witten-Bell smoothing
# types_after[w] = set of all types that occur after w
types_after = defaultdict(set)
# N(w) in Witten-Bell smoothing
# num_tokens_after[w] = number of tokens that occur after w
num_tokens_after = defaultdict(int)
tokens = 0
grams = [defaultdict(int) for _ in xrange(self.n+1)]
voc = set()
for i, words in enumerate(lines_tokens):
for word in words:
voc.add(word)
num_words = len(words)
tokens += num_words
for l in xrange(1, self.n+1):
for j in xrange (l, num_words+1):
gram = tuple(words[j-l:j])
if l > 1:
# Account T(w) and N(w) in WB smoothing
prev = gram[:-1]
types_after[prev].add(gram[-1])
num_tokens_after[prev] += 1
grams[l][gram] += 1
grams[0] = dict()
grams[0][tuple()] = tokens
return tokens, grams, voc, types_after, num_tokens_after
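# Illustrative example of the Witten-Bell bookkeeping: for a bigram model fed the
# token lists ['a', 'b'] and ['a', 'c'], types_after[('a',)] == {'b', 'c'} (T(a) = 2)
# and num_tokens_after[('a',)] == 2 (N(a) = 2).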
def train_model(self, lines, silent=False):
# Print to log if needed
if silent:
def info(s):
pass
else:
def info(s):
print s
if self.interpolate:
n_held_out = int(len(lines)*.1)
held_out = lines[:n_held_out]
data = lines[n_held_out:]
else:
data = lines
info('Tokenizing...')
data_tokens = [['<s>'] + tokenize(l) + ['</s>'] for l in data]
if self.interpolate:
held_out_tokens = [['<s>'] + tokenize(l) + ['</s>'] for l in held_out]
# Build the lexicon from words that appear more than once; grow it until it covers at least 99% of the observed word types
info('Building lexicon...')
lexicon = set()
unk = set()
counts = defaultdict(int)
for l in data_tokens:
for w in l:
if w[0] == '<':
continue
counts[w] += 1
if counts[w] == 1:
unk.add(w)
elif counts[w] == 2:
lexicon.add(w)
unk.remove(w)
del counts
while len(lexicon) < 0.99*(len(lexicon) + len(unk)):
for w in unk:
unk.remove(w)
lexicon.add(w)
break
del unk
info('Replacing OOV words with <UNK>...')
for l in data_tokens:
for i in xrange(len(l)):
if l[i][0] != '<' and l[i] not in lexicon:
l[i] = '<UNK>'
del lexicon
info('Counting ngrams...')
tokens, grams, voc, types_after, num_tokens_after = self._count_grams(data_tokens)
self.set_voc(voc)
num_types_after = defaultdict(int, ((x, len(types_after[x])) for x in types_after))
num_types_after[tuple()] = self.voc_size
num_tokens_after[tuple()] = tokens
info('Calculating probabilities...')
self.set_prob(1, ('<OTHER>',), LOGZERO)
for l in xrange(1, self.n+1):
for gram, gram_count in grams[l].iteritems():
prev = gram[:-1]
log_prob = self._calculate_log_prob(l, grams, gram_count, grams[l-1][prev],
num_types_after[prev], num_tokens_after[prev])
self.set_prob(l, gram, log_prob)
# Calculate probabilities for unseen ngrams (after smoothing they get a nonzero value...)
for l in xrange(2, self.n+1):
for base_gram, base_gram_count in grams[l-1].iteritems():
log_prob = self._calculate_log_prob(l, grams, 0, base_gram_count, num_types_after[base_gram],
num_tokens_after[base_gram])
self.set_prob(l, base_gram + ('<OTHER>',), log_prob)
# Pr(W_n | W_{n-1}) when C(W_n)=0, C(W_{n-1})=0
self.prob_no_information = self._calculate_log_prob(2, grams, 0, 0, 0, 0)
if not self.interpolate:
return
info('Interpolating...')
_, held_out_grams, _, held_out_types_after, held_out_num_tokens_after = \
self._count_grams(held_out_tokens)
for base_gram in grams[self.n-1]:
# Use EM to calculate lambda-values which provide max log-likelihood on held-out data
# Based on: https://www.cs.cmu.edu/~roni/11761/Presentations/degenerateEM.pdf
if held_out_num_tokens_after[base_gram] == 0:
# This base gram is so rare that it doesn't appear in the held-out data -- interpolation
# won't matter much in this case anyway!
continue
log_lmbds = [log(1) - log(self.n)]*self.n
prev_loglikelihood = self._calculate_lmbds_loglikelihood(base_gram, log_lmbds, held_out_grams,
held_out_types_after, held_out_num_tokens_after)
for t in count(1):
# E-step
# log_denoms[w] = lg(denominator for word w)
log_denoms = dict()
for w in held_out_types_after[base_gram]:
gram = base_gram + (w,)
log_denoms[w] = self._calculate_interpolated_prob(gram, log_lmbds)
# M-step
for j in xrange(self.n):
new_log_lmbd = LOGZERO
for w in held_out_types_after[base_gram]:
gram = base_gram + (w,)
val = log_lmbds[j] + self.get_prob(gram[-j-1:]) \
+ log(held_out_grams[self.n][gram]) - log_denoms[w]
new_log_lmbd = add_log(new_log_lmbd, val)
log_lmbds[j] = new_log_lmbd - log(held_out_num_tokens_after[base_gram])
# Check for convergence
loglikelihood = self._calculate_lmbds_loglikelihood(base_gram, log_lmbds, held_out_grams,
held_out_types_after, held_out_num_tokens_after)
assert loglikelihood >= prev_loglikelihood
if loglikelihood - prev_loglikelihood <= EM_EPSILON:
break
prev_loglikelihood = loglikelihood
# Calculate the new interpolated probabilities
total = LOGZERO
for w in types_after[base_gram]:
gram = base_gram + (w,)
new_prob = self._calculate_interpolated_prob(gram, log_lmbds)
self.set_prob(self.n, gram, new_prob)
total = add_log(total, new_prob)
# All other unseen probabilities - (1-Sum_w(Pr(w|base))) / Z(base)
new_other_prob = log(1.0 - exp(total)) - log(self.voc_size - num_types_after[base_gram])
self.set_prob(self.n, base_gram + ('<OTHER>',), new_other_prob)
# Verify probabilities sum to 1
info('Testing...')
l = self.n
for base_gram in grams[l-1].keys():
if base_gram[-1] == '</s>':
continue
total = LOGZERO
for w in types_after[base_gram]:
gram = base_gram + (w,)
total = add_log(total, self.get_prob(gram))
total = add_log(total, self.get_prob(base_gram + ('<OTHER>',)) + \
log(self.voc_size - num_types_after[base_gram]))
if abs(total-0.0) > EPSILON:
raise Exception('Bad total for %s: %.32f' % (base_gram, exp(total)))
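# Note: `total` above accumulates log probabilities, so the sanity check compares
# it against log(1) = 0; every conditional distribution over the vocabulary must
# sum to one.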
def _calculate_log_prob(self, l, grams, gram_count, prev_gram_count, num_types_after, num_tokens_after):
# No need to smooth 1-grams obviously...
if l == 1 or self.smoothing == 'none' or (self.smoothing == 'ls' and self.lmbd == 0):
if prev_gram_count == 0:
# No prior information, assume uniform distribution
log_prob = -log(self.voc_size)
elif gram_count == 0:
log_prob = LOGZERO | else: | random_line_split |
|
data.py | (x, y):
x,y = max(x,y), min(x,y)
if y <= LOGZERO:
return x
negdiff = y-x
return x + log(1 + exp(negdiff))
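# Illustrative example: add_log(log(0.2), log(0.3)) equals log(0.5) up to floating
# point error. Factoring out the larger argument first is the usual log-sum-exp
# trick, so exp() is only ever applied to a non-positive value and cannot overflow.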
class LanguageModel(object):
def __init__(self, n):
self.smoothing = None
self.interpolate = False
self.lmbd = 0
self.models = defaultdict(dict)
self.voc = []
self.n = n
self.prob_no_information = LOGZERO
def set_model(self, n, model):
self.models[n] = model
def setn(self, n):
self.n = n
def set_voc(self, voc):
self.voc = voc
self.voc_size = len(voc)
def set_prob(self, n, words, log_prob):
w = tuple(words)
self.models[n][w] = log_prob
def set_smoothing(self, s, lmbd=1.0):
self.smoothing = s
if self.smoothing == 'ls':
self.lmbd = lmbd
elif self.smoothing == 'wb':
self.interpolate = True
def get_smoothing(self):
return self.smoothing, self.lmbd
def get_prob(self, words):
n = len(words)
words = tuple(w if w[0] == '<' or w in self.voc else '<UNK>' for w in words)
d = self.models[n]
if words in d:
return d[words]
if len(words) > 1 and words[:-1] not in self.models[n-1]:
# Previous words were not seen
if self.prob_no_information != LOGZERO:
# Use probability when there's no information
return self.prob_no_information
else:
# Backoff
return self.get_prob(words[1:])
# Check whether we did some smoothing which raised nonzero probs to some value
other = words[:-1] + ('<OTHER>',)
if other in d:
return d[other]
# Shouldn't reach here!
raise Exception(words)
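# Usage sketch (illustrative; corpus.txt is a placeholder path):
#
#   lm = LanguageModel(2)
#   lm.set_smoothing('wb')
#   lm.train_model(open('corpus.txt').readlines(), silent=True)
#   print exp(lm.get_prob(('the', 'cat')))   # Pr(cat | the)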
def __getitem__(self, item):
return self.models[item]
def __len__(self):
return len(self.models)
def dump(self, output_file):
with open(output_file, 'wb') as f:
f.write('\\data\\\n')
for n in self.models:
f.write('ngram %d=%d\n' % (n, len(self.models[n])))
f.write('\n')
f.write('\\smoothing\\\n')
f.write('%s\t%.32f\n' % (self.smoothing, self.lmbd))
f.write('%.32f\n' % self.prob_no_information)
f.write('\n')
for n in self.models:
f.write('\\%d-grams:\n' % n)
for words, prob in self.models[n].iteritems():
f.write('%.32f\t%s\n' % (prob, " ".join(words)))
@staticmethod
def load(f):
# \data\
f.readline()
ngram = 0
while True:
l = f.readline().strip()
if not l:
break
ngram = max(ngram, int(l.split('ngram ')[1].split('=')[0]))
assert ngram != 0, "Can't find ngram in file!"
lm = LanguageModel(ngram)
# \smoothing\
f.readline()
smooth_line = f.readline().strip()
lm.prob_no_information = float(f.readline().strip())
f.readline()
smoothing, lmbd = smooth_line.split()
lm.set_smoothing(smoothing, float(lmbd))
# N-grams
current_ngram = 0
voc = set()
while True:
l = f.readline().strip()
if not l:
break
elif l.startswith('\\'): # descriptor
current_ngram = int(l[1])
else: # data line
assert current_ngram != 0, 'Invalid n-gram'
log_prob, words = l.split('\t', 1)
log_prob = float(log_prob)
words = tuple(words.split(' '))
lm.set_prob(current_ngram, words, log_prob)
if current_ngram == 1:
voc.add(words[0])
lm.set_voc(voc)
return lm
def _count_grams(self, lines_tokens):
''' Count ngrams in a given list of tokenized lines '''
# T(w) in Witten-Bell smoothing
# types_after[w] = set of all types that occur after w
types_after = defaultdict(set)
# N(w) in Witten-Bell smoothing
# num_tokens_after[w] = number of tokens that occur after w
num_tokens_after = defaultdict(int)
tokens = 0
grams = [defaultdict(int) for _ in xrange(self.n+1)]
voc = set()
for i, words in enumerate(lines_tokens):
for word in words:
voc.add(word)
num_words = len(words)
tokens += num_words
for l in xrange(1, self.n+1):
for j in xrange (l, num_words+1):
gram = tuple(words[j-l:j])
if l > 1:
# Account T(w) and N(w) in WB smoothing
prev = gram[:-1]
types_after[prev].add(gram[-1])
num_tokens_after[prev] += 1
grams[l][gram] += 1
grams[0] = dict()
grams[0][tuple()] = tokens
return tokens, grams, voc, types_after, num_tokens_after
def train_model(self, lines, silent=False):
# Print to log if needed
if silent:
def info(s):
pass
else:
def info(s):
print s
if self.interpolate:
n_held_out = int(len(lines)*.1)
held_out = lines[:n_held_out]
data = lines[n_held_out:]
else:
data = lines
info('Tokenizing...')
data_tokens = [['<s>'] + tokenize(l) + ['</s>'] for l in data]
if self.interpolate:
held_out_tokens = [['<s>'] + tokenize(l) + ['</s>'] for l in held_out]
# Build the lexicon as words that appear more than once, should be at least 99% of number of tokens
info('Building lexicon...')
lexicon = set()
unk = set()
counts = defaultdict(int)
for l in data_tokens:
for w in l:
if w[0] == '<':
continue
counts[w] += 1
if counts[w] == 1:
unk.add(w)
elif counts[w] == 2:
lexicon.add(w)
unk.remove(w)
del counts
while len(lexicon) < 0.99*(len(lexicon) + len(unk)):
for w in unk:
unk.remove(w)
lexicon.add(w)
break
del unk
info('Replacing OOV words with <UNK>...')
for l in data_tokens:
for i in xrange(len(l)):
if l[i][0] != '<' and l[i] not in lexicon:
l[i] = '<UNK>'
del lexicon
info('Counting ngrams...')
tokens, grams, voc, types_after, num_tokens_after = self._count_grams(data_tokens)
self.set_voc(voc)
num_types_after = defaultdict(int, ((x, len(types_after[x])) for x in types_after))
num_types_after[tuple()] = self.voc_size
num_tokens_after[tuple()] = tokens
info('Calculating probabilities...')
self.set_prob(1, ('<OTHER>',), LOGZERO)
for l in xrange(1, self.n+1):
for gram, gram_count in grams[l].iteritems():
prev = gram[:-1]
log_prob = self._calculate_log_prob(l, grams, gram_count, grams[l-1][prev],
num_types_after[prev], num_tokens_after[prev])
self.set_prob(l, gram, log_prob)
# Calculate probabilities for unseen ngrams (after smoothing they get a nonzero value...)
for l in xrange(2, self.n+1):
for base_gram, base_gram_count in grams[l-1].iteritems():
log_prob = self._calculate_log_prob(l, grams, 0, base_gram_count, num_types_after[base_gram],
num_tokens_after[base_gram])
self.set_prob(l, base_gram + ('<OTHER>',), log_prob)
# Pr(W_n | W_{n-1}) when C(W_n)=0, C(W_{n-1})=0
self.prob_no_information = self._calculate_log_prob(2, grams, 0, 0, 0, 0)
if not self.interpolate:
return
info('Interpolating...')
_, held_out_grams, _, held_out_types_after, held_out_num_tokens_after = \
self._count_grams(held_out_tokens)
for base_gram in grams[self.n-1]:
# Use EM to calculate lambda | add_log | identifier_name |
|
data.py | ) in Witten-Bell smoothing
# types_after[w] = set of all types that occur after w
types_after = defaultdict(set)
# N(w) in Witten-Bell smoothing
# num_tokens_after[w] = number of tokens that occur after w
num_tokens_after = defaultdict(int)
tokens = 0
grams = [defaultdict(int) for _ in xrange(self.n+1)]
voc = set()
for i, words in enumerate(lines_tokens):
for word in words:
voc.add(word)
num_words = len(words)
tokens += num_words
for l in xrange(1, self.n+1):
for j in xrange (l, num_words+1):
gram = tuple(words[j-l:j])
if l > 1:
# Account T(w) and N(w) in WB smoothing
prev = gram[:-1]
types_after[prev].add(gram[-1])
num_tokens_after[prev] += 1
grams[l][gram] += 1
grams[0] = dict()
grams[0][tuple()] = tokens
return tokens, grams, voc, types_after, num_tokens_after
def train_model(self, lines, silent=False):
# Print to log if needed
if silent:
def info(s):
pass
else:
def info(s):
print s
if self.interpolate:
n_held_out = int(len(lines)*.1)
held_out = lines[:n_held_out]
data = lines[n_held_out:]
else:
data = lines
info('Tokenizing...')
data_tokens = [['<s>'] + tokenize(l) + ['</s>'] for l in data]
if self.interpolate:
held_out_tokens = [['<s>'] + tokenize(l) + ['</s>'] for l in held_out]
# Build the lexicon as words that appear more than once, should be at least 99% of number of tokens
info('Building lexicon...')
lexicon = set()
unk = set()
counts = defaultdict(int)
for l in data_tokens:
for w in l:
if w[0] == '<':
continue
counts[w] += 1
if counts[w] == 1:
unk.add(w)
elif counts[w] == 2:
lexicon.add(w)
unk.remove(w)
del counts
while len(lexicon) < 0.99*(len(lexicon) + len(unk)):
for w in unk:
unk.remove(w)
lexicon.add(w)
break
del unk
info('Replacing OOV words with <UNK>...')
for l in data_tokens:
for i in xrange(len(l)):
if l[i][0] != '<' and l[i] not in lexicon:
l[i] = '<UNK>'
del lexicon
info('Counting ngrams...')
tokens, grams, voc, types_after, num_tokens_after = self._count_grams(data_tokens)
self.set_voc(voc)
num_types_after = defaultdict(int, ((x, len(types_after[x])) for x in types_after))
num_types_after[tuple()] = self.voc_size
num_tokens_after[tuple()] = tokens
info('Calculating probabilities...')
self.set_prob(1, ('<OTHER>',), LOGZERO)
for l in xrange(1, self.n+1):
for gram, gram_count in grams[l].iteritems():
prev = gram[:-1]
log_prob = self._calculate_log_prob(l, grams, gram_count, grams[l-1][prev],
num_types_after[prev], num_tokens_after[prev])
self.set_prob(l, gram, log_prob)
# Calculate probabilities for unseen ngrams (after smoothing they get a nonzero value...)
for l in xrange(2, self.n+1):
for base_gram, base_gram_count in grams[l-1].iteritems():
log_prob = self._calculate_log_prob(l, grams, 0, base_gram_count, num_types_after[base_gram],
num_tokens_after[base_gram])
self.set_prob(l, base_gram + ('<OTHER>',), log_prob)
# Pr(W_n | W_{n-1}) when C(W_n)=0, C(W_{n-1})=0
self.prob_no_information = self._calculate_log_prob(2, grams, 0, 0, 0, 0)
if not self.interpolate:
return
info('Interpolating...')
_, held_out_grams, _, held_out_types_after, held_out_num_tokens_after = \
self._count_grams(held_out_tokens)
for base_gram in grams[self.n-1]:
# Use EM to calculate lambda-values which provide max log-likelihood on held-out data
# Based on: https://www.cs.cmu.edu/~roni/11761/Presentations/degenerateEM.pdf
if held_out_num_tokens_after[base_gram] == 0:
# This base gram is so rare that it doesn't appear in the held-out data -- interpolation
# won't matter much in this case anyway!
continue
log_lmbds = [log(1) - log(self.n)]*self.n
prev_loglikelihood = self._calculate_lmbds_loglikelihood(base_gram, log_lmbds, held_out_grams,
held_out_types_after, held_out_num_tokens_after)
for t in count(1):
# E-step
# log_denoms[w] = lg(denominator for word w)
log_denoms = dict()
for w in held_out_types_after[base_gram]:
gram = base_gram + (w,)
log_denoms[w] = self._calculate_interpolated_prob(gram, log_lmbds)
# M-step
for j in xrange(self.n):
new_log_lmbd = LOGZERO
for w in held_out_types_after[base_gram]:
gram = base_gram + (w,)
val = log_lmbds[j] + self.get_prob(gram[-j-1:]) \
+ log(held_out_grams[self.n][gram]) - log_denoms[w]
new_log_lmbd = add_log(new_log_lmbd, val)
log_lmbds[j] = new_log_lmbd - log(held_out_num_tokens_after[base_gram])
# Check for convergence
loglikelihood = self._calculate_lmbds_loglikelihood(base_gram, log_lmbds, held_out_grams,
held_out_types_after, held_out_num_tokens_after)
assert loglikelihood >= prev_loglikelihood
if loglikelihood - prev_loglikelihood <= EM_EPSILON:
break
prev_loglikelihood = loglikelihood
# Calculate the new interpolated probabilities
total = LOGZERO
for w in types_after[base_gram]:
gram = base_gram + (w,)
new_prob = self._calculate_interpolated_prob(gram, log_lmbds)
self.set_prob(self.n, gram, new_prob)
total = add_log(total, new_prob)
# All other unseen probabilities - (1-Sum_w(Pr(w|base))) / Z(base)
new_other_prob = log(1.0 - exp(total)) - log(self.voc_size - num_types_after[base_gram])
self.set_prob(self.n, base_gram + ('<OTHER>',), new_other_prob)
# Verify probabilities sum to 1
info('Testing...')
l = self.n
for base_gram in grams[l-1].keys():
if base_gram[-1] == '</s>':
continue
total = LOGZERO
for w in types_after[base_gram]:
gram = base_gram + (w,)
total = add_log(total, self.get_prob(gram))
total = add_log(total, self.get_prob(base_gram + ('<OTHER>',)) + \
log(self.voc_size - num_types_after[base_gram]))
if abs(total-0.0) > EPSILON:
raise Exception('Bad total for %s: %.32f' % (base_gram, exp(total)))
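For reference, the quantity the EM loop above fits is the usual linear interpolation of the n backoff distributions (indices written loosely; the code keeps the logs of the weights in log_lmbds):

\[
P_{\text{interp}}\bigl(w_n \mid w_1^{\,n-1}\bigr) \;=\; \sum_{j=1}^{n} \lambda_j \, P\bigl(w_n \mid w_{n-j+1}^{\,n-1}\bigr),
\qquad \sum_{j=1}^{n} \lambda_j = 1,
\]

with the \(\lambda_j\) chosen per base gram to maximise the log-likelihood of the held-out counts, as in the degenerate-EM notes linked in the comment.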
def _calculate_log_prob(self, l, grams, gram_count, prev_gram_count, num_types_after, num_tokens_after):
# No need to smooth 1-grams obviously...
if l == 1 or self.smoothing == 'none' or (self.smoothing == 'ls' and self.lmbd == 0):
if prev_gram_count == 0:
# No prior information, assume uniform distribution
log_prob = -log(self.voc_size)
elif gram_count == 0:
log_prob = LOGZERO
else:
log_prob = log(gram_count) - log(prev_gram_count)
elif self.smoothing == 'ls':
log_prob = log(gram_count + self.lmbd) - log(prev_gram_count + self.lmbd * self.voc_size)
elif self.smoothing == 'wb':
z = self.voc_size - num_types_after
if gram_count == 0:
| if num_types_after == 0:
log_prob = LOGZERO
else:
log_prob = log(num_types_after) - (log(z) + log(num_tokens_after + num_types_after)) | conditional_block |
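The c(h,w) = 0 branch above matches the usual Witten-Bell estimate for unseen continuations. Writing T(h) for the number of distinct types seen after history h, N(h) for the number of tokens seen after h, and Z(h) = |V| - T(h) (the z in the code):

\[
P_{\mathrm{WB}}(w \mid h) \;=\;
\begin{cases}
\dfrac{c(h,w)}{N(h) + T(h)}, & c(h,w) > 0,\\[1.5ex]
\dfrac{T(h)}{Z(h)\,\bigl(N(h) + T(h)\bigr)}, & c(h,w) = 0,\ T(h) > 0,
\end{cases}
\]

where the seen case is presumably handled in the part of _calculate_log_prob that falls outside this row.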
|
lptim.rs | {
/// Drive LPTIM with APB1 clock.
Apb1 = 0b00,
/// Drive LPTIM with Low-Speed Internal (LSI) clock.
///
/// The user has to ensure that the LSI clock is running, or the timer won't
/// start counting.
Lsi = 0b01,
/// Drive LPTIM with Internal 16 MHz clock.
Hsi16 = 0b10,
/// Drive LPTIM with Low-Speed External (LSE) clock at 32.768 kHz.
///
/// The user has to ensure that the LSE clock is running, or the timer won't
/// start counting.
Lse = 0b11,
}
/// Interrupt enable flags.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)]
pub struct Interrupts {
/// Encoder direction change to down.
pub enc_dir_down: bool,
/// Encoder direction change to up.
pub enc_dir_up: bool,
/// ARR register update successful.
pub autoreload_update_ok: bool,
/// CMP register update successful.
pub compare_update_ok: bool,
/// Valid edge on ext. trigger input.
pub ext_trig: bool,
/// ARR register matches current CNT value.
pub autoreload_match: bool,
/// CMP register matches current CNT value.
pub compare_match: bool,
}
/// Low-Power Timer (`LPTIM`).
///
/// The Low-Power Timer is a 16-bit timer with a prescaler of up to 128. It can run off of the APB1,
/// LSI, HSI16, or LSE clocks. With LSE, the slowest clock at 32.768 kHz, this results in a maximum
/// timeout of 256 seconds, or 4 minutes and 16 seconds.
///
/// The timer can be initialized either in one-shot mode or in periodic mode, using `init_oneshot`
/// or `init_periodic` respectively. In periodic mode, the embedded-hal `Periodic` marker trait is
/// implemented and the `CountDown` implementation uses `Hertz` as the time unit. In one-shot mode,
/// the `CountDown` implementation instead uses `Microseconds`, allowing for a multi-second timeout
/// to be configured (with the tradeoff being a larger code size due to use of 64-bit arithmetic).
pub struct LpTimer<M: CountMode> {
lptim: LPTIM,
input_freq: Hertz,
_mode: PhantomData<M>,
}
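The 256-second figure quoted in the doc comment above follows directly from the 16-bit counter range, the maximum prescaler of 128 and the 32.768 kHz LSE clock:

\[
t_{\max} \;=\; \frac{2^{16} \times 128}{32\,768\ \mathrm{Hz}} \;=\; 256\ \mathrm{s} \;\approx\; 4\ \mathrm{min}\ 16\ \mathrm{s}.
\]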
impl LpTimer<Periodic> {
/// Initializes the Low-Power Timer in periodic mode.
///
/// The timer needs to be started by calling `.start(freq)`.
pub fn init_periodic(lptim: LPTIM, pwr: &mut PWR, rcc: &mut Rcc, clk: ClockSrc) -> Self {
Self::init(lptim, pwr, rcc, clk)
}
}
impl LpTimer<OneShot> {
/// Initializes the Low-Power Timer in one-shot mode.
///
/// The timer needs to be started by calling `.start(freq)`.
pub fn init_oneshot(lptim: LPTIM, pwr: &mut PWR, rcc: &mut Rcc, clk: ClockSrc) -> Self {
Self::init(lptim, pwr, rcc, clk)
}
}
impl LpTimer<Encoder> {
/// Initializes the Low-Power Timer in encoder mode.
///
/// The `start` method must be called to enable the encoder input.
pub fn init_encoder(
lptim: LPTIM,
pwr: &mut PWR,
rcc: &mut Rcc,
clk: ClockSrc,
(pb5, pb7): (gpiob::PB5<gpio::Analog>, gpiob::PB7<gpio::Analog>),
) -> Self {
pb5.set_alt_mode(gpio::AltMode::AF2);
pb7.set_alt_mode(gpio::AltMode::AF2);
Self::init(lptim, pwr, rcc, clk)
}
// TODO: Dedupe this fn with configure() function in `impl<M: CountMode> LpTimer<M>`
fn configure_encoder(&mut self, arr: u16) {
// Disable the timer. The prescaler can only be changed while it's disabled.
self.lptim.cr.write(|w| w.enable().clear_bit());
// Configure in encoder mode
self.lptim.cfgr.write(|w| {
w
// Make sure prescaler is disabled. Encoder mode forbids prescaling.
.presc()
.div1()
// Put timer into encoder mode
.enc()
.set_bit()
// Choose internal clock source - external sources not supported in encoder mode.
.cksel()
.clear_bit()
// Start counting from software trigger
.trigen()
.sw()
// Clock polarity
.ckpol()
.both_edges()
});
// Enable timer
self.lptim.cr.write(|w| w.enable().set_bit());
// "After setting the ENABLE bit, a delay of two counter clock is needed before the LPTIM is
// actually enabled."
// The slowest LPTIM clock source is LSE at 32768 Hz, the fastest CPU clock is ~80 MHz. At
// these conditions, one cycle of the LPTIM clock takes 2500 CPU cycles, so sleep for 5000.
cortex_m::asm::delay(5000);
// ARR can only be changed while the timer is *en*abled
self.lptim.arr.write(|w| w.arr().bits(arr));
}
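The 5000-cycle busy-wait above comes from the worst-case ratio described in the comment: with a CPU clock of roughly 80 MHz and the 32.768 kHz LSE driving LPTIM, one LPTIM cycle is

\[
\frac{80\,000\,000}{32\,768} \;\approx\; 2441\ \text{CPU cycles},
\]

rounded up to 2500, and the mandated two LPTIM cycles give the cortex_m::asm::delay(5000).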
/// Enable the timer and begin counting encoder pulses.
///
/// The provided value is stored in the ARR (Auto Reload Register). The timer's internal counter
/// will wrap when this value is reached.
pub fn enable(&mut self, arr: u16) {
self.configure_encoder(arr);
// Enable timer, enable continuous mode
self.lptim
.cr
.write(|w| w.enable().set_bit().cntstrt().set_bit());
}
/// Disable the timer.
pub fn disable(&mut self) {
self.lptim.cr.write(|w| w.enable().clear_bit());
}
/// Get the current count of the encoder.
pub fn count(&self) -> u16 {
(self.lptim.cnt.read().bits() & 0xffff) as u16
}
/// Clear all LPTIM interrupt flags
pub fn clear_flags(&self) {
self.lptim.icr.write(|w| unsafe { w.bits(0x7f) });
}
}
impl<M: CountMode> LpTimer<M> {
fn init(lptim: LPTIM, pwr: &mut PWR, rcc: &mut Rcc, clk: ClockSrc) -> Self {
// `pwr` is not used. It is used as a marker that guarantees that `PWR.CR` is set so this
// function can set the `RCC.LSEON` bit, which is otherwise write protected.
let _ = pwr;
// Enable selected clock and determine its frequency
let input_freq = match clk {
ClockSrc::Apb1 => rcc.clocks.apb1_clk(), // always enabled
ClockSrc::Lsi => {
// Turn on LSI
rcc.rb.csr.modify(|_, w| w.lsion().set_bit());
// Wait for LSI to be ready
while rcc.rb.csr.read().lsirdy().bit_is_clear() {}
Hertz(37_000)
}
ClockSrc::Hsi16 => {
// Turn on HSI16
rcc.rb.cr.modify(|_, w| w.hsi16on().set_bit());
// Wait for HSI16 to be ready
while rcc.rb.cr.read().hsi16rdyf().bit_is_clear() {}
Hertz(16_000_000)
}
ClockSrc::Lse => {
// Turn on LSE
rcc.rb.csr.modify(|_, w| w.lseon().set_bit());
// Wait for LSE to be ready
while rcc.rb.csr.read().lserdy().bit_is_clear() {}
Hertz(32_768)
}
};
// Select and enable clock. Right now we only support the internal RCC clocks, but LPTIM can
// also run as a counter with a dedicated external input.
rcc.rb.ccipr.modify(|_, w| w.lptim1sel().bits(clk as u8));
LPTIM::enable(rcc);
LPTIM::reset(rcc);
Self {
lptim,
input_freq,
_mode: PhantomData,
}
}
/// Disables the timer and configures it so that starting it will make it fire at the given
/// frequency.
fn configure(&mut self, conf: TimeConf) {
// Disable the timer. The prescaler can only | ClockSrc | identifier_name |
|
lptim.rs | Src) -> Self |
}
impl LpTimer<Encoder> {
/// Initializes the Low-Power Timer in encoder mode.
///
/// The `start` method must be called to enable the encoder input.
pub fn init_encoder(
lptim: LPTIM,
pwr: &mut PWR,
rcc: &mut Rcc,
clk: ClockSrc,
(pb5, pb7): (gpiob::PB5<gpio::Analog>, gpiob::PB7<gpio::Analog>),
) -> Self {
pb5.set_alt_mode(gpio::AltMode::AF2);
pb7.set_alt_mode(gpio::AltMode::AF2);
Self::init(lptim, pwr, rcc, clk)
}
// TODO: Dedupe this fn with configure() function in `impl<M: CountMode> LpTimer<M>`
fn configure_encoder(&mut self, arr: u16) {
// Disable the timer. The prescaler can only be changed while it's disabled.
self.lptim.cr.write(|w| w.enable().clear_bit());
// Configure in encoder mode
self.lptim.cfgr.write(|w| {
w
// Make sure prescaler is disabled. Encoder mode forbids prescaling.
.presc()
.div1()
// Put timer into encoder mode
.enc()
.set_bit()
// Choose internal clock source - external sources not supported in encoder mode.
.cksel()
.clear_bit()
// Start counting from software trigger
.trigen()
.sw()
// Clock polarity
.ckpol()
.both_edges()
});
// Enable timer
self.lptim.cr.write(|w| w.enable().set_bit());
// "After setting the ENABLE bit, a delay of two counter clock is needed before the LPTIM is
// actually enabled."
// The slowest LPTIM clock source is LSE at 32768 Hz, the fastest CPU clock is ~80 MHz. At
// these conditions, one cycle of the LPTIM clock takes 2500 CPU cycles, so sleep for 5000.
cortex_m::asm::delay(5000);
// ARR can only be changed while the timer is *en*abled
self.lptim.arr.write(|w| w.arr().bits(arr));
}
/// Enable the timer and begin counting encoder pulses.
///
/// The provided value is stored in the ARR (Auto Reload Register). The timer's internal counter
/// will wrap when this value is reached.
pub fn enable(&mut self, arr: u16) {
self.configure_encoder(arr);
// Enable timer, enable continuous mode
self.lptim
.cr
.write(|w| w.enable().set_bit().cntstrt().set_bit());
}
/// Disable the timer.
pub fn disable(&mut self) {
self.lptim.cr.write(|w| w.enable().clear_bit());
}
/// Get the current count of the encoder.
pub fn count(&self) -> u16 {
(self.lptim.cnt.read().bits() & 0xffff) as u16
}
/// Clear all LPTIM interrupt flags
pub fn clear_flags(&self) {
self.lptim.icr.write(|w| unsafe { w.bits(0x7f) });
}
}
impl<M: CountMode> LpTimer<M> {
fn init(lptim: LPTIM, pwr: &mut PWR, rcc: &mut Rcc, clk: ClockSrc) -> Self {
// `pwr` is not used. It is used as a marker that guarantees that `PWR.CR` is set so this
// function can set the `RCC.LSEON` bit, which is otherwise write protected.
let _ = pwr;
// Enable selected clock and determine its frequency
let input_freq = match clk {
ClockSrc::Apb1 => rcc.clocks.apb1_clk(), // always enabled
ClockSrc::Lsi => {
// Turn on LSI
rcc.rb.csr.modify(|_, w| w.lsion().set_bit());
// Wait for LSI to be ready
while rcc.rb.csr.read().lsirdy().bit_is_clear() {}
Hertz(37_000)
}
ClockSrc::Hsi16 => {
// Turn on HSI16
rcc.rb.cr.modify(|_, w| w.hsi16on().set_bit());
// Wait for HSI16 to be ready
while rcc.rb.cr.read().hsi16rdyf().bit_is_clear() {}
Hertz(16_000_000)
}
ClockSrc::Lse => {
// Turn on LSE
rcc.rb.csr.modify(|_, w| w.lseon().set_bit());
// Wait for LSE to be ready
while rcc.rb.csr.read().lserdy().bit_is_clear() {}
Hertz(32_768)
}
};
// Select and enable clock. Right now we only support the internal RCC clocks, but LPTIM can
// also run as a counter with a dedicated external input.
rcc.rb.ccipr.modify(|_, w| w.lptim1sel().bits(clk as u8));
LPTIM::enable(rcc);
LPTIM::reset(rcc);
Self {
lptim,
input_freq,
_mode: PhantomData,
}
}
/// Disables the timer and configures it so that starting it will make it fire at the given
/// frequency.
fn configure(&mut self, conf: TimeConf) {
// Disable the timer. The prescaler can only be changed while it's disabled.
self.lptim.cr.write(|w| w.enable().clear_bit());
self.lptim
.cfgr
.write(|w| w.presc().bits(conf.psc_encoded).timout().set_bit());
self.lptim.cr.write(|w| w.enable().set_bit());
// "After setting the ENABLE bit, a delay of two counter clock is needed before the LPTIM is
// actually enabled."
// The slowest LPTIM clock source is LSE at 32768 Hz, the fastest CPU clock is ~80 MHz. At
// these conditions, one cycle of the LPTIM clock takes 2500 CPU cycles, so sleep for 5000.
cortex_m::asm::delay(5000);
// ARR can only be changed while the timer is *en*abled
self.lptim.arr.write(|w| w.arr().bits(conf.arr));
}
/// Disables and destructs the timer, returning the raw `LPTIM` peripheral.
pub fn free(self) -> LPTIM {
self.lptim.cr.reset();
self.lptim
}
/// Disables the timer and enables the given interrupts.
pub fn enable_interrupts(&mut self, interrupts: Interrupts) {
// IER can only be modified when the timer is disabled
self.lptim.cr.reset();
self.lptim.ier.modify(|_, w| {
if interrupts.enc_dir_down {
w.downie().enabled();
}
if interrupts.enc_dir_up {
w.upie().enabled();
}
if interrupts.autoreload_update_ok {
w.arrokie().enabled();
}
if interrupts.compare_update_ok {
w.cmpokie().enabled();
}
if interrupts.ext_trig {
w.exttrigie().enabled();
}
if interrupts.autoreload_match {
w.arrmie().enabled();
}
if interrupts.compare_match {
w.cmpmie().enabled();
}
w
})
}
/// Disables the timer and disables the given interrupts.
pub fn disable_interrupts(&mut self, interrupts: Interrupts) {
// IER can only be modified when the timer is disabled
self.lptim.cr.reset();
self.lptim.ier.modify(|_, w| {
if interrupts.enc_dir_down {
w.downie().disabled();
}
if interrupts.enc_dir_up {
w.upie().disabled();
}
if interrupts.autoreload_update_ok {
w.arrokie().disabled();
}
if interrupts.compare_update_ok {
w.cmpokie().disabled();
}
if interrupts.ext_trig {
w.exttrigie().disabled();
}
if interrupts.autoreload_match {
w.arrmie().disabled();
}
if interrupts.compare_match {
w.cmpmie().disabled();
}
w
})
}
}
impl hal::timer::CountDown for LpTimer<Periodic> {
type Time = Hertz;
fn start<T>(&mut self, freq: T)
where
T: Into<Hertz>,
{
self.configure(TimeConf::calculate_freq(self.input_freq, freq.into()));
// Start LPTIM in continuous mode.
self.lptim
.cr
.write(|w| w.enable().set_bit().cntstrt | {
Self::init(lptim, pwr, rcc, clk)
} | identifier_body |
lptim.rs | Src) -> Self {
Self::init(lptim, pwr, rcc, clk)
}
}
impl LpTimer<Encoder> {
/// Initializes the Low-Power Timer in encoder mode.
///
/// The `start` method must be called to enable the encoder input.
pub fn init_encoder(
lptim: LPTIM,
pwr: &mut PWR,
rcc: &mut Rcc,
clk: ClockSrc,
(pb5, pb7): (gpiob::PB5<gpio::Analog>, gpiob::PB7<gpio::Analog>),
) -> Self {
pb5.set_alt_mode(gpio::AltMode::AF2);
pb7.set_alt_mode(gpio::AltMode::AF2);
Self::init(lptim, pwr, rcc, clk)
}
// TODO: Dedupe this fn with configure() function in `impl<M: CountMode> LpTimer<M>`
fn configure_encoder(&mut self, arr: u16) {
// Disable the timer. The prescaler can only be changed while it's disabled.
self.lptim.cr.write(|w| w.enable().clear_bit());
// Configure in encoder mode
self.lptim.cfgr.write(|w| {
w
// Make sure prescaler is disabled. Encoder mode forbids prescaling.
.presc()
.div1()
// Put timer into encoder mode
.enc()
.set_bit()
// Choose internal clock source - external sources not supported in encoder mode.
.cksel()
.clear_bit()
// Start counting from software trigger
.trigen()
.sw()
// Clock polarity
.ckpol()
.both_edges()
});
// Enable timer
self.lptim.cr.write(|w| w.enable().set_bit());
// "After setting the ENABLE bit, a delay of two counter clock is needed before the LPTIM is
// actually enabled."
// The slowest LPTIM clock source is LSE at 32768 Hz, the fastest CPU clock is ~80 MHz. At
// these conditions, one cycle of the LPTIM clock takes 2500 CPU cycles, so sleep for 5000.
cortex_m::asm::delay(5000);
// ARR can only be changed while the timer is *en*abled
self.lptim.arr.write(|w| w.arr().bits(arr));
}
/// Enable the timer and begin counting encoder pulses.
///
/// The provided value is stored in the ARR (Auto Reload Register). The timer's internal counter
/// will wrap when this value is reached.
pub fn enable(&mut self, arr: u16) {
self.configure_encoder(arr);
// Enable timer, enable continuous mode
self.lptim
.cr
.write(|w| w.enable().set_bit().cntstrt().set_bit());
}
/// Disable the timer.
pub fn disable(&mut self) {
self.lptim.cr.write(|w| w.enable().clear_bit());
}
/// Get the current count of the encoder.
pub fn count(&self) -> u16 {
(self.lptim.cnt.read().bits() & 0xffff) as u16
}
/// Clear all LPTIM interrupt flags
pub fn clear_flags(&self) {
self.lptim.icr.write(|w| unsafe { w.bits(0x7f) });
}
}
impl<M: CountMode> LpTimer<M> {
fn init(lptim: LPTIM, pwr: &mut PWR, rcc: &mut Rcc, clk: ClockSrc) -> Self {
// `pwr` is not used. It is used as a marker that guarantees that `PWR.CR` is set so this
// function can set the `RCC.LSEON` bit, which is otherwise write protected.
let _ = pwr;
// Enable selected clock and determine its frequency
let input_freq = match clk {
ClockSrc::Apb1 => rcc.clocks.apb1_clk(), // always enabled
ClockSrc::Lsi => {
// Turn on LSI
rcc.rb.csr.modify(|_, w| w.lsion().set_bit());
// Wait for LSI to be ready
while rcc.rb.csr.read().lsirdy().bit_is_clear() {}
Hertz(37_000)
}
ClockSrc::Hsi16 => {
// Turn on HSI16
rcc.rb.cr.modify(|_, w| w.hsi16on().set_bit());
// Wait for HSI16 to be ready
while rcc.rb.cr.read().hsi16rdyf().bit_is_clear() {}
Hertz(16_000_000)
}
ClockSrc::Lse => {
// Turn on LSE
rcc.rb.csr.modify(|_, w| w.lseon().set_bit());
// Wait for LSE to be ready
while rcc.rb.csr.read().lserdy().bit_is_clear() {}
Hertz(32_768)
}
};
// Select and enable clock. Right now we only support the internal RCC clocks, but LPTIM can
// also run as a counter with a dedicated external input.
rcc.rb.ccipr.modify(|_, w| w.lptim1sel().bits(clk as u8));
LPTIM::enable(rcc);
LPTIM::reset(rcc);
Self {
lptim,
input_freq,
_mode: PhantomData,
}
}
/// Disables the timer and configures it so that starting it will make it fire at the given
/// frequency.
fn configure(&mut self, conf: TimeConf) {
// Disable the timer. The prescaler can only be changed while it's disabled.
self.lptim.cr.write(|w| w.enable().clear_bit());
self.lptim
.cfgr
.write(|w| w.presc().bits(conf.psc_encoded).timout().set_bit()); | // The slowest LPTIM clock source is LSE at 32768 Hz, the fastest CPU clock is ~80 MHz. At
// these conditions, one cycle of the LPTIM clock takes 2500 CPU cycles, so sleep for 5000.
cortex_m::asm::delay(5000);
// ARR can only be changed while the timer is *en*abled
self.lptim.arr.write(|w| w.arr().bits(conf.arr));
}
/// Disables and destructs the timer, returning the raw `LPTIM` peripheral.
pub fn free(self) -> LPTIM {
self.lptim.cr.reset();
self.lptim
}
/// Disables the timer and enables the given interrupts.
pub fn enable_interrupts(&mut self, interrupts: Interrupts) {
// IER can only be modified when the timer is disabled
self.lptim.cr.reset();
self.lptim.ier.modify(|_, w| {
if interrupts.enc_dir_down {
w.downie().enabled();
}
if interrupts.enc_dir_up {
w.upie().enabled();
}
if interrupts.autoreload_update_ok {
w.arrokie().enabled();
}
if interrupts.compare_update_ok {
w.cmpokie().enabled();
}
if interrupts.ext_trig {
w.exttrigie().enabled();
}
if interrupts.autoreload_match {
w.arrmie().enabled();
}
if interrupts.compare_match {
w.cmpmie().enabled();
}
w
})
}
/// Disables the timer and disables the given interrupts.
pub fn disable_interrupts(&mut self, interrupts: Interrupts) {
// IER can only be modified when the timer is disabled
self.lptim.cr.reset();
self.lptim.ier.modify(|_, w| {
if interrupts.enc_dir_down {
w.downie().disabled();
}
if interrupts.enc_dir_up {
w.upie().disabled();
}
if interrupts.autoreload_update_ok {
w.arrokie().disabled();
}
if interrupts.compare_update_ok {
w.cmpokie().disabled();
}
if interrupts.ext_trig {
w.exttrigie().disabled();
}
if interrupts.autoreload_match {
w.arrmie().disabled();
}
if interrupts.compare_match {
w.cmpmie().disabled();
}
w
})
}
}
impl hal::timer::CountDown for LpTimer<Periodic> {
type Time = Hertz;
fn start<T>(&mut self, freq: T)
where
T: Into<Hertz>,
{
self.configure(TimeConf::calculate_freq(self.input_freq, freq.into()));
// Start LPTIM in continuous mode.
self.lptim
.cr
.write(|w| w.enable().set_bit().cntstrt(). |
self.lptim.cr.write(|w| w.enable().set_bit());
// "After setting the ENABLE bit, a delay of two counter clock is needed before the LPTIM is
// actually enabled." | random_line_split |
thread.go |
mux sync.Mutex
status ThreadStatus
closeErr error // Error that caused the thread to stop
currentCont Cont // Currently running continuation
resumeCh chan valuesError
caller *Thread // Who resumed this thread
// Depth of GoFunction calls in the thread. This should not exceed
// maxGoFunctionCallDepth. The aim is to avoid Go stack overflows that
// cannot be recovered from (note that this does not limit recursion for Lua
// functions).
goFunctionCallDepth int
DebugHooks
closeStack // Stack of pending to-be-closed values
}
// NewThread creates a new thread out of a Runtime. Its initial
// status is suspended. Call Resume to run it.
func NewThread(r *Runtime) *Thread {
r.RequireSize(unsafe.Sizeof(Thread{}) + 100) // 100 is my guess at the size of a channel
return &Thread{
resumeCh: make(chan valuesError),
status: ThreadSuspended,
Runtime: r,
}
}
// CurrentCont returns the continuation currently running (or suspended) in the
// thread.
func (t *Thread) CurrentCont() Cont {
return t.currentCont
}
// IsMain returns true if the thread is the runtime's main thread.
func (t *Thread) IsMain() bool {
return t == t.mainThread
}
const maxErrorsInMessageHandler = 10
var errErrorInMessageHandler = StringValue("error in error handling")
// RunContinuation runs the continuation c in the thread. It keeps running until
// the next continuation is nil or an error occurs, in which case it returns the
// error.
func (t *Thread) RunContinuation(c Cont) (err error) {
var next Cont
var errContCount = 0
_ = t.triggerCall(t, c)
for c != nil {
if t != t.gcThread {
t.runPendingFinalizers()
}
t.currentCont = c
next, err = c.RunInThread(t)
if err != nil {
rtErr := ToError(err)
if rtErr.Handled() {
return rtErr
}
err = rtErr.AddContext(c, -1)
errContCount++
if t.messageHandler != nil {
if errContCount > maxErrorsInMessageHandler {
return newHandledError(errErrorInMessageHandler)
}
next = t.messageHandler.Continuation(t, newMessageHandlerCont(c))
} else {
next = newMessageHandlerCont(c)
}
next.Push(t.Runtime, ErrorValue(err))
}
c = next
}
return
}
// This is to be able to close a suspended coroutine without completing it, but
// still allow cleaning up the to-be-closed variables. If this is put on the
// resume channel of a running thread, yield will cause a panic in the goroutine
// and that will be caught in the defer() clause below.
type threadClose struct{}
//
// Coroutine management
//
// Start starts the thread in a goroutine, giving it the callable c to run. the
// t.Resume() method needs to be called to provide arguments to the callable.
func (t *Thread) Start(c Callable) {
t.RequireBytes(2 << 10) // A goroutine starts off with 2k stack
go func() {
var (
args []Value
err error
)
// If there was a panic due to an exceeded quota, we need to end the
// thread and propagate that panic to the calling thread
defer func() {
r := recover()
if r != nil {
switch r.(type) {
case ContextTerminationError:
case threadClose:
// This means we want to close the coroutine, so no panic!
r = nil
default:
panic(r)
}
}
t.end(args, err, r)
}()
args, err = t.getResumeValues()
if err == nil {
next := NewTerminationWith(t.CurrentCont(), 0, true)
err = t.call(c, args, next)
args = next.Etc()
}
}()
}
// Status returns the status of a thread (suspended, running or dead).
func (t *Thread) Status() ThreadStatus {
return t.status
}
// Resume execution of a suspended thread. Its status switches to
// running while its caller's status switches to suspended.
func (t *Thread) Resume(caller *Thread, args []Value) ([]Value, error) {
t.mux.Lock()
if t.status != ThreadSuspended {
t.mux.Unlock()
switch t.status {
case ThreadDead:
return nil, errors.New("cannot resume dead thread")
default:
return nil, errors.New("cannot resume running thread")
}
}
caller.mux.Lock()
if caller.status != ThreadOK |
t.caller = caller
t.status = ThreadOK
t.mux.Unlock()
caller.mux.Unlock()
t.sendResumeValues(args, nil, nil)
return caller.getResumeValues()
}
// Close a suspended thread. If successful, its status switches to dead. The
// boolean returned is true if it was possible to close the thread (i.e. it was
// suspended or already dead). The error is non-nil if there was an error in
// the cleanup process, or if the thread had already stopped with an error
// previously.
func (t *Thread) Close(caller *Thread) (bool, error) {
t.mux.Lock()
if t.status != ThreadSuspended {
t.mux.Unlock()
switch t.status {
case ThreadDead:
return true, t.closeErr
default:
return false, nil
}
}
caller.mux.Lock()
if caller.status != ThreadOK {
panic("Caller of thread to close is not running")
}
// The thread needs to go back to running to empty its close stack, before
// becoming dead.
t.caller = caller
t.status = ThreadOK
t.mux.Unlock()
caller.mux.Unlock()
t.sendResumeValues(nil, nil, threadClose{})
_, err := caller.getResumeValues()
return true, err
}
// Yield to the caller thread. The yielding thread's status switches to
// suspended. The caller's status must be OK.
func (t *Thread) Yield(args []Value) ([]Value, error) {
t.mux.Lock()
if t.status != ThreadOK {
panic("Thread to yield is not running")
}
caller := t.caller
if caller == nil {
t.mux.Unlock()
return nil, errors.New("cannot yield from main thread")
}
caller.mux.Lock()
if caller.status != ThreadOK {
panic("Caller of thread to yield is not OK")
}
t.status = ThreadSuspended
t.caller = nil
t.mux.Unlock()
caller.mux.Unlock()
caller.sendResumeValues(args, nil, nil)
return t.getResumeValues()
}
// This turns off the thread, cleaning up its close stack. The thread must be
// running.
func (t *Thread) end(args []Value, err error, exception interface{}) {
caller := t.caller
t.mux.Lock()
caller.mux.Lock()
defer t.mux.Unlock()
defer caller.mux.Unlock()
switch {
case t.status != ThreadOK:
panic("Called Thread.end on a non-running thread")
case caller.status != ThreadOK:
panic("Caller thread of ending thread is not OK")
}
close(t.resumeCh)
t.status = ThreadDead
t.caller = nil
err = t.cleanupCloseStack(nil, 0, err) // TODO: not nil
t.closeErr = err
caller.sendResumeValues(args, err, exception)
t.ReleaseBytes(2 << 10) // The goroutine will terminate after this
}
func (t *Thread) call(c Callable, args []Value, next Cont) error {
cont := c.Continuation(t, next)
t.Push(cont, args...)
return t.RunContinuation(cont)
}
func (t *Thread) getResumeValues() ([]Value, error) {
res := <-t.resumeCh
if res.exception != nil {
panic(res.exception)
}
return res.args, res.err
}
func (t *Thread) sendResumeValues(args []Value, err error, exception interface{}) {
t.resumeCh <- valuesError{args: args, err: err, exception: exception}
}
//
// Calling
//
// CallContext pushes a new runtime context on the thread's runtime and attempts
// to run f() in the thread. If the context runs out of resources while f() is
// running, all operations should abort and the CallContext should return
// immediately and not finalizing pending to-be-closed values.
//
// Otherwise (even if f() returns an error), pending to-be-closed values should
// be finalized.
//
// See quotas.md for details about this API.
func (t *Thread) CallContext(def RuntimeContextDef, f func() error) (ctx RuntimeContext, err error) {
t.PushContext(def)
c, h := t.CurrentCont(), t.closeStack.size()
defer func() {
ctx = t.PopContext()
if r := recover(); r != nil {
t.closeStack.truncate(h) // No resources to run that, so just discard it | {
panic("Caller of thread to resume is not running")
} | conditional_block |
thread.go | func (t *Thread) CurrentCont() Cont {
return t.currentCont
}
// IsMain returns true if the thread is the runtime's main thread.
func (t *Thread) IsMain() bool {
return t == t.mainThread
}
const maxErrorsInMessageHandler = 10
var errErrorInMessageHandler = StringValue("error in error handling")
// RunContinuation runs the continuation c in the thread. It keeps running until
// the next continuation is nil or an error occurs, in which case it returns the
// error.
func (t *Thread) RunContinuation(c Cont) (err error) {
var next Cont
var errContCount = 0
_ = t.triggerCall(t, c)
for c != nil {
if t != t.gcThread {
t.runPendingFinalizers()
}
t.currentCont = c
next, err = c.RunInThread(t)
if err != nil {
rtErr := ToError(err)
if rtErr.Handled() {
return rtErr
}
err = rtErr.AddContext(c, -1)
errContCount++
if t.messageHandler != nil {
if errContCount > maxErrorsInMessageHandler {
return newHandledError(errErrorInMessageHandler)
}
next = t.messageHandler.Continuation(t, newMessageHandlerCont(c))
} else {
next = newMessageHandlerCont(c)
}
next.Push(t.Runtime, ErrorValue(err))
}
c = next
}
return
}
// This is to be able to close a suspended coroutine without completing it, but
// still allow cleaning up the to-be-closed variables. If this is put on the
// resume channel of a running thread, yield will cause a panic in the goroutine
// and that will be caught in the defer() clause below.
type threadClose struct{}
//
// Coroutine management
//
// Start starts the thread in a goroutine, giving it the callable c to run. the
// t.Resume() method needs to be called to provide arguments to the callable.
func (t *Thread) Start(c Callable) {
t.RequireBytes(2 << 10) // A goroutine starts off with 2k stack
go func() {
var (
args []Value
err error
)
// If there was a panic due to an exceeded quota, we need to end the
// thread and propagate that panic to the calling thread
defer func() {
r := recover()
if r != nil {
switch r.(type) {
case ContextTerminationError:
case threadClose:
// This means we want to close the coroutine, so no panic!
r = nil
default:
panic(r)
}
}
t.end(args, err, r)
}()
args, err = t.getResumeValues()
if err == nil {
next := NewTerminationWith(t.CurrentCont(), 0, true)
err = t.call(c, args, next)
args = next.Etc()
}
}()
}
// Status returns the status of a thread (suspended, running or dead).
func (t *Thread) Status() ThreadStatus {
return t.status
}
// Resume execution of a suspended thread. Its status switches to
// running while its caller's status switches to suspended.
func (t *Thread) Resume(caller *Thread, args []Value) ([]Value, error) {
t.mux.Lock()
if t.status != ThreadSuspended {
t.mux.Unlock()
switch t.status {
case ThreadDead:
return nil, errors.New("cannot resume dead thread")
default:
return nil, errors.New("cannot resume running thread")
}
}
caller.mux.Lock()
if caller.status != ThreadOK {
panic("Caller of thread to resume is not running")
}
t.caller = caller
t.status = ThreadOK
t.mux.Unlock()
caller.mux.Unlock()
t.sendResumeValues(args, nil, nil)
return caller.getResumeValues()
}
// Close a suspended thread. If successful, its status switches to dead. The
// boolean returned is true if it was possible to close the thread (i.e. it was
// suspended or already dead). The error is non-nil if there was an error in
// the cleanup process, or if the thread had already stopped with an error
// previously.
func (t *Thread) Close(caller *Thread) (bool, error) {
t.mux.Lock()
if t.status != ThreadSuspended {
t.mux.Unlock()
switch t.status {
case ThreadDead:
return true, t.closeErr
default:
return false, nil
}
}
caller.mux.Lock()
if caller.status != ThreadOK {
panic("Caller of thread to close is not running")
}
// The thread needs to go back to running to empty its close stack, before
// becoming dead.
t.caller = caller
t.status = ThreadOK
t.mux.Unlock()
caller.mux.Unlock()
t.sendResumeValues(nil, nil, threadClose{})
_, err := caller.getResumeValues()
return true, err
}
// Yield to the caller thread. The yielding thread's status switches to
// suspended. The caller's status must be OK.
func (t *Thread) Yield(args []Value) ([]Value, error) {
t.mux.Lock()
if t.status != ThreadOK {
panic("Thread to yield is not running")
}
caller := t.caller
if caller == nil {
t.mux.Unlock()
return nil, errors.New("cannot yield from main thread")
}
caller.mux.Lock()
if caller.status != ThreadOK {
panic("Caller of thread to yield is not OK")
}
t.status = ThreadSuspended
t.caller = nil
t.mux.Unlock()
caller.mux.Unlock()
caller.sendResumeValues(args, nil, nil)
return t.getResumeValues()
}
// This turns off the thread, cleaning up its close stack. The thread must be
// running.
func (t *Thread) end(args []Value, err error, exception interface{}) {
caller := t.caller
t.mux.Lock()
caller.mux.Lock()
defer t.mux.Unlock()
defer caller.mux.Unlock()
switch {
case t.status != ThreadOK:
panic("Called Thread.end on a non-running thread")
case caller.status != ThreadOK:
panic("Caller thread of ending thread is not OK")
}
close(t.resumeCh)
t.status = ThreadDead
t.caller = nil
err = t.cleanupCloseStack(nil, 0, err) // TODO: not nil
t.closeErr = err
caller.sendResumeValues(args, err, exception)
t.ReleaseBytes(2 << 10) // The goroutine will terminate after this
}
func (t *Thread) call(c Callable, args []Value, next Cont) error {
cont := c.Continuation(t, next)
t.Push(cont, args...)
return t.RunContinuation(cont)
}
func (t *Thread) getResumeValues() ([]Value, error) {
res := <-t.resumeCh
if res.exception != nil {
panic(res.exception)
}
return res.args, res.err
}
func (t *Thread) sendResumeValues(args []Value, err error, exception interface{}) {
t.resumeCh <- valuesError{args: args, err: err, exception: exception}
}
//
// Calling
//
// CallContext pushes a new runtime context on the thread's runtime and attempts
// to run f() in the thread. If the context runs out of resources while f() is
// running, all operations should abort and the CallContext should return
// immediately and not finalizing pending to-be-closed values.
//
// Otherwise (even if f() returns an error), pending to-be-closed values should
// be finalized.
//
// See quotas.md for details about this API.
func (t *Thread) CallContext(def RuntimeContextDef, f func() error) (ctx RuntimeContext, err error) {
t.PushContext(def)
c, h := t.CurrentCont(), t.closeStack.size()
defer func() {
ctx = t.PopContext()
if r := recover(); r != nil {
t.closeStack.truncate(h) // No resources to run that, so just discard it.
termErr, ok := r.(ContextTerminationError)
if !ok {
panic(r)
}
err = termErr
}
}()
err = t.cleanupCloseStack(c, h, f())
if t.GCPolicy() == IsolateGCPolicy {
t.runFinalizers(t.weakRefPool.ExtractAllMarkedFinalize())
}
if err != nil {
t.setStatus(StatusError)
}
return
}
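A minimal sketch of how a caller might use the CallContext contract implemented above; the wrapper name and the way def is obtained are assumptions for illustration, not taken from this file:

// runLimited is a hypothetical helper: it runs work under the resource
// limits described by def and returns the popped context plus any error.
func runLimited(t *Thread, def RuntimeContextDef, work func() error) (RuntimeContext, error) {
	ctx, err := t.CallContext(def, func() error {
		// Everything executed here is charged against the pushed context;
		// if the context runs out of resources, CallContext returns immediately.
		return work()
	})
	return ctx, err
}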
//
// close stack operations
//
type closeStack struct {
stack []Value
}
func (s closeStack) size() int {
return len(s.stack)
}
func (s *closeStack) push(v Value) {
s.stack = append(s.stack, v)
}
func (s *closeStack) pop() (Value, bool) {
sz := len(s.stack)
if sz == 0 {
return NilValue, false
}
sz--
v := s.stack[sz]
s.stack = s.stack[:sz]
return v, true
}
func (s *closeStack) truncate(h int) | {
sz := len(s.stack)
if sz > h {
s.stack = s.stack[:h]
}
} | identifier_body |
|
thread.go | Runtime
mux sync.Mutex
status ThreadStatus
closeErr error // Error that caused the thread to stop
currentCont Cont // Currently running continuation
resumeCh chan valuesError
caller *Thread // Who resumed this thread
// Depth of GoFunction calls in the thread. This should not exceed
// maxGoFunctionCallDepth. The aim is to avoid Go stack overflows that
// cannot be recovered from (note that this does not limit recursion for Lua
// functions).
goFunctionCallDepth int
DebugHooks
closeStack // Stack of pending to-be-closed values
}
// NewThread creates a new thread out of a Runtime. Its initial
// status is suspended. Call Resume to run it.
func NewThread(r *Runtime) *Thread {
r.RequireSize(unsafe.Sizeof(Thread{}) + 100) // 100 is my guess at the size of a channel
return &Thread{
resumeCh: make(chan valuesError),
status: ThreadSuspended,
Runtime: r,
}
}
// CurrentCont returns the continuation currently running (or suspended) in the
// thread.
func (t *Thread) CurrentCont() Cont {
return t.currentCont
}
// IsMain returns true if the thread is the runtime's main thread.
func (t *Thread) IsMain() bool {
return t == t.mainThread
}
const maxErrorsInMessageHandler = 10
var errErrorInMessageHandler = StringValue("error in error handling")
// RunContinuation runs the continuation c in the thread. It keeps running until
// the next continuation is nil or an error occurs, in which case it returns the
// error.
func (t *Thread) RunContinuation(c Cont) (err error) {
var next Cont
var errContCount = 0
_ = t.triggerCall(t, c)
for c != nil {
if t != t.gcThread {
t.runPendingFinalizers()
}
t.currentCont = c
next, err = c.RunInThread(t)
if err != nil {
rtErr := ToError(err)
if rtErr.Handled() {
return rtErr
}
err = rtErr.AddContext(c, -1)
errContCount++
if t.messageHandler != nil {
if errContCount > maxErrorsInMessageHandler {
return newHandledError(errErrorInMessageHandler)
}
next = t.messageHandler.Continuation(t, newMessageHandlerCont(c))
} else {
next = newMessageHandlerCont(c) | return
}
// This is to be able to close a suspended coroutine without completing it, but
// still allow cleaning up the to-be-closed variables. If this is put on the
// resume channel of a running thread, yield will cause a panic in the goroutine
// and that will be caught in the defer() clause below.
type threadClose struct{}
//
// Coroutine management
//
// Start starts the thread in a goroutine, giving it the callable c to run. the
// t.Resume() method needs to be called to provide arguments to the callable.
func (t *Thread) Start(c Callable) {
t.RequireBytes(2 << 10) // A goroutine starts off with 2k stack
go func() {
var (
args []Value
err error
)
// If there was a panic due to an exceeded quota, we need to end the
// thread and propagate that panic to the calling thread
defer func() {
r := recover()
if r != nil {
switch r.(type) {
case ContextTerminationError:
case threadClose:
// This means we want to close the coroutine, so no panic!
r = nil
default:
panic(r)
}
}
t.end(args, err, r)
}()
args, err = t.getResumeValues()
if err == nil {
next := NewTerminationWith(t.CurrentCont(), 0, true)
err = t.call(c, args, next)
args = next.Etc()
}
}()
}
// Status returns the status of a thread (suspended, running or dead).
func (t *Thread) Status() ThreadStatus {
return t.status
}
// Resume execution of a suspended thread. Its status switches to
// running while its caller's status switches to suspended.
func (t *Thread) Resume(caller *Thread, args []Value) ([]Value, error) {
t.mux.Lock()
if t.status != ThreadSuspended {
t.mux.Unlock()
switch t.status {
case ThreadDead:
return nil, errors.New("cannot resume dead thread")
default:
return nil, errors.New("cannot resume running thread")
}
}
caller.mux.Lock()
if caller.status != ThreadOK {
panic("Caller of thread to resume is not running")
}
t.caller = caller
t.status = ThreadOK
t.mux.Unlock()
caller.mux.Unlock()
t.sendResumeValues(args, nil, nil)
return caller.getResumeValues()
}
// Close a suspended thread. If successful, its status switches to dead. The
// boolean returned is true if it was possible to close the thread (i.e. it was
// suspended or already dead). The error is non-nil if there was an error in
// the cleanup process, or if the thread had already stopped with an error
// previously.
func (t *Thread) Close(caller *Thread) (bool, error) {
t.mux.Lock()
if t.status != ThreadSuspended {
t.mux.Unlock()
switch t.status {
case ThreadDead:
return true, t.closeErr
default:
return false, nil
}
}
caller.mux.Lock()
if caller.status != ThreadOK {
panic("Caller of thread to close is not running")
}
// The thread needs to go back to running to empty its close stack, before
// becoming dead.
t.caller = caller
t.status = ThreadOK
t.mux.Unlock()
caller.mux.Unlock()
t.sendResumeValues(nil, nil, threadClose{})
_, err := caller.getResumeValues()
return true, err
}
// Yield to the caller thread. The yielding thread's status switches to
// suspended. The caller's status must be OK.
func (t *Thread) Yield(args []Value) ([]Value, error) {
t.mux.Lock()
if t.status != ThreadOK {
panic("Thread to yield is not running")
}
caller := t.caller
if caller == nil {
t.mux.Unlock()
return nil, errors.New("cannot yield from main thread")
}
caller.mux.Lock()
if caller.status != ThreadOK {
panic("Caller of thread to yield is not OK")
}
t.status = ThreadSuspended
t.caller = nil
t.mux.Unlock()
caller.mux.Unlock()
caller.sendResumeValues(args, nil, nil)
return t.getResumeValues()
}
// This turns off the thread, cleaning up its close stack. The thread must be
// running.
func (t *Thread) end(args []Value, err error, exception interface{}) {
caller := t.caller
t.mux.Lock()
caller.mux.Lock()
defer t.mux.Unlock()
defer caller.mux.Unlock()
switch {
case t.status != ThreadOK:
panic("Called Thread.end on a non-running thread")
case caller.status != ThreadOK:
panic("Caller thread of ending thread is not OK")
}
close(t.resumeCh)
t.status = ThreadDead
t.caller = nil
err = t.cleanupCloseStack(nil, 0, err) // TODO: not nil
t.closeErr = err
caller.sendResumeValues(args, err, exception)
t.ReleaseBytes(2 << 10) // The goroutine will terminate after this
}
func (t *Thread) call(c Callable, args []Value, next Cont) error {
cont := c.Continuation(t, next)
t.Push(cont, args...)
return t.RunContinuation(cont)
}
func (t *Thread) getResumeValues() ([]Value, error) {
res := <-t.resumeCh
if res.exception != nil {
panic(res.exception)
}
return res.args, res.err
}
func (t *Thread) sendResumeValues(args []Value, err error, exception interface{}) {
t.resumeCh <- valuesError{args: args, err: err, exception: exception}
}
//
// Calling
//
// CallContext pushes a new runtime context on the thread's runtime and attempts
// to run f() in the thread. If the context runs out of resources while f() is
// running, all operations should abort and the CallContext should return
// immediately and not finalizing pending to-be-closed values.
//
// Otherwise (even if f() returns an error), pending to-be-closed values should
// be finalized.
//
// See quotas.md for details about this API.
func (t *Thread) CallContext(def RuntimeContextDef, f func() error) (ctx RuntimeContext, err error) {
t.PushContext(def)
c, h := t.CurrentCont(), t.closeStack.size()
defer func() {
ctx = t.PopContext()
if r := recover(); r != nil {
t.closeStack.truncate(h) // No resources to run that, so just discard it.
| }
next.Push(t.Runtime, ErrorValue(err))
}
c = next
} | random_line_split |