Column     Type          Stats
file_name  large_string  lengths 4–140
prefix     large_string  lengths 0–39k
suffix     large_string  lengths 0–36.1k
middle     large_string  lengths 0–29.4k
fim_type   large_string  4 classes
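The rows that follow are raw splits of single source files into the three string fields above. Assuming `prefix`, `middle`, and `suffix` partition the original file in order (which is how the records below line up), a row can be reassembled as in this minimal Python sketch; the `reassemble` helper and the shortened example row are illustrative, with field names taken from the schema above.

```python
# Minimal sketch: reassembling a fill-in-the-middle (FIM) row into the
# original source text, assuming prefix + middle + suffix partition the
# file in order. Field names mirror the columns listed above.

def reassemble(row: dict) -> str:
    """Concatenate the three split fields back into the full file text."""
    return row["prefix"] + row["middle"] + row["suffix"]

# Illustrative row shaped like the first record below (values shortened).
example_row = {
    "file_name": "backend.rs",
    "prefix": "... if let Some(storage_key) = storage_key.as_ref()",
    "middle": " { let ch = insert_into_memory_db::<H, _>(&mut mdb, ...)?; ... }",
    "suffix": " else { root_map = Some(map); } ...",
    "fim_type": "conditional_block",
}

print(reassemble(example_row)[:80])
```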
backend.rs
// Copyright 2017-2019 Parity Technologies (UK) Ltd. // This file is part of Substrate. // Substrate is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // Substrate is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with Substrate. If not, see <http://www.gnu.org/licenses/>. //! State machine backends. These manage the code and storage of contracts. use std::{error, fmt}; use std::cmp::Ord; use std::collections::HashMap; use std::marker::PhantomData; use log::warn; use hash_db::Hasher; use crate::trie_backend::TrieBackend; use crate::trie_backend_essence::TrieBackendStorage; use trie::{TrieMut, MemoryDB, child_trie_root, default_child_trie_root, TrieConfiguration}; use trie::trie_types::{TrieDBMut, Layout}; /// A state backend is used to read state data and can have changes committed /// to it. /// /// The clone operation (if implemented) should be cheap. pub trait Backend<H: Hasher> { /// An error type when fetching data is not possible. type Error: super::Error; /// Storage changes to be applied if committing type Transaction: Consolidate + Default; /// Type of trie backend storage. type TrieBackendStorage: TrieBackendStorage<H>; /// Get keyed storage or None if there is nothing associated. fn storage(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error>; /// Get keyed storage value hash or None if there is nothing associated. fn storage_hash(&self, key: &[u8]) -> Result<Option<H::Out>, Self::Error> { self.storage(key).map(|v| v.map(|v| H::hash(&v))) } /// Get keyed child storage or None if there is nothing associated. fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error>; /// Get child keyed storage value hash or None if there is nothing associated. fn child_storage_hash(&self, storage_key: &[u8], key: &[u8]) -> Result<Option<H::Out>, Self::Error> { self.child_storage(storage_key, key).map(|v| v.map(|v| H::hash(&v))) } /// true if a key exists in storage. fn exists_storage(&self, key: &[u8]) -> Result<bool, Self::Error> { Ok(self.storage(key)?.is_some()) } /// true if a key exists in child storage. fn exists_child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result<bool, Self::Error> { Ok(self.child_storage(storage_key, key)?.is_some()) } /// Retrieve all entries keys of child storage and call `f` for each of those keys. fn for_keys_in_child_storage<F: FnMut(&[u8])>(&self, storage_key: &[u8], f: F); /// Retrieve all entries keys of which start with the given prefix and /// call `f` for each of those keys. fn for_keys_with_prefix<F: FnMut(&[u8])>(&self, prefix: &[u8], f: F); /// Calculate the storage root, with given delta over what is already stored in /// the backend, and produce a "transaction" that can be used to commit. /// Does not include child storage updates. fn storage_root<I>(&self, delta: I) -> (H::Out, Self::Transaction) where I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>, H::Out: Ord; /// Calculate the child storage root, with given delta over what is already stored in /// the backend, and produce a "transaction" that can be used to commit. 
The second argument /// is true if child storage root equals default storage root. fn child_storage_root<I>(&self, storage_key: &[u8], delta: I) -> (Vec<u8>, bool, Self::Transaction) where I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>, H::Out: Ord; /// Get all key/value pairs into a Vec. fn pairs(&self) -> Vec<(Vec<u8>, Vec<u8>)>; /// Get all keys with given prefix fn keys(&self, prefix: &[u8]) -> Vec<Vec<u8>> { let mut all = Vec::new(); self.for_keys_with_prefix(prefix, |k| all.push(k.to_vec())); all } /// Get all keys of child storage with given prefix fn child_keys(&self, child_storage_key: &[u8], prefix: &[u8]) -> Vec<Vec<u8>> { let mut all = Vec::new(); self.for_keys_in_child_storage(child_storage_key, |k| { if k.starts_with(prefix) { all.push(k.to_vec()); } }); all } /// Try convert into trie backend. fn as_trie_backend(&mut self) -> Option<&TrieBackend<Self::TrieBackendStorage, H>>; /// Calculate the storage root, with given delta over what is already stored /// in the backend, and produce a "transaction" that can be used to commit. /// Does include child storage updates. fn full_storage_root<I1, I2i, I2>( &self, delta: I1, child_deltas: I2) -> (H::Out, Self::Transaction) where I1: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>, I2i: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>, I2: IntoIterator<Item=(Vec<u8>, I2i)>, <H as Hasher>::Out: Ord, { let mut txs: Self::Transaction = Default::default(); let mut child_roots: Vec<_> = Default::default(); // child first for (storage_key, child_delta) in child_deltas { let (child_root, empty, child_txs) = self.child_storage_root(&storage_key[..], child_delta); txs.consolidate(child_txs); if empty { child_roots.push((storage_key, None)); } else { child_roots.push((storage_key, Some(child_root))); } } let (root, parent_txs) = self.storage_root( delta.into_iter().chain(child_roots.into_iter()) ); txs.consolidate(parent_txs); (root, txs) } } /// Trait that allows consolidate two transactions together. pub trait Consolidate { /// Consolidate two transactions into one. fn consolidate(&mut self, other: Self); } impl Consolidate for () { fn consolidate(&mut self, _: Self) { () } } impl Consolidate for Vec<(Option<Vec<u8>>, Vec<u8>, Option<Vec<u8>>)> { fn consolidate(&mut self, mut other: Self) { self.append(&mut other); } } impl<H: Hasher, KF: trie::KeyFunction<H>> Consolidate for trie::GenericMemoryDB<H, KF> { fn consolidate(&mut self, other: Self) { trie::GenericMemoryDB::consolidate(self, other) } } /// Error impossible. // FIXME: use `!` type when stabilized. https://github.com/rust-lang/rust/issues/35121 #[derive(Debug)] pub enum Void {} impl fmt::Display for Void { fn fmt(&self, _: &mut fmt::Formatter) -> fmt::Result { match *self {} } } impl error::Error for Void { fn description(&self) -> &str { "unreachable error" } } /// In-memory backend. Fully recomputes tries on each commit but useful for /// tests. 
pub struct InMemory<H: Hasher> { inner: HashMap<Option<Vec<u8>>, HashMap<Vec<u8>, Vec<u8>>>, trie: Option<TrieBackend<MemoryDB<H>, H>>, _hasher: PhantomData<H>, } impl<H: Hasher> Default for InMemory<H> { fn default() -> Self { InMemory { inner: Default::default(), trie: None, _hasher: PhantomData, } } } impl<H: Hasher> Clone for InMemory<H> { fn clone(&self) -> Self { InMemory { inner: self.inner.clone(), trie: None, _hasher: PhantomData, } } } impl<H: Hasher> PartialEq for InMemory<H> { fn eq(&self, other: &Self) -> bool { self.inner.eq(&other.inner) } } impl<H: Hasher> InMemory<H> { /// Copy the state, with applied updates pub fn update(&self, changes: <Self as Backend<H>>::Transaction) -> Self { let mut inner: HashMap<_, _> = self.inner.clone(); for (storage_key, key, val) in changes { match val { Some(v) => { inner.entry(storage_key).or_default().insert(key, v); }, None => { inner.entry(storage_key).or_default().remove(&key); }, } } inner.into() } } impl<H: Hasher> From<HashMap<Option<Vec<u8>>, HashMap<Vec<u8>, Vec<u8>>>> for InMemory<H> { fn from(inner: HashMap<Option<Vec<u8>>, HashMap<Vec<u8>, Vec<u8>>>) -> Self { InMemory { inner: inner, trie: None, _hasher: PhantomData, } } } impl<H: Hasher> From<HashMap<Vec<u8>, Vec<u8>>> for InMemory<H> { fn from(inner: HashMap<Vec<u8>, Vec<u8>>) -> Self { let mut expanded = HashMap::new(); expanded.insert(None, inner); InMemory { inner: expanded, trie: None, _hasher: PhantomData, } } } impl<H: Hasher> From<Vec<(Option<Vec<u8>>, Vec<u8>, Option<Vec<u8>>)>> for InMemory<H> { fn from(inner: Vec<(Option<Vec<u8>>, Vec<u8>, Option<Vec<u8>>)>) -> Self { let mut expanded: HashMap<Option<Vec<u8>>, HashMap<Vec<u8>, Vec<u8>>> = HashMap::new(); for (child_key, key, value) in inner { if let Some(value) = value { expanded.entry(child_key).or_default().insert(key, value); } } expanded.into() } } impl super::Error for Void {} impl<H: Hasher> InMemory<H> { /// child storage key iterator pub fn child_storage_keys(&self) -> impl Iterator<Item=&[u8]> { self.inner.iter().filter_map(|item| item.0.as_ref().map(|v|&v[..])) } } impl<H: Hasher> Backend<H> for InMemory<H> { type Error = Void; type Transaction = Vec<(Option<Vec<u8>>, Vec<u8>, Option<Vec<u8>>)>; type TrieBackendStorage = MemoryDB<H>; fn storage(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> { Ok(self.inner.get(&None).and_then(|map| map.get(key).map(Clone::clone))) } fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> { Ok(self.inner.get(&Some(storage_key.to_vec())).and_then(|map| map.get(key).map(Clone::clone))) } fn exists_storage(&self, key: &[u8]) -> Result<bool, Self::Error> { Ok(self.inner.get(&None).map(|map| map.get(key).is_some()).unwrap_or(false)) } fn for_keys_with_prefix<F: FnMut(&[u8])>(&self, prefix: &[u8], f: F) { self.inner.get(&None).map(|map| map.keys().filter(|key| key.starts_with(prefix)).map(|k| &**k).for_each(f)); } fn for_keys_in_child_storage<F: FnMut(&[u8])>(&self, storage_key: &[u8], mut f: F) { self.inner.get(&Some(storage_key.to_vec())).map(|map| map.keys().for_each(|k| f(&k))); } fn storage_root<I>(&self, delta: I) -> (H::Out, Self::Transaction) where I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>, <H as Hasher>::Out: Ord, { let existing_pairs = self.inner.get(&None) .into_iter() .flat_map(|map| map.iter().map(|(k, v)| (k.clone(), Some(v.clone())))); let transaction: Vec<_> = delta.into_iter().collect(); let root = Layout::<H>::trie_root(existing_pairs.chain(transaction.iter().cloned()) .collect::<HashMap<_, _>>() 
.into_iter() .filter_map(|(k, maybe_val)| maybe_val.map(|val| (k, val))) ); let full_transaction = transaction.into_iter().map(|(k, v)| (None, k, v)).collect(); (root, full_transaction) } fn child_storage_root<I>(&self, storage_key: &[u8], delta: I) -> (Vec<u8>, bool, Self::Transaction) where I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>, H::Out: Ord { let storage_key = storage_key.to_vec(); let existing_pairs = self.inner.get(&Some(storage_key.clone())) .into_iter() .flat_map(|map| map.iter().map(|(k, v)| (k.clone(), Some(v.clone())))); let transaction: Vec<_> = delta.into_iter().collect(); let root = child_trie_root::<Layout<H>, _, _, _>( &storage_key, existing_pairs.chain(transaction.iter().cloned()) .collect::<HashMap<_, _>>() .into_iter() .filter_map(|(k, maybe_val)| maybe_val.map(|val| (k, val))) ); let full_transaction = transaction.into_iter().map(|(k, v)| (Some(storage_key.clone()), k, v)).collect(); let is_default = root == default_child_trie_root::<Layout<H>>(&storage_key); (root, is_default, full_transaction) } fn pairs(&self) -> Vec<(Vec<u8>, Vec<u8>)> { self.inner.get(&None) .into_iter() .flat_map(|map| map.iter().map(|(k, v)| (k.clone(), v.clone()))) .collect() } fn keys(&self, prefix: &[u8]) -> Vec<Vec<u8>> { self.inner.get(&None) .into_iter() .flat_map(|map| map.keys().filter(|k| k.starts_with(prefix)).cloned()) .collect() } fn child_keys(&self, storage_key: &[u8], prefix: &[u8]) -> Vec<Vec<u8>> { self.inner.get(&Some(storage_key.to_vec())) .into_iter() .flat_map(|map| map.keys().filter(|k| k.starts_with(prefix)).cloned()) .collect() } fn as_trie_backend(&mut self)-> Option<&TrieBackend<Self::TrieBackendStorage, H>> { let mut mdb = MemoryDB::default(); let mut root = None; let mut new_child_roots = Vec::new(); let mut root_map = None; for (storage_key, map) in &self.inner { if let Some(storage_key) = storage_key.as_ref()
else { root_map = Some(map); } } // root handling if let Some(map) = root_map.take() { root = Some(insert_into_memory_db::<H, _>( &mut mdb, map.clone().into_iter().chain(new_child_roots.into_iter()) )?); } let root = match root { Some(root) => root, None => insert_into_memory_db::<H, _>(&mut mdb, ::std::iter::empty())?, }; self.trie = Some(TrieBackend::new(mdb, root)); self.trie.as_ref() } } /// Insert input pairs into memory db. pub(crate) fn insert_into_memory_db<H, I>(mdb: &mut MemoryDB<H>, input: I) -> Option<H::Out> where H: Hasher, I: IntoIterator<Item=(Vec<u8>, Vec<u8>)>, { let mut root = <H as Hasher>::Out::default(); { let mut trie = TrieDBMut::<H>::new(mdb, &mut root); for (key, value) in input { if let Err(e) = trie.insert(&key, &value) { warn!(target: "trie", "Failed to write to trie: {}", e); return None; } } } Some(root) }
{ let ch = insert_into_memory_db::<H, _>(&mut mdb, map.clone().into_iter())?; new_child_roots.push((storage_key.clone(), ch.as_ref().into())); }
conditional_block
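A row like the one above (`fim_type` = `conditional_block`) would typically be consumed by formatting `prefix` and `suffix` into a FIM prompt and comparing the model's completion against the gold `middle`. The sketch below assumes StarCoder-style sentinel tokens (`<fim_prefix>`, `<fim_suffix>`, `<fim_middle>`) and a simple whitespace-insensitive exact-match metric; both are assumptions, not something this dump specifies.

```python
# Sketch: building a prefix-suffix-middle (PSM) prompt from a row and
# scoring a model completion by exact match against the gold `middle`.
# The sentinel tokens below are an assumption (StarCoder-style); other
# models use different special tokens.

FIM_PREFIX, FIM_SUFFIX, FIM_MIDDLE = "<fim_prefix>", "<fim_suffix>", "<fim_middle>"

def build_psm_prompt(row: dict) -> str:
    """Format a dataset row into a FIM prompt for a code model."""
    return f"{FIM_PREFIX}{row['prefix']}{FIM_SUFFIX}{row['suffix']}{FIM_MIDDLE}"

def exact_match(completion: str, row: dict) -> bool:
    """Whitespace-insensitive comparison; stricter metrics are possible."""
    return completion.strip() == row["middle"].strip()
```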
backend.rs
// Copyright 2017-2019 Parity Technologies (UK) Ltd. // This file is part of Substrate. // Substrate is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // Substrate is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with Substrate. If not, see <http://www.gnu.org/licenses/>. //! State machine backends. These manage the code and storage of contracts. use std::{error, fmt}; use std::cmp::Ord; use std::collections::HashMap; use std::marker::PhantomData; use log::warn; use hash_db::Hasher; use crate::trie_backend::TrieBackend; use crate::trie_backend_essence::TrieBackendStorage; use trie::{TrieMut, MemoryDB, child_trie_root, default_child_trie_root, TrieConfiguration}; use trie::trie_types::{TrieDBMut, Layout}; /// A state backend is used to read state data and can have changes committed /// to it. /// /// The clone operation (if implemented) should be cheap. pub trait Backend<H: Hasher> { /// An error type when fetching data is not possible. type Error: super::Error; /// Storage changes to be applied if committing type Transaction: Consolidate + Default; /// Type of trie backend storage. type TrieBackendStorage: TrieBackendStorage<H>; /// Get keyed storage or None if there is nothing associated. fn storage(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error>; /// Get keyed storage value hash or None if there is nothing associated. fn storage_hash(&self, key: &[u8]) -> Result<Option<H::Out>, Self::Error> { self.storage(key).map(|v| v.map(|v| H::hash(&v))) } /// Get keyed child storage or None if there is nothing associated. fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error>; /// Get child keyed storage value hash or None if there is nothing associated. fn child_storage_hash(&self, storage_key: &[u8], key: &[u8]) -> Result<Option<H::Out>, Self::Error> { self.child_storage(storage_key, key).map(|v| v.map(|v| H::hash(&v))) } /// true if a key exists in storage. fn exists_storage(&self, key: &[u8]) -> Result<bool, Self::Error> { Ok(self.storage(key)?.is_some()) } /// true if a key exists in child storage. fn exists_child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result<bool, Self::Error> { Ok(self.child_storage(storage_key, key)?.is_some()) } /// Retrieve all entries keys of child storage and call `f` for each of those keys. fn for_keys_in_child_storage<F: FnMut(&[u8])>(&self, storage_key: &[u8], f: F); /// Retrieve all entries keys of which start with the given prefix and /// call `f` for each of those keys. fn for_keys_with_prefix<F: FnMut(&[u8])>(&self, prefix: &[u8], f: F); /// Calculate the storage root, with given delta over what is already stored in /// the backend, and produce a "transaction" that can be used to commit. /// Does not include child storage updates. fn storage_root<I>(&self, delta: I) -> (H::Out, Self::Transaction) where I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>, H::Out: Ord; /// Calculate the child storage root, with given delta over what is already stored in /// the backend, and produce a "transaction" that can be used to commit. 
The second argument /// is true if child storage root equals default storage root. fn child_storage_root<I>(&self, storage_key: &[u8], delta: I) -> (Vec<u8>, bool, Self::Transaction) where I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>, H::Out: Ord; /// Get all key/value pairs into a Vec. fn pairs(&self) -> Vec<(Vec<u8>, Vec<u8>)>; /// Get all keys with given prefix fn keys(&self, prefix: &[u8]) -> Vec<Vec<u8>> { let mut all = Vec::new(); self.for_keys_with_prefix(prefix, |k| all.push(k.to_vec())); all } /// Get all keys of child storage with given prefix fn child_keys(&self, child_storage_key: &[u8], prefix: &[u8]) -> Vec<Vec<u8>> { let mut all = Vec::new(); self.for_keys_in_child_storage(child_storage_key, |k| { if k.starts_with(prefix) { all.push(k.to_vec()); } }); all } /// Try convert into trie backend. fn as_trie_backend(&mut self) -> Option<&TrieBackend<Self::TrieBackendStorage, H>>; /// Calculate the storage root, with given delta over what is already stored /// in the backend, and produce a "transaction" that can be used to commit. /// Does include child storage updates. fn full_storage_root<I1, I2i, I2>( &self, delta: I1, child_deltas: I2) -> (H::Out, Self::Transaction) where I1: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>, I2i: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>, I2: IntoIterator<Item=(Vec<u8>, I2i)>, <H as Hasher>::Out: Ord, { let mut txs: Self::Transaction = Default::default(); let mut child_roots: Vec<_> = Default::default(); // child first for (storage_key, child_delta) in child_deltas { let (child_root, empty, child_txs) = self.child_storage_root(&storage_key[..], child_delta); txs.consolidate(child_txs); if empty { child_roots.push((storage_key, None)); } else { child_roots.push((storage_key, Some(child_root))); } } let (root, parent_txs) = self.storage_root( delta.into_iter().chain(child_roots.into_iter()) ); txs.consolidate(parent_txs); (root, txs) } } /// Trait that allows consolidate two transactions together. pub trait Consolidate { /// Consolidate two transactions into one. fn consolidate(&mut self, other: Self); } impl Consolidate for () { fn consolidate(&mut self, _: Self) { () } } impl Consolidate for Vec<(Option<Vec<u8>>, Vec<u8>, Option<Vec<u8>>)> { fn consolidate(&mut self, mut other: Self) { self.append(&mut other); } } impl<H: Hasher, KF: trie::KeyFunction<H>> Consolidate for trie::GenericMemoryDB<H, KF> { fn consolidate(&mut self, other: Self) { trie::GenericMemoryDB::consolidate(self, other) } } /// Error impossible. // FIXME: use `!` type when stabilized. https://github.com/rust-lang/rust/issues/35121 #[derive(Debug)] pub enum Void {} impl fmt::Display for Void { fn fmt(&self, _: &mut fmt::Formatter) -> fmt::Result { match *self {} } } impl error::Error for Void { fn description(&self) -> &str { "unreachable error" } } /// In-memory backend. Fully recomputes tries on each commit but useful for /// tests. 
pub struct InMemory<H: Hasher> { inner: HashMap<Option<Vec<u8>>, HashMap<Vec<u8>, Vec<u8>>>, trie: Option<TrieBackend<MemoryDB<H>, H>>, _hasher: PhantomData<H>, } impl<H: Hasher> Default for InMemory<H> { fn default() -> Self { InMemory { inner: Default::default(), trie: None, _hasher: PhantomData, } } } impl<H: Hasher> Clone for InMemory<H> { fn clone(&self) -> Self { InMemory { inner: self.inner.clone(), trie: None, _hasher: PhantomData, } } } impl<H: Hasher> PartialEq for InMemory<H> { fn eq(&self, other: &Self) -> bool { self.inner.eq(&other.inner) } } impl<H: Hasher> InMemory<H> { /// Copy the state, with applied updates pub fn update(&self, changes: <Self as Backend<H>>::Transaction) -> Self { let mut inner: HashMap<_, _> = self.inner.clone(); for (storage_key, key, val) in changes { match val { Some(v) => { inner.entry(storage_key).or_default().insert(key, v); }, None => { inner.entry(storage_key).or_default().remove(&key); }, } } inner.into() } } impl<H: Hasher> From<HashMap<Option<Vec<u8>>, HashMap<Vec<u8>, Vec<u8>>>> for InMemory<H> { fn from(inner: HashMap<Option<Vec<u8>>, HashMap<Vec<u8>, Vec<u8>>>) -> Self { InMemory { inner: inner, trie: None, _hasher: PhantomData, } } } impl<H: Hasher> From<HashMap<Vec<u8>, Vec<u8>>> for InMemory<H> { fn from(inner: HashMap<Vec<u8>, Vec<u8>>) -> Self { let mut expanded = HashMap::new(); expanded.insert(None, inner); InMemory { inner: expanded, trie: None, _hasher: PhantomData, } } } impl<H: Hasher> From<Vec<(Option<Vec<u8>>, Vec<u8>, Option<Vec<u8>>)>> for InMemory<H> { fn from(inner: Vec<(Option<Vec<u8>>, Vec<u8>, Option<Vec<u8>>)>) -> Self { let mut expanded: HashMap<Option<Vec<u8>>, HashMap<Vec<u8>, Vec<u8>>> = HashMap::new(); for (child_key, key, value) in inner { if let Some(value) = value { expanded.entry(child_key).or_default().insert(key, value); } } expanded.into() } } impl super::Error for Void {} impl<H: Hasher> InMemory<H> { /// child storage key iterator pub fn child_storage_keys(&self) -> impl Iterator<Item=&[u8]> { self.inner.iter().filter_map(|item| item.0.as_ref().map(|v|&v[..])) } } impl<H: Hasher> Backend<H> for InMemory<H> { type Error = Void; type Transaction = Vec<(Option<Vec<u8>>, Vec<u8>, Option<Vec<u8>>)>; type TrieBackendStorage = MemoryDB<H>; fn storage(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> { Ok(self.inner.get(&None).and_then(|map| map.get(key).map(Clone::clone))) } fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> { Ok(self.inner.get(&Some(storage_key.to_vec())).and_then(|map| map.get(key).map(Clone::clone))) } fn exists_storage(&self, key: &[u8]) -> Result<bool, Self::Error>
fn for_keys_with_prefix<F: FnMut(&[u8])>(&self, prefix: &[u8], f: F) { self.inner.get(&None).map(|map| map.keys().filter(|key| key.starts_with(prefix)).map(|k| &**k).for_each(f)); } fn for_keys_in_child_storage<F: FnMut(&[u8])>(&self, storage_key: &[u8], mut f: F) { self.inner.get(&Some(storage_key.to_vec())).map(|map| map.keys().for_each(|k| f(&k))); } fn storage_root<I>(&self, delta: I) -> (H::Out, Self::Transaction) where I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>, <H as Hasher>::Out: Ord, { let existing_pairs = self.inner.get(&None) .into_iter() .flat_map(|map| map.iter().map(|(k, v)| (k.clone(), Some(v.clone())))); let transaction: Vec<_> = delta.into_iter().collect(); let root = Layout::<H>::trie_root(existing_pairs.chain(transaction.iter().cloned()) .collect::<HashMap<_, _>>() .into_iter() .filter_map(|(k, maybe_val)| maybe_val.map(|val| (k, val))) ); let full_transaction = transaction.into_iter().map(|(k, v)| (None, k, v)).collect(); (root, full_transaction) } fn child_storage_root<I>(&self, storage_key: &[u8], delta: I) -> (Vec<u8>, bool, Self::Transaction) where I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>, H::Out: Ord { let storage_key = storage_key.to_vec(); let existing_pairs = self.inner.get(&Some(storage_key.clone())) .into_iter() .flat_map(|map| map.iter().map(|(k, v)| (k.clone(), Some(v.clone())))); let transaction: Vec<_> = delta.into_iter().collect(); let root = child_trie_root::<Layout<H>, _, _, _>( &storage_key, existing_pairs.chain(transaction.iter().cloned()) .collect::<HashMap<_, _>>() .into_iter() .filter_map(|(k, maybe_val)| maybe_val.map(|val| (k, val))) ); let full_transaction = transaction.into_iter().map(|(k, v)| (Some(storage_key.clone()), k, v)).collect(); let is_default = root == default_child_trie_root::<Layout<H>>(&storage_key); (root, is_default, full_transaction) } fn pairs(&self) -> Vec<(Vec<u8>, Vec<u8>)> { self.inner.get(&None) .into_iter() .flat_map(|map| map.iter().map(|(k, v)| (k.clone(), v.clone()))) .collect() } fn keys(&self, prefix: &[u8]) -> Vec<Vec<u8>> { self.inner.get(&None) .into_iter() .flat_map(|map| map.keys().filter(|k| k.starts_with(prefix)).cloned()) .collect() } fn child_keys(&self, storage_key: &[u8], prefix: &[u8]) -> Vec<Vec<u8>> { self.inner.get(&Some(storage_key.to_vec())) .into_iter() .flat_map(|map| map.keys().filter(|k| k.starts_with(prefix)).cloned()) .collect() } fn as_trie_backend(&mut self)-> Option<&TrieBackend<Self::TrieBackendStorage, H>> { let mut mdb = MemoryDB::default(); let mut root = None; let mut new_child_roots = Vec::new(); let mut root_map = None; for (storage_key, map) in &self.inner { if let Some(storage_key) = storage_key.as_ref() { let ch = insert_into_memory_db::<H, _>(&mut mdb, map.clone().into_iter())?; new_child_roots.push((storage_key.clone(), ch.as_ref().into())); } else { root_map = Some(map); } } // root handling if let Some(map) = root_map.take() { root = Some(insert_into_memory_db::<H, _>( &mut mdb, map.clone().into_iter().chain(new_child_roots.into_iter()) )?); } let root = match root { Some(root) => root, None => insert_into_memory_db::<H, _>(&mut mdb, ::std::iter::empty())?, }; self.trie = Some(TrieBackend::new(mdb, root)); self.trie.as_ref() } } /// Insert input pairs into memory db. 
pub(crate) fn insert_into_memory_db<H, I>(mdb: &mut MemoryDB<H>, input: I) -> Option<H::Out> where H: Hasher, I: IntoIterator<Item=(Vec<u8>, Vec<u8>)>, { let mut root = <H as Hasher>::Out::default(); { let mut trie = TrieDBMut::<H>::new(mdb, &mut root); for (key, value) in input { if let Err(e) = trie.insert(&key, &value) { warn!(target: "trie", "Failed to write to trie: {}", e); return None; } } } Some(root) }
{ Ok(self.inner.get(&None).map(|map| map.get(key).is_some()).unwrap_or(false)) }
identifier_body
manager.go
package xym import ( "sync" "github.com/lightpaw/male7/pb/shared_proto" "sync/atomic" "sort" "github.com/lightpaw/male7/util/imath" "github.com/lightpaw/male7/util/sortkeys" atomic2 "github.com/lightpaw/male7/util/atomic" "github.com/lightpaw/male7/service/herosnapshot/snapshotdata" "github.com/lightpaw/male7/util/idbytes" "github.com/lightpaw/male7/util/u64" "github.com/lightpaw/pbutil" "github.com/lightpaw/male7/gen/pb/xuanyuan" "github.com/lightpaw/male7/util/must" "github.com/lightpaw/male7/pb/server_proto" "time" "github.com/lightpaw/male7/util/timeutil" ) func NewManager(rankCount uint64) *XuanyuanManager { return &XuanyuanManager{ rankCount: rankCount, challengerMap: make(map[int64]*XyHero), } } type XuanyuanManager struct { sync.RWMutex rankCount uint64 // 排行榜数据,在排行榜中的玩家都在上面 rrl RoRankList // 挑战者数据 challengerMap map[int64]*XyHero } func (m *XuanyuanManager) Encode(proto *server_proto.XuanyuanModuleProto
:= m.Get() if r != nil { proto.UpdateTime = timeutil.Marshal64(r.updateTime) proto.RankHero = make([]*server_proto.XuanyuanRankHeroProto, 0, len(r.rankHeros)) for _, v := range r.rankHeros { _, mirror := v.GetMirror() proto.RankHero = append(proto.RankHero, &server_proto.XuanyuanRankHeroProto{ HeroId: v.heroId, Score: v.score.Load(), RankScore: v.rankScore, Win: v.win.Load(), Lose: v.lose.Load(), Mirror: mirror, }) } } m.RLock() defer m.RUnlock() for _, v := range m.challengerMap { proto.Challenger = append(proto.Challenger, &server_proto.XuanyuanRankHeroProto{ HeroId: v.heroId, Score: v.score, Win: v.win, Lose: v.lose, Mirror: v.combatMirror, }) } } func (m *XuanyuanManager) Unmarshal(proto *server_proto.XuanyuanModuleProto) { if proto == nil { return } n := len(proto.RankHero) newRo := &RoRank{ heroMap: make(map[int64]*XyRankHero, n), rankHeros: make([]*XyRankHero, 0, n), updateTime: timeutil.Unix64(proto.UpdateTime), } for i, v := range proto.RankHero { rank := i + 1 newHero := newRankHero(v.HeroId, u64.FromInt64(int64(v.Score)), u64.FromInt64(int64(v.RankScore)), v.Win, v.Lose, rank, v.Mirror) newRo.heroMap[newHero.heroId] = newHero newRo.rankHeros = append(newRo.rankHeros, newHero) } m.rrl.set(newRo) m.Lock() defer m.Unlock() for _, v := range proto.Challenger { m.addChallenger(v.HeroId, v.Score, v.Win, v.Lose, v.Mirror) } } func (m *XuanyuanManager) Get() *RoRank { return m.rrl.Get() } func (m *XuanyuanManager) getAndClearChallenger() map[int64]*XyHero { m.Lock() defer m.Unlock() toReturn := m.challengerMap m.challengerMap = make(map[int64]*XyHero) return toReturn } func (m *XuanyuanManager) Update(updateTime time.Time, gmReset bool) bool { prev := m.Get() if !gmReset && prev != nil && !prev.updateTime.Before(updateTime) { return false } heroMap := m.getAndClearChallenger() m.rrl.update(heroMap, int(m.rankCount), updateTime, prev) return true } func (m *XuanyuanManager) AddChallenger(heroId int64, score, win, lose uint64, player *shared_proto.CombatPlayerProto) { m.Lock() defer m.Unlock() m.addChallenger(heroId, score, win, lose, player) } func (m *XuanyuanManager) addChallenger(heroId int64, score, win, lose uint64, player *shared_proto.CombatPlayerProto) { m.challengerMap[heroId] = &XyHero{ heroId: heroId, score: score, win: win, lose: lose, combatMirror: player, } } type RoRankList struct { v atomic.Value } func (r *RoRankList) Get() *RoRank { if rank := r.v.Load(); rank != nil { return rank.(*RoRank) } return nil } func (r *RoRankList) set(toSet *RoRank) { r.v.Store(toSet) } func (r *RoRankList) update(newHeroMap map[int64]*XyHero, rankCount int, updateTime time.Time, prev *RoRank) { // 单线程更新 //if len(newHeroMap) <= 0 && prev != nil { // // 改个时间 // r.set(&RoRank{ // heroMap: prev.heroMap, // rankHeros: prev.rankHeros, // updateTime: updateTime, // }) // return //} pa := make([]*sortkeys.U64K2V, 0, len(newHeroMap)) if prev != nil { for heroId, v := range prev.heroMap { // 如果在榜单中,已榜单为准 delete(newHeroMap, heroId) // 积分 + 战力 _, m := v.GetMirror() var fightAmount uint64 if m != nil { fightAmount = u64.FromInt32(m.TotalFightAmount) } pa = append(pa, &sortkeys.U64K2V{ K1: v.score.Load(), K2: fightAmount, V: heroId, }) } } for heroId, v := range newHeroMap { var fightAmount uint64 if v.combatMirror != nil { fightAmount = u64.FromInt32(v.combatMirror.TotalFightAmount) } pa = append(pa, &sortkeys.U64K2V{ K1: v.score, K2: fightAmount, V: heroId, }) } sort.Sort(sort.Reverse(sortkeys.U64K2VSlice(pa))) n := imath.Min(len(pa), rankCount) newRo := &RoRank{ heroMap: make(map[int64]*XyRankHero, 
n), rankHeros: make([]*XyRankHero, 0, n), updateTime: updateTime, } for i := 0; i < n; i++ { rank := i + 1 p := pa[i] score := p.K1 heroId := p.I64Value() var newHero *XyRankHero if prev != nil { prevHero := prev.GetHero(heroId) if prevHero != nil { newHero = prevHero.copy(score, rank) } } if newHero == nil { challenger := newHeroMap[heroId] newHero = challenger.newRankHero(rank) } newRo.heroMap[heroId] = newHero newRo.rankHeros = append(newRo.rankHeros, newHero) } r.set(newRo) } type RoRank struct { heroMap map[int64]*XyRankHero rankHeros []*XyRankHero updateTime time.Time } func (m *RoRank) GetUpdateTime() time.Time { return m.updateTime } func (m *RoRank) RankCount() int { return len(m.rankHeros) } func (m *RoRank) GetHero(heroId int64) *XyRankHero { return m.heroMap[heroId] } func (m *RoRank) GetHeroByRank(rank int) *XyRankHero { if rank > 0 && rank <= len(m.rankHeros) { return m.rankHeros[rank-1] } return nil } func (m *RoRank) Range(f func(hero *XyRankHero) (toContinue bool)) { for _, v := range m.rankHeros { if !f(v) { break } } } func newRankHero(heroId int64, score, rankScore, win, lose uint64, rank int, combatMirror *shared_proto.CombatPlayerProto) *XyRankHero { newHero := &XyRankHero{ heroId: heroId, score: atomic2.NewUint64(score), rankScore: rankScore, rank: rank, win: atomic2.NewUint64(win), lose: atomic2.NewUint64(lose), combatMirrorRef: &atomic.Value{}, } newHero.SetMirror(combatMirror, int64(rank)) return newHero } type XyRankHero struct { // 玩家id heroId int64 // 当前积分 score *atomic2.Uint64 // 排名积分 rankScore uint64 // 名次 rank int // 胜利次数 win *atomic2.Uint64 // 失败次数 lose *atomic2.Uint64 // 挑战镜像 combatMirrorRef *atomic.Value targetBytesCache atomic.Value } func (hero *XyRankHero) copy(rankScore uint64, rank int) *XyRankHero { newHero := &XyRankHero{ heroId: hero.heroId, score: hero.score, rankScore: rankScore, rank: rank, win: hero.win, lose: hero.lose, combatMirrorRef: hero.combatMirrorRef, } return newHero } func (hero *XyRankHero) Id() int64 { return hero.heroId } func (hero *XyRankHero) Rank() int { return hero.rank } func (hero *XyRankHero) GetScore() uint64 { return hero.score.Load() } func (hero *XyRankHero) SetScore(toSet uint64) { hero.score.Store(toSet) } func (hero *XyRankHero) GetWin() uint64 { return hero.win.Load() } func (hero *XyRankHero) IncWin() uint64 { amt := hero.win.Inc() hero.clearTargetBytesCache() return amt } func (hero *XyRankHero) GetLose() uint64 { return hero.lose.Load() } func (hero *XyRankHero) IncLose() uint64 { amt := hero.lose.Inc() hero.clearTargetBytesCache() return amt } func (hero *XyRankHero) EncodeTarget(getter func(int64) *snapshotdata.HeroSnapshot) []byte { cache := hero.targetBytesCache.Load() if cache != nil { if b, ok := cache.([]byte); ok && len(b) > 0 { return b } } proto := hero.encodeTarget(getter) protoBytes := must.Marshal(proto) hero.targetBytesCache.Store(protoBytes) return protoBytes } var emptyBytes = make([]byte, 0) func (hero *XyRankHero) clearTargetBytesCache() { hero.targetBytesCache.Store(emptyBytes) } func (hero *XyRankHero) encodeTarget(getter func(int64) *snapshotdata.HeroSnapshot) *shared_proto.XuanyuanTargetProto { proto := &shared_proto.XuanyuanTargetProto{} heroSnapshot := getter(hero.Id()) if heroSnapshot != nil { proto.Hero = heroSnapshot.EncodeBasic4Client() } else { proto.Hero = idbytes.HeroBasicProto(hero.Id()) } proto.Win = u64.Int32(hero.GetWin()) proto.Lose = u64.Int32(hero.GetLose()) proto.Score = u64.Int32(hero.rankScore) ref := hero.getMirrorRef() proto.FightAmount = 
ref.combatMirror.TotalFightAmount return proto } func (hero *XyRankHero) getMirrorRef() *combatMirrorWithVersion { return hero.combatMirrorRef.Load().(*combatMirrorWithVersion) } func (hero *XyRankHero) GetMirror() (int64, *shared_proto.CombatPlayerProto) { ref := hero.getMirrorRef() return ref.version, ref.combatMirror } func (hero *XyRankHero) SetMirror(toSet *shared_proto.CombatPlayerProto, version int64) int64 { newMirror := newCombatMirror(toSet, version) hero.combatMirrorRef.Store(newMirror) hero.clearTargetBytesCache() return newMirror.version } func (hero *XyRankHero) GetQueryTargetTroopMsg() pbutil.Buffer { return hero.getMirrorRef().getQueryTroopMsg(hero.Id()) } func newCombatMirror(combatMirror *shared_proto.CombatPlayerProto, version int64) *combatMirrorWithVersion { return &combatMirrorWithVersion{ version: version, combatMirror: combatMirror, } } type combatMirrorWithVersion struct { version int64 combatMirror *shared_proto.CombatPlayerProto queryTroopMsgCache atomic.Value } func (c *combatMirrorWithVersion) getQueryTroopMsg(heroId int64) pbutil.Buffer { msgRef := c.queryTroopMsgCache.Load() if msgRef != nil { return msgRef.(pbutil.Buffer) } msg := xuanyuan.NewS2cQueryTargetTroopMsg(idbytes.ToBytes(heroId), int32(c.version), must.Marshal(c.combatMirror)).Static() c.queryTroopMsgCache.Store(msg) return msg } type XyHero struct { // 玩家id heroId int64 // 最新积分 score uint64 // 胜利次数 win uint64 // 失败次数 lose uint64 // 挑战镜像 combatMirror *shared_proto.CombatPlayerProto } func (hero *XyHero) newRankHero(rank int) *XyRankHero { return newRankHero(hero.heroId, hero.score, hero.score, hero.win, hero.lose, rank, hero.combatMirror) }
) { r
identifier_name
manager.go
package xym import ( "sync" "github.com/lightpaw/male7/pb/shared_proto" "sync/atomic" "sort" "github.com/lightpaw/male7/util/imath" "github.com/lightpaw/male7/util/sortkeys" atomic2 "github.com/lightpaw/male7/util/atomic" "github.com/lightpaw/male7/service/herosnapshot/snapshotdata" "github.com/lightpaw/male7/util/idbytes" "github.com/lightpaw/male7/util/u64" "github.com/lightpaw/pbutil" "github.com/lightpaw/male7/gen/pb/xuanyuan" "github.com/lightpaw/male7/util/must" "github.com/lightpaw/male7/pb/server_proto" "time" "github.com/lightpaw/male7/util/timeutil" ) func NewManager(rankCount uint64) *XuanyuanManager { return &XuanyuanManager{ rankCount: rankCount, challengerMap: make(map[int64]*XyHero), } } type XuanyuanManager struct { sync.RWMutex rankCount uint64 // 排行榜数据,在排行榜中的玩家都在上面 rrl RoRankList // 挑战者数据 challengerMap map[int64]*XyHero } func (m *XuanyuanManager) Encode(proto *server_proto.XuanyuanModuleProto) { r := m.Get() if r != nil { proto.UpdateTime = timeutil.Marshal64(r.updateTime) proto.RankHero = make([]*server_proto.XuanyuanRankHeroProto, 0, len(r.rankHeros)) for _, v := range r.rankHeros { _, mirror := v.GetMirror() proto.RankHero = append(proto.RankHero, &server_proto.XuanyuanRankHeroProto{ HeroId: v.heroId, Score: v.score.Load(), RankScore: v.rankScore, Win: v.win.Load(), Lose: v.lose.Load(), Mirror: mirror, }) } } m.RLock() defer m.RUnlock() for _, v := range m.challengerMap { proto.Challenger = append(proto.Challenger, &server_proto.XuanyuanRankHeroProto{ HeroId: v.heroId, Score: v.score, Win: v.win, Lose: v.lose, Mirror: v.combatMirror, }) } } func (m *XuanyuanManager) Unmarshal(proto *server_proto.XuanyuanModuleProto) { if proto == nil { return } n := len(proto.RankHero) newRo := &RoRank{ heroMap: make(map[int64]*XyRankHero, n), rankHeros: make([]*XyRankHero, 0, n), updateTime: timeutil.Unix64(proto.UpdateTime), } for i, v := range proto.RankHero { rank := i + 1 newHero := newRankHero(v.HeroId, u64.FromInt64(int64(v.Score)), u64.FromInt64(int64(v.RankScore)), v.Win, v.Lose, rank, v.Mirror) newRo.heroMap[newHero.heroId] = newHero newRo.rankHeros = append(newRo.rankHeros, newHero) } m.rrl.set(newRo) m.Lock() defer m.Unlock() for _, v := range proto.Challenger { m.addChallenger(v.HeroId, v.Score, v.Win, v.Lose, v.Mirror) } } func (m *XuanyuanManager) Get() *RoRank { return m.rrl.Get() } func (m *XuanyuanManager) getAndClearChallenger() map[int64]*XyHero { m.Lock() defer m.Unlock() toReturn := m.challengerMap m.challengerMap = make(map[int64]*XyHero) return toReturn } func (m *XuanyuanManager) Update(updateTime time.Time, gmReset bool) bool { prev := m.Get() if !gmReset && prev != nil && !prev.updateTime.Before(updateTime) { return false } heroMap := m.getAndClearChallenger() m.rrl.update(heroMap, int(m.rankCount), updateTime, prev) return true } func (m *XuanyuanManager) AddChallenger(heroId int64, score, win, lose uint64, player *shared_proto.CombatPlayerProto) { m.Lock() defer m.Unlock() m.addChallenger(heroId, score, win, lose, player) } func (m *XuanyuanManager) addChallenger(heroId int64, score, win, lose uint64, player *shared_proto.CombatPlayerProto) { m.challengerMap[heroId] = &XyHero{ heroId: heroId, score: score, win: win, lose: lose, combatMirror: player, } } type RoRankList struct { v atomic.Value } func (r *RoRankList) Get() *RoRank { if rank := r.v.Load(); rank != nil { return rank.(*RoRank) } return nil } func (r *RoRankList) set(toSet *RoRank) { r.v.Store(toSet) } func (r *RoRankList) update(newHeroMap map[int64]*XyHero, rankCount int, updateTime time.Time, 
prev *RoRank) { // 单线程更新 //if len(newHeroMap) <= 0 && prev != nil { // // 改个时间 // r.set(&RoRank{ // heroMap: prev.heroMap, // rankHeros: prev.rankHeros, // updateTime: updateTime, // }) // return //} pa := make([]*sortkeys.U64K2V, 0, len(newHeroMap)) if prev != nil { for heroId, v := range prev.heroMap { // 如果在榜单中,已榜单为准 delete(newHeroMap, heroId) // 积分 + 战力 _, m := v.GetMirror() var fightAmount uint64 if m != nil { fightAmount = u64.FromInt32(m.TotalFightAmount) } pa = append(pa, &sortkeys.U64K2V{ K1: v.score.Load(), K2: fightAmount, V: heroId, }) } } for heroId, v := range newHeroMap { var fightAmount uint64 if v.combatMirror != nil { fightAmount = u64.FromInt32(v.combatMirror.TotalFightAmount) } pa = append(pa, &sortkeys.U64K2V{ K1: v.score, K2: fightAmount, V: heroId, }) } sort.Sort(sort.Reverse(sortkeys.U64K2VSlice(pa))) n := imath.Min(len(pa), rankCount) newRo := &RoRank{ heroMap: make(map[int64]*XyRankHero, n), rankHeros: make([]*XyRankHero, 0, n), updateTime: updateTime, } for i := 0; i < n; i++ { rank := i + 1 p := pa[i] score := p.K1 heroId := p.I64Value() var newHero *XyRankHero if prev != nil { prevHero := prev.GetHero(heroId) if prevHero != nil { newHero = prevHero.copy(score, rank) } } if newHero == nil { challenger := newHeroMap[heroId] newHero = challenger.newRankHero(rank) } newRo.heroMap[heroId] = newHero newRo.rankHeros = append(newRo.rankHeros, newHero) } r.set(newRo) } type RoRank struct { heroMap map[int64]*XyRankHero rankHeros []*XyRankHero updateTime time.Time } func (m *RoRank) GetUpdateTime() time.Time { return m.updateTime } func (m *RoRank) RankCount() int { return len(m.rankHeros) } func (m *RoRank) GetHero(heroId int64) *XyRankHero { return m.heroMap[heroId] } func (m *RoRank) GetHeroByRank(rank int) *XyRankHero { if rank > 0 && rank <= len(m.rankHeros) { return m.rankHeros[rank-1] } return nil } func (m *RoRank) Range(f func(hero *XyRankHero) (toContinue bool)) { for _, v := range m.rankHeros { if !f(v) { break } } } func newRankHero(heroId int64, score, rankScore, win, lose uint64, rank int, combatMirror *shared_proto.CombatPlayerProto) *XyRankHero { newHero := &XyRankHero{ heroId: heroId, score: atomic2.NewUint64(score), rankScore: rankScore, rank: rank, win: atomic2.NewUint64(win), lose: atomic2.NewUint64(lose), combatMirrorRef: &atomic.Value{}, } newHero.SetMirror(combatMirror, int64(rank)) return newHero } type XyRankHero struct { // 玩家id heroId int64 // 当前积分 score *atomic2.Uint64 // 排名积分 rankScore uint64 // 名次 rank int // 胜利次数 win *atomic2.Uint64 // 失败次数 lose *atomic2.Uint64 // 挑战镜像 combatMirrorRef *atomic.Value targetBytesCache atomic.Value } func (hero *XyRankHero) copy(rankScore uint64, rank int) *XyRankHero { newHero := &XyRankHero{ heroId: hero.heroId, score: hero.score, rankScore: rankScore, rank: rank, win: hero.win, lose: hero.lose, combatMirrorRef: hero.combatMirrorRef, } return newHero } func (hero *XyRankHero) Id() int64 { return hero.heroId } func (hero *XyRankHero) Rank() int { return hero.rank } func (hero *XyRankHero) GetScore() uint64 { return hero.score.Load() } func (hero *XyRankHero) SetScore(toSet uint64) { hero.score.Store(toSet) } func (hero *XyRankHero) GetWin() uint64 { return hero.win.Load() } func (hero *XyRankHero) IncWin() uint64 { amt := hero.win.Inc() hero.clearTargetBytesCache() return amt } func (hero *XyRankHero) GetLose() uint64 { return hero.lose.Load() } func (hero *XyRankHero) IncLose() uint64 { amt := hero.lose.Inc() hero.clearTargetBytesCache() return amt } func (hero *XyRankHero) EncodeTarget(getter func(int64) 
*snapshotdata.HeroSnapshot) []byte { cache := hero.targetBytesCache.Load() if cache != nil { if b, ok := cache.([]byte); ok && len(b) > 0 { return b } } proto := hero.encodeTarget(getter) protoBytes := must.Marshal(proto) hero.targetBytesCache.Store(protoBytes)
var emptyBytes = make([]byte, 0) func (hero *XyRankHero) clearTargetBytesCache() { hero.targetBytesCache.Store(emptyBytes) } func (hero *XyRankHero) encodeTarget(getter func(int64) *snapshotdata.HeroSnapshot) *shared_proto.XuanyuanTargetProto { proto := &shared_proto.XuanyuanTargetProto{} heroSnapshot := getter(hero.Id()) if heroSnapshot != nil { proto.Hero = heroSnapshot.EncodeBasic4Client() } else { proto.Hero = idbytes.HeroBasicProto(hero.Id()) } proto.Win = u64.Int32(hero.GetWin()) proto.Lose = u64.Int32(hero.GetLose()) proto.Score = u64.Int32(hero.rankScore) ref := hero.getMirrorRef() proto.FightAmount = ref.combatMirror.TotalFightAmount return proto } func (hero *XyRankHero) getMirrorRef() *combatMirrorWithVersion { return hero.combatMirrorRef.Load().(*combatMirrorWithVersion) } func (hero *XyRankHero) GetMirror() (int64, *shared_proto.CombatPlayerProto) { ref := hero.getMirrorRef() return ref.version, ref.combatMirror } func (hero *XyRankHero) SetMirror(toSet *shared_proto.CombatPlayerProto, version int64) int64 { newMirror := newCombatMirror(toSet, version) hero.combatMirrorRef.Store(newMirror) hero.clearTargetBytesCache() return newMirror.version } func (hero *XyRankHero) GetQueryTargetTroopMsg() pbutil.Buffer { return hero.getMirrorRef().getQueryTroopMsg(hero.Id()) } func newCombatMirror(combatMirror *shared_proto.CombatPlayerProto, version int64) *combatMirrorWithVersion { return &combatMirrorWithVersion{ version: version, combatMirror: combatMirror, } } type combatMirrorWithVersion struct { version int64 combatMirror *shared_proto.CombatPlayerProto queryTroopMsgCache atomic.Value } func (c *combatMirrorWithVersion) getQueryTroopMsg(heroId int64) pbutil.Buffer { msgRef := c.queryTroopMsgCache.Load() if msgRef != nil { return msgRef.(pbutil.Buffer) } msg := xuanyuan.NewS2cQueryTargetTroopMsg(idbytes.ToBytes(heroId), int32(c.version), must.Marshal(c.combatMirror)).Static() c.queryTroopMsgCache.Store(msg) return msg } type XyHero struct { // 玩家id heroId int64 // 最新积分 score uint64 // 胜利次数 win uint64 // 失败次数 lose uint64 // 挑战镜像 combatMirror *shared_proto.CombatPlayerProto } func (hero *XyHero) newRankHero(rank int) *XyRankHero { return newRankHero(hero.heroId, hero.score, hero.score, hero.win, hero.lose, rank, hero.combatMirror) }
return protoBytes }
random_line_split
manager.go
package xym import ( "sync" "github.com/lightpaw/male7/pb/shared_proto" "sync/atomic" "sort" "github.com/lightpaw/male7/util/imath" "github.com/lightpaw/male7/util/sortkeys" atomic2 "github.com/lightpaw/male7/util/atomic" "github.com/lightpaw/male7/service/herosnapshot/snapshotdata" "github.com/lightpaw/male7/util/idbytes" "github.com/lightpaw/male7/util/u64" "github.com/lightpaw/pbutil" "github.com/lightpaw/male7/gen/pb/xuanyuan" "github.com/lightpaw/male7/util/must" "github.com/lightpaw/male7/pb/server_proto" "time" "github.com/lightpaw/male7/util/timeutil" ) func NewManager(rankCount uint64) *XuanyuanManager { return &XuanyuanManager{ rankCount: rankCount, challengerMap: make(map[int64]*XyHero), } } type XuanyuanManager struct { sync.RWMutex rankCount uint64 // 排行榜数据,在排行榜中的玩家都在上面 rrl RoRankList // 挑战者数据 challengerMap map[int64]*XyHero } func (m *XuanyuanManager) Encode(proto *server_proto.XuanyuanModuleProto) { r := m.Get() if r != nil { proto.UpdateTime = timeutil.Marshal64(r.updateTime) proto.RankHero = make([]*server_proto.XuanyuanRankHeroProto, 0, len(r.rankHeros)) for _, v := range r.rankHeros { _, mirror := v.GetMirror() proto.RankHero = append(proto.RankHero, &server_proto.XuanyuanRankHeroProto{ HeroId: v.heroId, Score: v.score.Load(), RankScore: v.rankScore, Win: v.win.Load(), Lose: v.lose.Load(), Mirror: mirror, }) } } m.RLock() defer m.RUnlock() for _, v := range m.challengerMap { proto.Challenger = append(proto.Challenger, &server_proto.XuanyuanRankHeroProto{ HeroId: v.heroId, Score: v.score, Win: v.win, Lose: v.lose, Mirror: v.combatMirror, }) } } func (m *XuanyuanManager) Unmarshal(proto *server_proto.XuanyuanModuleProto) { if proto == nil { return } n := len(proto.RankHero) newRo := &RoRank{ heroMap: make(map[int64]*XyRankHero, n), rankHeros: make([]*XyRankHero, 0, n), updateTime: timeutil.Unix64(proto.UpdateTime), } for i, v := range proto.RankHero { rank := i + 1 newHero := newRankHero(v.HeroId, u64.FromInt64(int64(v.Score)), u64.FromInt64(int64(v.RankScore)), v.Win, v.Lose, rank, v.Mirror) newRo.heroMap[newHero.heroId] = newHero newRo.rankHeros = append(newRo.rankHeros, newHero) } m.rrl.set(newRo) m.Lock() defer m.Unlock() for _, v := range proto.Challenger { m.addChallenger(v.HeroId, v.Score, v.Win, v.Lose, v.Mirror) } } func (m *XuanyuanManager) Get() *RoRank { return m.rrl.Get() } func (m *XuanyuanManager) getAndClearChallenger() map[int64]*XyHero { m.Lock() defer m.Unlock() toReturn := m.challengerMap m.challengerMap = make(map[int64]*XyHero) return toReturn } func (m *XuanyuanManager) Update(updateTime time.Time, gmReset bool) bool { prev := m.Get() if !gmReset && prev != nil && !prev.updateTime.Before(updateTime) { return false } heroMap := m.getAndClearChallenger() m.rrl.update(heroMap, int(m.rankCount), updateTime, prev) return true } func (m *XuanyuanManager) AddChallenger(heroId int64, score, win, lose uint64, player *shared_proto.CombatPlayerProto) { m.Lock() defer m.Unlock() m.addChallenger(heroId, score, win, lose, player) } func (m *XuanyuanManager) addChallenger(heroId int64, score, win, lose uint64, player *shared_proto.CombatPlayerProto) { m.challengerMap[heroId] = &XyHero{ heroId: heroId, score: score, win: win, lose: lose, combatMirror: player, } } type RoRankList struct { v atomic.Value } func (r *RoRankList) Get() *RoRank { if rank := r.v.Load(); rank != nil { retu
.v.Store(toSet) } func (r *RoRankList) update(newHeroMap map[int64]*XyHero, rankCount int, updateTime time.Time, prev *RoRank) { // 单线程更新 //if len(newHeroMap) <= 0 && prev != nil { // // 改个时间 // r.set(&RoRank{ // heroMap: prev.heroMap, // rankHeros: prev.rankHeros, // updateTime: updateTime, // }) // return //} pa := make([]*sortkeys.U64K2V, 0, len(newHeroMap)) if prev != nil { for heroId, v := range prev.heroMap { // 如果在榜单中,已榜单为准 delete(newHeroMap, heroId) // 积分 + 战力 _, m := v.GetMirror() var fightAmount uint64 if m != nil { fightAmount = u64.FromInt32(m.TotalFightAmount) } pa = append(pa, &sortkeys.U64K2V{ K1: v.score.Load(), K2: fightAmount, V: heroId, }) } } for heroId, v := range newHeroMap { var fightAmount uint64 if v.combatMirror != nil { fightAmount = u64.FromInt32(v.combatMirror.TotalFightAmount) } pa = append(pa, &sortkeys.U64K2V{ K1: v.score, K2: fightAmount, V: heroId, }) } sort.Sort(sort.Reverse(sortkeys.U64K2VSlice(pa))) n := imath.Min(len(pa), rankCount) newRo := &RoRank{ heroMap: make(map[int64]*XyRankHero, n), rankHeros: make([]*XyRankHero, 0, n), updateTime: updateTime, } for i := 0; i < n; i++ { rank := i + 1 p := pa[i] score := p.K1 heroId := p.I64Value() var newHero *XyRankHero if prev != nil { prevHero := prev.GetHero(heroId) if prevHero != nil { newHero = prevHero.copy(score, rank) } } if newHero == nil { challenger := newHeroMap[heroId] newHero = challenger.newRankHero(rank) } newRo.heroMap[heroId] = newHero newRo.rankHeros = append(newRo.rankHeros, newHero) } r.set(newRo) } type RoRank struct { heroMap map[int64]*XyRankHero rankHeros []*XyRankHero updateTime time.Time } func (m *RoRank) GetUpdateTime() time.Time { return m.updateTime } func (m *RoRank) RankCount() int { return len(m.rankHeros) } func (m *RoRank) GetHero(heroId int64) *XyRankHero { return m.heroMap[heroId] } func (m *RoRank) GetHeroByRank(rank int) *XyRankHero { if rank > 0 && rank <= len(m.rankHeros) { return m.rankHeros[rank-1] } return nil } func (m *RoRank) Range(f func(hero *XyRankHero) (toContinue bool)) { for _, v := range m.rankHeros { if !f(v) { break } } } func newRankHero(heroId int64, score, rankScore, win, lose uint64, rank int, combatMirror *shared_proto.CombatPlayerProto) *XyRankHero { newHero := &XyRankHero{ heroId: heroId, score: atomic2.NewUint64(score), rankScore: rankScore, rank: rank, win: atomic2.NewUint64(win), lose: atomic2.NewUint64(lose), combatMirrorRef: &atomic.Value{}, } newHero.SetMirror(combatMirror, int64(rank)) return newHero } type XyRankHero struct { // 玩家id heroId int64 // 当前积分 score *atomic2.Uint64 // 排名积分 rankScore uint64 // 名次 rank int // 胜利次数 win *atomic2.Uint64 // 失败次数 lose *atomic2.Uint64 // 挑战镜像 combatMirrorRef *atomic.Value targetBytesCache atomic.Value } func (hero *XyRankHero) copy(rankScore uint64, rank int) *XyRankHero { newHero := &XyRankHero{ heroId: hero.heroId, score: hero.score, rankScore: rankScore, rank: rank, win: hero.win, lose: hero.lose, combatMirrorRef: hero.combatMirrorRef, } return newHero } func (hero *XyRankHero) Id() int64 { return hero.heroId } func (hero *XyRankHero) Rank() int { return hero.rank } func (hero *XyRankHero) GetScore() uint64 { return hero.score.Load() } func (hero *XyRankHero) SetScore(toSet uint64) { hero.score.Store(toSet) } func (hero *XyRankHero) GetWin() uint64 { return hero.win.Load() } func (hero *XyRankHero) IncWin() uint64 { amt := hero.win.Inc() hero.clearTargetBytesCache() return amt } func (hero *XyRankHero) GetLose() uint64 { return hero.lose.Load() } func (hero *XyRankHero) IncLose() uint64 { amt := 
hero.lose.Inc() hero.clearTargetBytesCache() return amt } func (hero *XyRankHero) EncodeTarget(getter func(int64) *snapshotdata.HeroSnapshot) []byte { cache := hero.targetBytesCache.Load() if cache != nil { if b, ok := cache.([]byte); ok && len(b) > 0 { return b } } proto := hero.encodeTarget(getter) protoBytes := must.Marshal(proto) hero.targetBytesCache.Store(protoBytes) return protoBytes } var emptyBytes = make([]byte, 0) func (hero *XyRankHero) clearTargetBytesCache() { hero.targetBytesCache.Store(emptyBytes) } func (hero *XyRankHero) encodeTarget(getter func(int64) *snapshotdata.HeroSnapshot) *shared_proto.XuanyuanTargetProto { proto := &shared_proto.XuanyuanTargetProto{} heroSnapshot := getter(hero.Id()) if heroSnapshot != nil { proto.Hero = heroSnapshot.EncodeBasic4Client() } else { proto.Hero = idbytes.HeroBasicProto(hero.Id()) } proto.Win = u64.Int32(hero.GetWin()) proto.Lose = u64.Int32(hero.GetLose()) proto.Score = u64.Int32(hero.rankScore) ref := hero.getMirrorRef() proto.FightAmount = ref.combatMirror.TotalFightAmount return proto } func (hero *XyRankHero) getMirrorRef() *combatMirrorWithVersion { return hero.combatMirrorRef.Load().(*combatMirrorWithVersion) } func (hero *XyRankHero) GetMirror() (int64, *shared_proto.CombatPlayerProto) { ref := hero.getMirrorRef() return ref.version, ref.combatMirror } func (hero *XyRankHero) SetMirror(toSet *shared_proto.CombatPlayerProto, version int64) int64 { newMirror := newCombatMirror(toSet, version) hero.combatMirrorRef.Store(newMirror) hero.clearTargetBytesCache() return newMirror.version } func (hero *XyRankHero) GetQueryTargetTroopMsg() pbutil.Buffer { return hero.getMirrorRef().getQueryTroopMsg(hero.Id()) } func newCombatMirror(combatMirror *shared_proto.CombatPlayerProto, version int64) *combatMirrorWithVersion { return &combatMirrorWithVersion{ version: version, combatMirror: combatMirror, } } type combatMirrorWithVersion struct { version int64 combatMirror *shared_proto.CombatPlayerProto queryTroopMsgCache atomic.Value } func (c *combatMirrorWithVersion) getQueryTroopMsg(heroId int64) pbutil.Buffer { msgRef := c.queryTroopMsgCache.Load() if msgRef != nil { return msgRef.(pbutil.Buffer) } msg := xuanyuan.NewS2cQueryTargetTroopMsg(idbytes.ToBytes(heroId), int32(c.version), must.Marshal(c.combatMirror)).Static() c.queryTroopMsgCache.Store(msg) return msg } type XyHero struct { // 玩家id heroId int64 // 最新积分 score uint64 // 胜利次数 win uint64 // 失败次数 lose uint64 // 挑战镜像 combatMirror *shared_proto.CombatPlayerProto } func (hero *XyHero) newRankHero(rank int) *XyRankHero { return newRankHero(hero.heroId, hero.score, hero.score, hero.win, hero.lose, rank, hero.combatMirror) }
rn rank.(*RoRank) } return nil } func (r *RoRankList) set(toSet *RoRank) { r
identifier_body
manager.go
package xym import ( "sync" "github.com/lightpaw/male7/pb/shared_proto" "sync/atomic" "sort" "github.com/lightpaw/male7/util/imath" "github.com/lightpaw/male7/util/sortkeys" atomic2 "github.com/lightpaw/male7/util/atomic" "github.com/lightpaw/male7/service/herosnapshot/snapshotdata" "github.com/lightpaw/male7/util/idbytes" "github.com/lightpaw/male7/util/u64" "github.com/lightpaw/pbutil" "github.com/lightpaw/male7/gen/pb/xuanyuan" "github.com/lightpaw/male7/util/must" "github.com/lightpaw/male7/pb/server_proto" "time" "github.com/lightpaw/male7/util/timeutil" ) func NewManager(rankCount uint64) *XuanyuanManager { return &XuanyuanManager{ rankCount: rankCount, challengerMap: make(map[int64]*XyHero), } } type XuanyuanManager struct { sync.RWMutex rankCount uint64 // 排行榜数据,在排行榜中的玩家都在上面 rrl RoRankList // 挑战者数据 challengerMap map[int64]*XyHero } func (m *XuanyuanManager) Encode(proto *server_proto.XuanyuanModuleProto) { r := m.Get() if r != nil { proto.UpdateTime = timeutil.Marshal64(r.up
ange m.challengerMap { proto.Challenger = append(proto.Challenger, &server_proto.XuanyuanRankHeroProto{ HeroId: v.heroId, Score: v.score, Win: v.win, Lose: v.lose, Mirror: v.combatMirror, }) } } func (m *XuanyuanManager) Unmarshal(proto *server_proto.XuanyuanModuleProto) { if proto == nil { return } n := len(proto.RankHero) newRo := &RoRank{ heroMap: make(map[int64]*XyRankHero, n), rankHeros: make([]*XyRankHero, 0, n), updateTime: timeutil.Unix64(proto.UpdateTime), } for i, v := range proto.RankHero { rank := i + 1 newHero := newRankHero(v.HeroId, u64.FromInt64(int64(v.Score)), u64.FromInt64(int64(v.RankScore)), v.Win, v.Lose, rank, v.Mirror) newRo.heroMap[newHero.heroId] = newHero newRo.rankHeros = append(newRo.rankHeros, newHero) } m.rrl.set(newRo) m.Lock() defer m.Unlock() for _, v := range proto.Challenger { m.addChallenger(v.HeroId, v.Score, v.Win, v.Lose, v.Mirror) } } func (m *XuanyuanManager) Get() *RoRank { return m.rrl.Get() } func (m *XuanyuanManager) getAndClearChallenger() map[int64]*XyHero { m.Lock() defer m.Unlock() toReturn := m.challengerMap m.challengerMap = make(map[int64]*XyHero) return toReturn } func (m *XuanyuanManager) Update(updateTime time.Time, gmReset bool) bool { prev := m.Get() if !gmReset && prev != nil && !prev.updateTime.Before(updateTime) { return false } heroMap := m.getAndClearChallenger() m.rrl.update(heroMap, int(m.rankCount), updateTime, prev) return true } func (m *XuanyuanManager) AddChallenger(heroId int64, score, win, lose uint64, player *shared_proto.CombatPlayerProto) { m.Lock() defer m.Unlock() m.addChallenger(heroId, score, win, lose, player) } func (m *XuanyuanManager) addChallenger(heroId int64, score, win, lose uint64, player *shared_proto.CombatPlayerProto) { m.challengerMap[heroId] = &XyHero{ heroId: heroId, score: score, win: win, lose: lose, combatMirror: player, } } type RoRankList struct { v atomic.Value } func (r *RoRankList) Get() *RoRank { if rank := r.v.Load(); rank != nil { return rank.(*RoRank) } return nil } func (r *RoRankList) set(toSet *RoRank) { r.v.Store(toSet) } func (r *RoRankList) update(newHeroMap map[int64]*XyHero, rankCount int, updateTime time.Time, prev *RoRank) { // 单线程更新 //if len(newHeroMap) <= 0 && prev != nil { // // 改个时间 // r.set(&RoRank{ // heroMap: prev.heroMap, // rankHeros: prev.rankHeros, // updateTime: updateTime, // }) // return //} pa := make([]*sortkeys.U64K2V, 0, len(newHeroMap)) if prev != nil { for heroId, v := range prev.heroMap { // 如果在榜单中,已榜单为准 delete(newHeroMap, heroId) // 积分 + 战力 _, m := v.GetMirror() var fightAmount uint64 if m != nil { fightAmount = u64.FromInt32(m.TotalFightAmount) } pa = append(pa, &sortkeys.U64K2V{ K1: v.score.Load(), K2: fightAmount, V: heroId, }) } } for heroId, v := range newHeroMap { var fightAmount uint64 if v.combatMirror != nil { fightAmount = u64.FromInt32(v.combatMirror.TotalFightAmount) } pa = append(pa, &sortkeys.U64K2V{ K1: v.score, K2: fightAmount, V: heroId, }) } sort.Sort(sort.Reverse(sortkeys.U64K2VSlice(pa))) n := imath.Min(len(pa), rankCount) newRo := &RoRank{ heroMap: make(map[int64]*XyRankHero, n), rankHeros: make([]*XyRankHero, 0, n), updateTime: updateTime, } for i := 0; i < n; i++ { rank := i + 1 p := pa[i] score := p.K1 heroId := p.I64Value() var newHero *XyRankHero if prev != nil { prevHero := prev.GetHero(heroId) if prevHero != nil { newHero = prevHero.copy(score, rank) } } if newHero == nil { challenger := newHeroMap[heroId] newHero = challenger.newRankHero(rank) } newRo.heroMap[heroId] = newHero newRo.rankHeros = append(newRo.rankHeros, 
newHero) } r.set(newRo) } type RoRank struct { heroMap map[int64]*XyRankHero rankHeros []*XyRankHero updateTime time.Time } func (m *RoRank) GetUpdateTime() time.Time { return m.updateTime } func (m *RoRank) RankCount() int { return len(m.rankHeros) } func (m *RoRank) GetHero(heroId int64) *XyRankHero { return m.heroMap[heroId] } func (m *RoRank) GetHeroByRank(rank int) *XyRankHero { if rank > 0 && rank <= len(m.rankHeros) { return m.rankHeros[rank-1] } return nil } func (m *RoRank) Range(f func(hero *XyRankHero) (toContinue bool)) { for _, v := range m.rankHeros { if !f(v) { break } } } func newRankHero(heroId int64, score, rankScore, win, lose uint64, rank int, combatMirror *shared_proto.CombatPlayerProto) *XyRankHero { newHero := &XyRankHero{ heroId: heroId, score: atomic2.NewUint64(score), rankScore: rankScore, rank: rank, win: atomic2.NewUint64(win), lose: atomic2.NewUint64(lose), combatMirrorRef: &atomic.Value{}, } newHero.SetMirror(combatMirror, int64(rank)) return newHero } type XyRankHero struct { // 玩家id heroId int64 // 当前积分 score *atomic2.Uint64 // 排名积分 rankScore uint64 // 名次 rank int // 胜利次数 win *atomic2.Uint64 // 失败次数 lose *atomic2.Uint64 // 挑战镜像 combatMirrorRef *atomic.Value targetBytesCache atomic.Value } func (hero *XyRankHero) copy(rankScore uint64, rank int) *XyRankHero { newHero := &XyRankHero{ heroId: hero.heroId, score: hero.score, rankScore: rankScore, rank: rank, win: hero.win, lose: hero.lose, combatMirrorRef: hero.combatMirrorRef, } return newHero } func (hero *XyRankHero) Id() int64 { return hero.heroId } func (hero *XyRankHero) Rank() int { return hero.rank } func (hero *XyRankHero) GetScore() uint64 { return hero.score.Load() } func (hero *XyRankHero) SetScore(toSet uint64) { hero.score.Store(toSet) } func (hero *XyRankHero) GetWin() uint64 { return hero.win.Load() } func (hero *XyRankHero) IncWin() uint64 { amt := hero.win.Inc() hero.clearTargetBytesCache() return amt } func (hero *XyRankHero) GetLose() uint64 { return hero.lose.Load() } func (hero *XyRankHero) IncLose() uint64 { amt := hero.lose.Inc() hero.clearTargetBytesCache() return amt } func (hero *XyRankHero) EncodeTarget(getter func(int64) *snapshotdata.HeroSnapshot) []byte { cache := hero.targetBytesCache.Load() if cache != nil { if b, ok := cache.([]byte); ok && len(b) > 0 { return b } } proto := hero.encodeTarget(getter) protoBytes := must.Marshal(proto) hero.targetBytesCache.Store(protoBytes) return protoBytes } var emptyBytes = make([]byte, 0) func (hero *XyRankHero) clearTargetBytesCache() { hero.targetBytesCache.Store(emptyBytes) } func (hero *XyRankHero) encodeTarget(getter func(int64) *snapshotdata.HeroSnapshot) *shared_proto.XuanyuanTargetProto { proto := &shared_proto.XuanyuanTargetProto{} heroSnapshot := getter(hero.Id()) if heroSnapshot != nil { proto.Hero = heroSnapshot.EncodeBasic4Client() } else { proto.Hero = idbytes.HeroBasicProto(hero.Id()) } proto.Win = u64.Int32(hero.GetWin()) proto.Lose = u64.Int32(hero.GetLose()) proto.Score = u64.Int32(hero.rankScore) ref := hero.getMirrorRef() proto.FightAmount = ref.combatMirror.TotalFightAmount return proto } func (hero *XyRankHero) getMirrorRef() *combatMirrorWithVersion { return hero.combatMirrorRef.Load().(*combatMirrorWithVersion) } func (hero *XyRankHero) GetMirror() (int64, *shared_proto.CombatPlayerProto) { ref := hero.getMirrorRef() return ref.version, ref.combatMirror } func (hero *XyRankHero) SetMirror(toSet *shared_proto.CombatPlayerProto, version int64) int64 { newMirror := newCombatMirror(toSet, version) 
hero.combatMirrorRef.Store(newMirror) hero.clearTargetBytesCache() return newMirror.version } func (hero *XyRankHero) GetQueryTargetTroopMsg() pbutil.Buffer { return hero.getMirrorRef().getQueryTroopMsg(hero.Id()) } func newCombatMirror(combatMirror *shared_proto.CombatPlayerProto, version int64) *combatMirrorWithVersion { return &combatMirrorWithVersion{ version: version, combatMirror: combatMirror, } } type combatMirrorWithVersion struct { version int64 combatMirror *shared_proto.CombatPlayerProto queryTroopMsgCache atomic.Value } func (c *combatMirrorWithVersion) getQueryTroopMsg(heroId int64) pbutil.Buffer { msgRef := c.queryTroopMsgCache.Load() if msgRef != nil { return msgRef.(pbutil.Buffer) } msg := xuanyuan.NewS2cQueryTargetTroopMsg(idbytes.ToBytes(heroId), int32(c.version), must.Marshal(c.combatMirror)).Static() c.queryTroopMsgCache.Store(msg) return msg } type XyHero struct { // 玩家id heroId int64 // 最新积分 score uint64 // 胜利次数 win uint64 // 失败次数 lose uint64 // 挑战镜像 combatMirror *shared_proto.CombatPlayerProto } func (hero *XyHero) newRankHero(rank int) *XyRankHero { return newRankHero(hero.heroId, hero.score, hero.score, hero.win, hero.lose, rank, hero.combatMirror) }
dateTime)
		proto.RankHero = make([]*server_proto.XuanyuanRankHeroProto, 0, len(r.rankHeros))
		for _, v := range r.rankHeros {
			_, mirror := v.GetMirror()
			proto.RankHero = append(proto.RankHero, &server_proto.XuanyuanRankHeroProto{
				HeroId:    v.heroId,
				Score:     v.score.Load(),
				RankScore: v.rankScore,
				Win:       v.win.Load(),
				Lose:      v.lose.Load(),
				Mirror:    mirror,
			})
		}
	}

	m.RLock()
	defer m.RUnlock()
	for _, v := r
conditional_block
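The `RoRankList.update` step above boils down to a language-agnostic merge: new challengers are merged with the previous board (board entries win on a duplicate hero id), the result is sorted by score and then by total fight amount in descending order, and only the top `rankCount` entries are kept before the fresh snapshot is swapped in. The following TypeScript sketch re-expresses just that merge-and-trim step for illustration; the `RankEntry` and `mergeAndRank` names are hypothetical and do not appear in the Go code.

// Hypothetical re-expression of the Go merge-and-rank step, for illustration only.
interface RankEntry {
    heroId: number;
    score: number;       // primary sort key (descending)
    fightAmount: number; // tie-breaker (descending)
}

function mergeAndRank(
    previous: RankEntry[],
    challengers: Map<number, RankEntry>,
    rankCount: number,
): RankEntry[] {
    // Entries already on the board take precedence over a challenger with the
    // same hero id, mirroring the `delete(newHeroMap, heroId)` step in the Go code.
    const merged = new Map<number, RankEntry>(challengers);
    for (const entry of previous) {
        merged.set(entry.heroId, entry);
    }
    return [...merged.values()]
        .sort((a, b) => b.score - a.score || b.fightAmount - a.fightAmount)
        .slice(0, rankCount);
}

// Example: a two-slot board where one challenger displaces the weakest previous entry.
const board = mergeAndRank(
    [{ heroId: 1, score: 90, fightAmount: 500 }, { heroId: 2, score: 40, fightAmount: 300 }],
    new Map([[3, { heroId: 3, score: 60, fightAmount: 200 }]]),
    2,
);
// board now holds heroes 1 and 3.

Keeping this step a pure function of its inputs is what makes the copy-on-write swap in the Go code (building a fresh RoRank and storing it into the atomic value) straightforward.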
webpack.ts
/* eslint-disable no-underscore-dangle */ /* eslint-disable @typescript-eslint/no-var-requires */ import * as path from 'path'; import * as url from 'url'; import * as webpack from 'webpack'; import type { BroilerConfig } from './config'; import { executeSync } from './exec'; // Webpack plugins const ExtractCssChunks = require('extract-css-chunks-webpack-plugin'); const FaviconsWebpackPlugin = require('favicons-webpack-plugin'); const ForkTsCheckerWebpackPlugin = require('fork-ts-checker-webpack-plugin'); const HtmlWebpackPlugin = require('html-webpack-plugin'); const { BundleAnalyzerPlugin } = require('webpack-bundle-analyzer'); export interface WebpackConfigOptions extends BroilerConfig { devServer: boolean; analyze: boolean; } /** * Creates the Webpack 2 configuration for the front-end asset compilation. * The options are documented at * https://webpack.js.org/configuration/ */ export function getFrontendWebpackConfig(config: WebpackConfigOptions): webpack.Configuration { const { devServer, debug, iconFile, sourceDir, buildDir, stageDir, title, siteFile, projectRootPath, analyze, assetsRoot, serverRoot, } = config; // Resolve modules, source, build and static paths const sourceDirPath = path.resolve(projectRootPath, sourceDir); const stageDirPath = path.resolve(projectRootPath, stageDir); const buildDirPath = path.resolve(projectRootPath, buildDir); const modulesDirPath = path.resolve(projectRootPath, 'node_modules'); const ownModulesDirPath = path.resolve(__dirname, 'node_modules'); const tsConfigPath = path.resolve(projectRootPath, './tsconfig.json'); // Determine the directory for the assets and the site const assetsRootUrl = url.parse(assetsRoot); const assetsPath = assetsRootUrl.pathname || '/'; const assetsDir = assetsPath.replace(/^\/+/, ''); const assetsFilePrefix = assetsDir && `${assetsDir}/`; const assetsOrigin = `${assetsRootUrl.protocol}//${assetsRootUrl.host}`; const gitCommitHash = executeSync('git rev-parse HEAD'); const gitVersion = executeSync('git describe --always --dirty="-$(git diff-tree HEAD | md5 -q | head -c 8)"'); const gitBranch = executeSync('git rev-parse --abbrev-ref HEAD'); // Generate the plugins const plugins: webpack.Plugin[] = [ ...getCommonPlugins({ frontend: true, devServer, assetsFilePrefix, tsConfigPath, sourceDirPath }), // Create HTML plugins for each webpage new HtmlWebpackPlugin({ title, filename: devServer ? 'index.html' : 'index.[hash].html', template: path.resolve(__dirname, './res/index.html'), chunks: ['app'], // Insert tags for stylesheets and scripts inject: 'body', // No cache-busting needed, because hash is included in file names hash: false, }), /** * Replace "global variables" from the scripts with the constant values. */ new webpack.DefinePlugin({ // Allow using the GIT commit hash ID __COMMIT_HASH__: JSON.stringify(gitCommitHash), // Allow using the GIT version __VERSION__: JSON.stringify(gitVersion), // Allow using the GIT branch name __BRANCH__: JSON.stringify(gitBranch), }), ]; if (!devServer) { plugins.push( // Generate some stats for the bundles getBundleAnalyzerPlugin(analyze, path.resolve(stageDirPath, `report-frontend.html`)), ); } // Define the entry for the app const entries: Record<string, string[]> = { app: [require.resolve(devServer ? './bootstrap/local-site' : './bootstrap/site')], }; /** * If icon source file is provided, generate icons for the app. 
* For configuration, see https://github.com/jantimon/favicons-webpack-plugin */ if (iconFile) { plugins.push( new FaviconsWebpackPlugin({ // Your source logo logo: path.resolve(sourceDirPath, iconFile), // The prefix for all image files (might be a folder or a name) prefix: devServer ? `${assetsFilePrefix}icons/` : `${assetsFilePrefix}icons/[hash]/`, // Inject the html into the html-webpack-plugin inject: true, // Locate the cache folder inside the .broiler directory cache: path.resolve(stageDirPath, '.fwp-cache'), // The configuration passed to `favicons`: // https://github.com/itgalaxy/favicons#usage // NOTE: The most of the metadata is read automatically from package.json favicons: { // Your application's name. `string` appName: title, // TODO: Your application's description. `string` appDescription: null, // TODO: Your (or your developer's) name. `string` developerName: null, // TODO: Your (or your developer's) URL. `string` developerURL: null, // TODO: Your application's version string. `string` version: null, // Start URL when launching the application from a device. `string` start_url: serverRoot, // Print logs to console? `boolean` logging: false, /** * Which icons should be generated. * Platform Options: * - offset - offset in percentage * - shadow - drop shadow for Android icons, available online only * - background: * * false - use default * * true - force use default, e.g. set background for Android icons * * color - set background for the specified icons */ icons: { // Create Android homescreen icon. `boolean` or `{ offset, background, shadow }` android: !devServer && !debug, // Create Apple touch icons. `boolean` or `{ offset, background }` appleIcon: !devServer && !debug, // Create Apple startup images. `boolean` or `{ offset, background }` appleStartup: !devServer && !debug, // Create Opera Coast icon with offset 25%. `boolean` or `{ offset, background }` coast: false, // Create regular favicons. `boolean` favicons: true, // Create Firefox OS icons. `boolean` or `{ offset, background }` firefox: false, // Create Windows 8 tile icons. `boolean` or `{ background }` windows: !devServer && !debug, // Create Yandex browser icon. `boolean` or `{ background }` yandex: false, }, }, }), ); } return { context: projectRootPath, // Development or production build? mode: devServer || debug ? 'development' : 'production', // The main entry points for source files. entry: entries, // Supposed to run in a browser target: 'web', output: { // Output files are placed to this folder path: buildDirPath, // The file name template for the entry chunks filename: devServer ? `${assetsFilePrefix}[name].js` : `${assetsFilePrefix}[name].[chunkhash].js`, // The URL to the output directory resolved relative to the HTML page // This will be the origin, not including the path, because that will be used as a subdirectory for files. publicPath: `${assetsOrigin}/`, // The name of the exported library, e.g. the global variable name library: 'app', // How the library is exported? E.g. 'var', 'this' libraryTarget: 'var', }, module: { rules: getCommonRules({ tsConfigPath, debug, devServer, assetsFilePrefix, emitFile: true }), }, resolve: { // Add '.ts' and '.tsx' as resolvable extensions. extensions: ['.ts', '.tsx', '.js'], alias: { // The entry point will `require` this module for finding the website component _site: path.resolve(projectRootPath, sourceDir, siteFile), }, }, resolveLoader: { // Look from this library's node modules! 
modules: [ownModulesDirPath, modulesDirPath], }, // Behavior for polyfilling node modules node: { // The default value `true` seems not to work with RxJS // TODO: Take a look if this can be enabled setImmediate: false, }, // Enable sourcemaps for debugging webpack's output. devtool: devServer ? 'inline-source-map' : 'source-map', // Plugins plugins, }; } /** * Creates the Webpack 2 configuration for the back-end code compilation. * The options are documented at * https://webpack.js.org/configuration/ */ export function getBackendWebpackConfig(config: WebpackConfigOptions): webpack.Configuration { const { serverFile, databaseFile, siteFile, triggersFile } = config; const { sourceDir, buildDir, projectRootPath, devServer, debug, assetsRoot } = config; const { analyze, stageDir } = config; // Resolve modules, source, build and static paths const sourceDirPath = path.resolve(projectRootPath, sourceDir); const buildDirPath = path.resolve(projectRootPath, buildDir); const modulesDirPath = path.resolve(projectRootPath, 'node_modules'); const ownModulesDirPath = path.resolve(__dirname, 'node_modules'); const stageDirPath = path.resolve(projectRootPath, stageDir); // Use the tsconfig.json in the project folder (not in this library) const tsConfigPath = path.resolve(projectRootPath, './tsconfig.json'); // Target backend always to ES2018 const compilerOptions = { target: 'ES2017' } as const; // Determine the directory for the assets and the site const assetsRootUrl = url.parse(assetsRoot); const assetsPath = assetsRootUrl.pathname || '/'; const assetsDir = assetsPath.replace(/^\/+/, ''); const assetsFilePrefix = assetsDir && `${assetsDir}/`; // Generate the plugins const plugins: webpack.Plugin[] = [ // Perform type checking for TypeScript ...getCommonPlugins({ frontend: false, devServer, assetsFilePrefix, tsConfigPath, sourceDirPath, compilerOptions, }), /** * Prevent `pg` module to import `pg-native` binding library. */ new webpack.IgnorePlugin({ resourceRegExp: /^\.\/native$/, contextRegExp: /node_modules\/pg\/lib$/, }), ]; if (!devServer) { // Generate some stats for the bundles plugins.push(getBundleAnalyzerPlugin(analyze, path.resolve(stageDirPath, `report-backend.html`))); } // Entry points to be bundled const entries: Record<string, string> = { // Entry point for rendering the views on server-side server: require.resolve(devServer ? './bootstrap/local-server' : './bootstrap/server'), }; // Aliases that entry points will `require` const aliases: Record<string, string> = { _site: path.resolve(projectRootPath, sourceDir, siteFile), }; // Modules excluded from the bundle const externals = [ // No need to bundle AWS SDK for compilation, because it will be available in the Lambda node environment 'aws-sdk', ]; // If an API is defined, compile it as well if (serverFile) { // eslint-disable-next-line no-underscore-dangle aliases._service = path.resolve(projectRootPath, sourceDir, serverFile); } else { // API not available. Let the bundle to compile without it, but // raise error if attempting to `require` externals.push('_service'); } // If a database defined, compile it as well if (databaseFile) { // eslint-disable-next-line no-underscore-dangle aliases._db = path.resolve(projectRootPath, sourceDir, databaseFile); } else { // Database not available. 
Let the bundle to compile without it, but // raise error if attempting to `require` externals.push('_db'); } // If a triggers file is defined, compile it as well if (triggersFile) { // eslint-disable-next-line no-underscore-dangle aliases._triggers = path.resolve(projectRootPath, sourceDir, triggersFile); } else { // Triggers not available. Let the bundle to compile without it, but // raise error if attempting to `require` externals.push('_triggers'); } return { context: projectRootPath, // Development or production build? mode: devServer || debug ? 'development' : 'production', optimization: { // For better tracebacks, do not minify server-side code, // even in production. minimize: false, }, // Build for running in node environment, instead of web browser target: 'node', // The main entry points for source files. entry: entries, output: { // Output files are placed to this folder path: buildDirPath, // The file name template for the entry chunks filename: devServer ? '[name].js' : '[name].[hash].js', // The URL to the output directory resolved relative to the HTML page publicPath: `${assetsRoot}/`, // Export so for use in a Lambda function libraryTarget: 'commonjs2', }, module: { rules: getCommonRules({ devServer, debug, tsConfigPath, compilerOptions, assetsFilePrefix, emitFile: false, }), }, externals, resolve: { // Add '.ts' and '.tsx' as resolvable extensions. extensions: ['.ts', '.tsx', '.js'], alias: aliases, }, resolveLoader: { // Look from this library's node modules! modules: [ownModulesDirPath, modulesDirPath], }, // Enable sourcemaps for debugging webpack's output. devtool: 'source-map', // Plugins plugins, }; } function getCommonPlugins(options: { frontend: boolean; devServer: boolean; assetsFilePrefix: string; tsConfigPath: string; sourceDirPath: string; compilerOptions?: unknown; }): webpack.Plugin[] { const { frontend, devServer, assetsFilePrefix, tsConfigPath, sourceDirPath, compilerOptions } = options; const cssFilePrefix = `${assetsFilePrefix}css/`; return [ // https://github.com/faceyspacey/extract-css-chunks-webpack-plugin new ExtractCssChunks({ filename: devServer ? `${cssFilePrefix}[name].css` : `${cssFilePrefix}[contenthash].css`, chunkFilename: devServer ? `${cssFilePrefix}[id].css` : `${cssFilePrefix}[contenthash].css`, ignoreOrder: false, }), // Perform type checking for TypeScript new ForkTsCheckerWebpackPlugin({ typescript: { configFile: tsConfigPath, configOverwrite: { compilerOptions, }, }, // When running the dev server, the backend compilation will handle ESlinting eslint: frontend && devServer ? undefined : { files: path.join(sourceDirPath, '**', '*.{ts,tsx,js,jsx}'), }, }), // Prevent all the MomentJS locales to be imported by default. 
new webpack.ContextReplacementPlugin( /\bmoment[/\\]locale\b/, // Regular expression to match the files that should be imported /\ben.js/, ), ]; } function getCommonRules(options: { assetsFilePrefix: string; debug: boolean; devServer: boolean; tsConfigPath: string; emitFile: boolean; compilerOptions?: unknown; }): webpack.RuleSetRule[] { const { tsConfigPath, compilerOptions, debug, devServer, assetsFilePrefix, emitFile } = options; return [ // Pre-process sourcemaps for scripts { test: /\.(jsx?|tsx?)$/, loader: 'source-map-loader', enforce: 'pre' as const, }, // Compile TypeScript files ('.ts' or '.tsx') { test: /\.tsx?$/, loader: 'ts-loader', options: { // Explicitly expect the tsconfig.json to be located at the project root configFile: tsConfigPath, // Disable type checker - use `fork-ts-checker-webpack-plugin` for that purpose instead transpileOnly: true, compilerOptions, }, }, // Extract stylesheets as separate CSS files { test: /\.css$/i, sideEffects: true, use: [ { loader: ExtractCssChunks.loader, options: { esModule: true, }, }, { loader: 'css-loader', options: { modules: { mode: 'local', // Auto-generated class names contain the original name on development localIdentName: debug || devServer ? '[local]--[hash:base64:5]' : '[hash:base64]', }, }, }, ], }, // Optimize image files and bundle them as files or data URIs { test: /\.(gif|png|jpe?g|svg)$/, use: [ { loader: 'url-loader', options: { // Max bytes to be converted to inline data URI limit: 100, // Asset files Files not emitted on server-side compilation emitFile, // If larger, then convert to a file instead name: `${assetsFilePrefix}images/[name].[hash].[ext]`, }, }, { loader: 'image-webpack-loader', options: { disable: debug || devServer, optipng: { optimizationLevel: 7, }, }, }, ], }, // Include font files either as data URIs or separate files { test: /\.(eot|ttf|otf|woff2?|svg)($|\?|#)/, loader: 'url-loader', options: { // Max bytes to be converted to inline data URI limit: 100, // Asset files Files not emitted on server-side compilation emitFile, // If larger, then convert to a file instead name: `${assetsFilePrefix}fonts/[name].[hash].[ext]`, }, }, ]; } function getBundleAnalyzerPlugin(enabled: boolean, filename: string)
{
    return new BundleAnalyzerPlugin({
        // Can be `server`, `static` or `disabled`.
        // In `server` mode analyzer will start HTTP server to show bundle report.
        // In `static` mode single HTML file with bundle report will be generated.
        // In `disabled` mode you can use this plugin to just generate Webpack Stats JSON file by setting `generateStatsFile` to `true`.
        analyzerMode: enabled ? 'static' : 'disabled',
        // Host that will be used in `server` mode to start HTTP server.
        analyzerHost: '127.0.0.1',
        // Port that will be used in `server` mode to start HTTP server.
        analyzerPort: 8888,
        // Path to bundle report file that will be generated in `static` mode.
        // Relative to bundles output directory.
        reportFilename: filename,
        // Module sizes to show in report by default.
        // Should be one of `stat`, `parsed` or `gzip`.
        // See "Definitions" section for more information.
        defaultSizes: 'parsed',
        // Automatically open report in default browser
        openAnalyzer: enabled,
        // If `true`, Webpack Stats JSON file will be generated in bundles output directory
        generateStatsFile: false,
        // Name of Webpack Stats JSON file that will be generated if `generateStatsFile` is `true`.
        // Relative to bundles output directory.
        statsFilename: 'stats.json',
        // Options for `stats.toJson()` method.
        // For example you can exclude sources of your modules from stats file with `source: false` option.
        // See more options here: https://github.com/webpack/webpack/blob/webpack-1/lib/Stats.js#L21
        statsOptions: null,
        // Log level. Can be 'info', 'warn', 'error' or 'silent'.
        logLevel: 'info',
    });
}
identifier_body
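The frontend configuration above derives its output locations from `assetsRoot` by splitting it into an origin (used as `publicPath`) and a path prefix (prepended to every emitted file name). A minimal sketch of that derivation, using a placeholder URL rather than any real project value:

import * as url from 'url';

// Placeholder value; in a real build this comes from the broiler configuration.
const assetsRoot = 'https://assets.example.com/static';

const assetsRootUrl = url.parse(assetsRoot);
const assetsPath = assetsRootUrl.pathname || '/';
const assetsDir = assetsPath.replace(/^\/+/, '');                        // 'static'
const assetsFilePrefix = assetsDir && `${assetsDir}/`;                   // 'static/'
const assetsOrigin = `${assetsRootUrl.protocol}//${assetsRootUrl.host}`; // 'https://assets.example.com'

// A production entry chunk is therefore emitted as `${assetsFilePrefix}[name].[chunkhash].js`
// and served from `${assetsOrigin}/`, e.g. https://assets.example.com/static/app.<chunkhash>.js
console.log(assetsOrigin, assetsFilePrefix);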
webpack.ts
/* eslint-disable no-underscore-dangle */ /* eslint-disable @typescript-eslint/no-var-requires */ import * as path from 'path'; import * as url from 'url'; import * as webpack from 'webpack'; import type { BroilerConfig } from './config'; import { executeSync } from './exec'; // Webpack plugins const ExtractCssChunks = require('extract-css-chunks-webpack-plugin'); const FaviconsWebpackPlugin = require('favicons-webpack-plugin'); const ForkTsCheckerWebpackPlugin = require('fork-ts-checker-webpack-plugin'); const HtmlWebpackPlugin = require('html-webpack-plugin'); const { BundleAnalyzerPlugin } = require('webpack-bundle-analyzer'); export interface WebpackConfigOptions extends BroilerConfig { devServer: boolean; analyze: boolean; } /** * Creates the Webpack 2 configuration for the front-end asset compilation. * The options are documented at * https://webpack.js.org/configuration/ */ export function getFrontendWebpackConfig(config: WebpackConfigOptions): webpack.Configuration { const { devServer, debug, iconFile, sourceDir, buildDir, stageDir, title, siteFile, projectRootPath, analyze, assetsRoot, serverRoot, } = config; // Resolve modules, source, build and static paths const sourceDirPath = path.resolve(projectRootPath, sourceDir); const stageDirPath = path.resolve(projectRootPath, stageDir); const buildDirPath = path.resolve(projectRootPath, buildDir); const modulesDirPath = path.resolve(projectRootPath, 'node_modules'); const ownModulesDirPath = path.resolve(__dirname, 'node_modules'); const tsConfigPath = path.resolve(projectRootPath, './tsconfig.json'); // Determine the directory for the assets and the site const assetsRootUrl = url.parse(assetsRoot); const assetsPath = assetsRootUrl.pathname || '/'; const assetsDir = assetsPath.replace(/^\/+/, ''); const assetsFilePrefix = assetsDir && `${assetsDir}/`; const assetsOrigin = `${assetsRootUrl.protocol}//${assetsRootUrl.host}`; const gitCommitHash = executeSync('git rev-parse HEAD'); const gitVersion = executeSync('git describe --always --dirty="-$(git diff-tree HEAD | md5 -q | head -c 8)"'); const gitBranch = executeSync('git rev-parse --abbrev-ref HEAD'); // Generate the plugins const plugins: webpack.Plugin[] = [ ...getCommonPlugins({ frontend: true, devServer, assetsFilePrefix, tsConfigPath, sourceDirPath }), // Create HTML plugins for each webpage new HtmlWebpackPlugin({ title, filename: devServer ? 'index.html' : 'index.[hash].html', template: path.resolve(__dirname, './res/index.html'), chunks: ['app'], // Insert tags for stylesheets and scripts inject: 'body', // No cache-busting needed, because hash is included in file names hash: false, }), /** * Replace "global variables" from the scripts with the constant values. */ new webpack.DefinePlugin({ // Allow using the GIT commit hash ID __COMMIT_HASH__: JSON.stringify(gitCommitHash), // Allow using the GIT version __VERSION__: JSON.stringify(gitVersion), // Allow using the GIT branch name __BRANCH__: JSON.stringify(gitBranch), }), ]; if (!devServer) { plugins.push( // Generate some stats for the bundles getBundleAnalyzerPlugin(analyze, path.resolve(stageDirPath, `report-frontend.html`)), ); } // Define the entry for the app const entries: Record<string, string[]> = { app: [require.resolve(devServer ? './bootstrap/local-site' : './bootstrap/site')], }; /** * If icon source file is provided, generate icons for the app. 
* For configuration, see https://github.com/jantimon/favicons-webpack-plugin */ if (iconFile) { plugins.push( new FaviconsWebpackPlugin({ // Your source logo logo: path.resolve(sourceDirPath, iconFile), // The prefix for all image files (might be a folder or a name) prefix: devServer ? `${assetsFilePrefix}icons/` : `${assetsFilePrefix}icons/[hash]/`, // Inject the html into the html-webpack-plugin inject: true, // Locate the cache folder inside the .broiler directory cache: path.resolve(stageDirPath, '.fwp-cache'), // The configuration passed to `favicons`: // https://github.com/itgalaxy/favicons#usage // NOTE: The most of the metadata is read automatically from package.json favicons: { // Your application's name. `string` appName: title, // TODO: Your application's description. `string` appDescription: null, // TODO: Your (or your developer's) name. `string` developerName: null, // TODO: Your (or your developer's) URL. `string` developerURL: null, // TODO: Your application's version string. `string` version: null, // Start URL when launching the application from a device. `string` start_url: serverRoot, // Print logs to console? `boolean` logging: false, /** * Which icons should be generated. * Platform Options: * - offset - offset in percentage * - shadow - drop shadow for Android icons, available online only * - background: * * false - use default * * true - force use default, e.g. set background for Android icons * * color - set background for the specified icons */ icons: { // Create Android homescreen icon. `boolean` or `{ offset, background, shadow }` android: !devServer && !debug, // Create Apple touch icons. `boolean` or `{ offset, background }` appleIcon: !devServer && !debug, // Create Apple startup images. `boolean` or `{ offset, background }` appleStartup: !devServer && !debug, // Create Opera Coast icon with offset 25%. `boolean` or `{ offset, background }` coast: false, // Create regular favicons. `boolean` favicons: true, // Create Firefox OS icons. `boolean` or `{ offset, background }` firefox: false, // Create Windows 8 tile icons. `boolean` or `{ background }` windows: !devServer && !debug, // Create Yandex browser icon. `boolean` or `{ background }` yandex: false, }, }, }), ); } return { context: projectRootPath, // Development or production build? mode: devServer || debug ? 'development' : 'production', // The main entry points for source files. entry: entries, // Supposed to run in a browser target: 'web', output: { // Output files are placed to this folder path: buildDirPath, // The file name template for the entry chunks filename: devServer ? `${assetsFilePrefix}[name].js` : `${assetsFilePrefix}[name].[chunkhash].js`, // The URL to the output directory resolved relative to the HTML page // This will be the origin, not including the path, because that will be used as a subdirectory for files. publicPath: `${assetsOrigin}/`, // The name of the exported library, e.g. the global variable name library: 'app', // How the library is exported? E.g. 'var', 'this' libraryTarget: 'var', }, module: { rules: getCommonRules({ tsConfigPath, debug, devServer, assetsFilePrefix, emitFile: true }), }, resolve: { // Add '.ts' and '.tsx' as resolvable extensions. extensions: ['.ts', '.tsx', '.js'], alias: { // The entry point will `require` this module for finding the website component _site: path.resolve(projectRootPath, sourceDir, siteFile), }, }, resolveLoader: { // Look from this library's node modules! 
modules: [ownModulesDirPath, modulesDirPath], }, // Behavior for polyfilling node modules node: { // The default value `true` seems not to work with RxJS // TODO: Take a look if this can be enabled setImmediate: false, }, // Enable sourcemaps for debugging webpack's output. devtool: devServer ? 'inline-source-map' : 'source-map', // Plugins plugins, }; } /** * Creates the Webpack 2 configuration for the back-end code compilation. * The options are documented at * https://webpack.js.org/configuration/ */ export function getBackendWebpackConfig(config: WebpackConfigOptions): webpack.Configuration { const { serverFile, databaseFile, siteFile, triggersFile } = config; const { sourceDir, buildDir, projectRootPath, devServer, debug, assetsRoot } = config; const { analyze, stageDir } = config; // Resolve modules, source, build and static paths const sourceDirPath = path.resolve(projectRootPath, sourceDir); const buildDirPath = path.resolve(projectRootPath, buildDir); const modulesDirPath = path.resolve(projectRootPath, 'node_modules'); const ownModulesDirPath = path.resolve(__dirname, 'node_modules'); const stageDirPath = path.resolve(projectRootPath, stageDir); // Use the tsconfig.json in the project folder (not in this library) const tsConfigPath = path.resolve(projectRootPath, './tsconfig.json'); // Target backend always to ES2018 const compilerOptions = { target: 'ES2017' } as const; // Determine the directory for the assets and the site const assetsRootUrl = url.parse(assetsRoot); const assetsPath = assetsRootUrl.pathname || '/'; const assetsDir = assetsPath.replace(/^\/+/, ''); const assetsFilePrefix = assetsDir && `${assetsDir}/`; // Generate the plugins const plugins: webpack.Plugin[] = [ // Perform type checking for TypeScript ...getCommonPlugins({ frontend: false, devServer, assetsFilePrefix, tsConfigPath, sourceDirPath, compilerOptions, }), /** * Prevent `pg` module to import `pg-native` binding library. */ new webpack.IgnorePlugin({ resourceRegExp: /^\.\/native$/, contextRegExp: /node_modules\/pg\/lib$/, }), ]; if (!devServer) { // Generate some stats for the bundles plugins.push(getBundleAnalyzerPlugin(analyze, path.resolve(stageDirPath, `report-backend.html`))); } // Entry points to be bundled const entries: Record<string, string> = { // Entry point for rendering the views on server-side server: require.resolve(devServer ? './bootstrap/local-server' : './bootstrap/server'), }; // Aliases that entry points will `require` const aliases: Record<string, string> = { _site: path.resolve(projectRootPath, sourceDir, siteFile), }; // Modules excluded from the bundle const externals = [ // No need to bundle AWS SDK for compilation, because it will be available in the Lambda node environment 'aws-sdk', ]; // If an API is defined, compile it as well if (serverFile) { // eslint-disable-next-line no-underscore-dangle aliases._service = path.resolve(projectRootPath, sourceDir, serverFile); } else { // API not available. Let the bundle to compile without it, but // raise error if attempting to `require` externals.push('_service'); } // If a database defined, compile it as well if (databaseFile) { // eslint-disable-next-line no-underscore-dangle aliases._db = path.resolve(projectRootPath, sourceDir, databaseFile); } else { // Database not available. 
Let the bundle to compile without it, but // raise error if attempting to `require` externals.push('_db'); } // If a triggers file is defined, compile it as well if (triggersFile) { // eslint-disable-next-line no-underscore-dangle aliases._triggers = path.resolve(projectRootPath, sourceDir, triggersFile); } else { // Triggers not available. Let the bundle to compile without it, but // raise error if attempting to `require` externals.push('_triggers'); } return { context: projectRootPath, // Development or production build? mode: devServer || debug ? 'development' : 'production', optimization: { // For better tracebacks, do not minify server-side code, // even in production. minimize: false, }, // Build for running in node environment, instead of web browser target: 'node', // The main entry points for source files. entry: entries, output: { // Output files are placed to this folder path: buildDirPath, // The file name template for the entry chunks filename: devServer ? '[name].js' : '[name].[hash].js', // The URL to the output directory resolved relative to the HTML page publicPath: `${assetsRoot}/`, // Export so for use in a Lambda function libraryTarget: 'commonjs2', }, module: { rules: getCommonRules({ devServer, debug, tsConfigPath, compilerOptions, assetsFilePrefix, emitFile: false, }), }, externals, resolve: { // Add '.ts' and '.tsx' as resolvable extensions. extensions: ['.ts', '.tsx', '.js'], alias: aliases, }, resolveLoader: {
// Enable sourcemaps for debugging webpack's output. devtool: 'source-map', // Plugins plugins, }; } function getCommonPlugins(options: { frontend: boolean; devServer: boolean; assetsFilePrefix: string; tsConfigPath: string; sourceDirPath: string; compilerOptions?: unknown; }): webpack.Plugin[] { const { frontend, devServer, assetsFilePrefix, tsConfigPath, sourceDirPath, compilerOptions } = options; const cssFilePrefix = `${assetsFilePrefix}css/`; return [ // https://github.com/faceyspacey/extract-css-chunks-webpack-plugin new ExtractCssChunks({ filename: devServer ? `${cssFilePrefix}[name].css` : `${cssFilePrefix}[contenthash].css`, chunkFilename: devServer ? `${cssFilePrefix}[id].css` : `${cssFilePrefix}[contenthash].css`, ignoreOrder: false, }), // Perform type checking for TypeScript new ForkTsCheckerWebpackPlugin({ typescript: { configFile: tsConfigPath, configOverwrite: { compilerOptions, }, }, // When running the dev server, the backend compilation will handle ESlinting eslint: frontend && devServer ? undefined : { files: path.join(sourceDirPath, '**', '*.{ts,tsx,js,jsx}'), }, }), // Prevent all the MomentJS locales to be imported by default. new webpack.ContextReplacementPlugin( /\bmoment[/\\]locale\b/, // Regular expression to match the files that should be imported /\ben.js/, ), ]; } function getCommonRules(options: { assetsFilePrefix: string; debug: boolean; devServer: boolean; tsConfigPath: string; emitFile: boolean; compilerOptions?: unknown; }): webpack.RuleSetRule[] { const { tsConfigPath, compilerOptions, debug, devServer, assetsFilePrefix, emitFile } = options; return [ // Pre-process sourcemaps for scripts { test: /\.(jsx?|tsx?)$/, loader: 'source-map-loader', enforce: 'pre' as const, }, // Compile TypeScript files ('.ts' or '.tsx') { test: /\.tsx?$/, loader: 'ts-loader', options: { // Explicitly expect the tsconfig.json to be located at the project root configFile: tsConfigPath, // Disable type checker - use `fork-ts-checker-webpack-plugin` for that purpose instead transpileOnly: true, compilerOptions, }, }, // Extract stylesheets as separate CSS files { test: /\.css$/i, sideEffects: true, use: [ { loader: ExtractCssChunks.loader, options: { esModule: true, }, }, { loader: 'css-loader', options: { modules: { mode: 'local', // Auto-generated class names contain the original name on development localIdentName: debug || devServer ? '[local]--[hash:base64:5]' : '[hash:base64]', }, }, }, ], }, // Optimize image files and bundle them as files or data URIs { test: /\.(gif|png|jpe?g|svg)$/, use: [ { loader: 'url-loader', options: { // Max bytes to be converted to inline data URI limit: 100, // Asset files Files not emitted on server-side compilation emitFile, // If larger, then convert to a file instead name: `${assetsFilePrefix}images/[name].[hash].[ext]`, }, }, { loader: 'image-webpack-loader', options: { disable: debug || devServer, optipng: { optimizationLevel: 7, }, }, }, ], }, // Include font files either as data URIs or separate files { test: /\.(eot|ttf|otf|woff2?|svg)($|\?|#)/, loader: 'url-loader', options: { // Max bytes to be converted to inline data URI limit: 100, // Asset files Files not emitted on server-side compilation emitFile, // If larger, then convert to a file instead name: `${assetsFilePrefix}fonts/[name].[hash].[ext]`, }, }, ]; } function getBundleAnalyzerPlugin(enabled: boolean, filename: string) { return new BundleAnalyzerPlugin({ // Can be `server`, `static` or `disabled`. // In `server` mode analyzer will start HTTP server to show bundle report. 
// In `static` mode single HTML file with bundle report will be generated. // In `disabled` mode you can use this plugin to just generate Webpack Stats JSON file by setting `generateStatsFile` to `true`. analyzerMode: enabled ? 'static' : 'disabled', // Host that will be used in `server` mode to start HTTP server. analyzerHost: '127.0.0.1', // Port that will be used in `server` mode to start HTTP server. analyzerPort: 8888, // Path to bundle report file that will be generated in `static` mode. // Relative to bundles output directory. reportFilename: filename, // Module sizes to show in report by default. // Should be one of `stat`, `parsed` or `gzip`. // See "Definitions" section for more information. defaultSizes: 'parsed', // Automatically open report in default browser openAnalyzer: enabled, // If `true`, Webpack Stats JSON file will be generated in bundles output directory generateStatsFile: false, // Name of Webpack Stats JSON file that will be generated if `generateStatsFile` is `true`. // Relative to bundles output directory. statsFilename: 'stats.json', // Options for `stats.toJson()` method. // For example you can exclude sources of your modules from stats file with `source: false` option. // See more options here: https://github.com/webpack/webpack/blob/webpack-1/lib/Stats.js#L21 statsOptions: null, // Log level. Can be 'info', 'warn', 'error' or 'silent'. logLevel: 'info', }); }
// Look from this library's node modules!
		modules: [ownModulesDirPath, modulesDirPath],
	},
random_line_split
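In the backend configuration above, each optional module (`_service`, `_db`, `_triggers`) is either aliased to a real source file or pushed into `externals`, so a missing file only fails when something actually `require`s it at runtime rather than breaking the compilation. A small sketch of that alias-or-external decision in isolation; the `optionalModule` helper and the hard-coded paths are illustrative, not part of the library:

import * as path from 'path';
import * as webpack from 'webpack';

// Illustrative helper: register an optional backend module either as an alias
// (bundled) or as an external (left out, fails only if required at runtime).
function optionalModule(
    aliases: Record<string, string>,
    externals: string[],
    name: string,
    file?: string,
): void {
    if (file) {
        // Placeholder project path; the real code resolves against the project source dir.
        aliases[name] = path.resolve('/project/src', file);
    } else {
        externals.push(name);
    }
}

const aliases: Record<string, string> = {};
const externals: string[] = ['aws-sdk'];
optionalModule(aliases, externals, '_service', 'server.ts');
optionalModule(aliases, externals, '_db', undefined);

const partialConfig: Partial<webpack.Configuration> = {
    externals,
    resolve: { alias: aliases },
};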
webpack.ts
/* eslint-disable no-underscore-dangle */ /* eslint-disable @typescript-eslint/no-var-requires */ import * as path from 'path'; import * as url from 'url'; import * as webpack from 'webpack'; import type { BroilerConfig } from './config'; import { executeSync } from './exec'; // Webpack plugins const ExtractCssChunks = require('extract-css-chunks-webpack-plugin'); const FaviconsWebpackPlugin = require('favicons-webpack-plugin'); const ForkTsCheckerWebpackPlugin = require('fork-ts-checker-webpack-plugin'); const HtmlWebpackPlugin = require('html-webpack-plugin'); const { BundleAnalyzerPlugin } = require('webpack-bundle-analyzer'); export interface WebpackConfigOptions extends BroilerConfig { devServer: boolean; analyze: boolean; } /** * Creates the Webpack 2 configuration for the front-end asset compilation. * The options are documented at * https://webpack.js.org/configuration/ */ export function getFrontendWebpackConfig(config: WebpackConfigOptions): webpack.Configuration { const { devServer, debug, iconFile, sourceDir, buildDir, stageDir, title, siteFile, projectRootPath, analyze, assetsRoot, serverRoot, } = config; // Resolve modules, source, build and static paths const sourceDirPath = path.resolve(projectRootPath, sourceDir); const stageDirPath = path.resolve(projectRootPath, stageDir); const buildDirPath = path.resolve(projectRootPath, buildDir); const modulesDirPath = path.resolve(projectRootPath, 'node_modules'); const ownModulesDirPath = path.resolve(__dirname, 'node_modules'); const tsConfigPath = path.resolve(projectRootPath, './tsconfig.json'); // Determine the directory for the assets and the site const assetsRootUrl = url.parse(assetsRoot); const assetsPath = assetsRootUrl.pathname || '/'; const assetsDir = assetsPath.replace(/^\/+/, ''); const assetsFilePrefix = assetsDir && `${assetsDir}/`; const assetsOrigin = `${assetsRootUrl.protocol}//${assetsRootUrl.host}`; const gitCommitHash = executeSync('git rev-parse HEAD'); const gitVersion = executeSync('git describe --always --dirty="-$(git diff-tree HEAD | md5 -q | head -c 8)"'); const gitBranch = executeSync('git rev-parse --abbrev-ref HEAD'); // Generate the plugins const plugins: webpack.Plugin[] = [ ...getCommonPlugins({ frontend: true, devServer, assetsFilePrefix, tsConfigPath, sourceDirPath }), // Create HTML plugins for each webpage new HtmlWebpackPlugin({ title, filename: devServer ? 'index.html' : 'index.[hash].html', template: path.resolve(__dirname, './res/index.html'), chunks: ['app'], // Insert tags for stylesheets and scripts inject: 'body', // No cache-busting needed, because hash is included in file names hash: false, }), /** * Replace "global variables" from the scripts with the constant values. */ new webpack.DefinePlugin({ // Allow using the GIT commit hash ID __COMMIT_HASH__: JSON.stringify(gitCommitHash), // Allow using the GIT version __VERSION__: JSON.stringify(gitVersion), // Allow using the GIT branch name __BRANCH__: JSON.stringify(gitBranch), }), ]; if (!devServer) { plugins.push( // Generate some stats for the bundles getBundleAnalyzerPlugin(analyze, path.resolve(stageDirPath, `report-frontend.html`)), ); } // Define the entry for the app const entries: Record<string, string[]> = { app: [require.resolve(devServer ? './bootstrap/local-site' : './bootstrap/site')], }; /** * If icon source file is provided, generate icons for the app. 
* For configuration, see https://github.com/jantimon/favicons-webpack-plugin */ if (iconFile) { plugins.push( new FaviconsWebpackPlugin({ // Your source logo logo: path.resolve(sourceDirPath, iconFile), // The prefix for all image files (might be a folder or a name) prefix: devServer ? `${assetsFilePrefix}icons/` : `${assetsFilePrefix}icons/[hash]/`, // Inject the html into the html-webpack-plugin inject: true, // Locate the cache folder inside the .broiler directory cache: path.resolve(stageDirPath, '.fwp-cache'), // The configuration passed to `favicons`: // https://github.com/itgalaxy/favicons#usage // NOTE: The most of the metadata is read automatically from package.json favicons: { // Your application's name. `string` appName: title, // TODO: Your application's description. `string` appDescription: null, // TODO: Your (or your developer's) name. `string` developerName: null, // TODO: Your (or your developer's) URL. `string` developerURL: null, // TODO: Your application's version string. `string` version: null, // Start URL when launching the application from a device. `string` start_url: serverRoot, // Print logs to console? `boolean` logging: false, /** * Which icons should be generated. * Platform Options: * - offset - offset in percentage * - shadow - drop shadow for Android icons, available online only * - background: * * false - use default * * true - force use default, e.g. set background for Android icons * * color - set background for the specified icons */ icons: { // Create Android homescreen icon. `boolean` or `{ offset, background, shadow }` android: !devServer && !debug, // Create Apple touch icons. `boolean` or `{ offset, background }` appleIcon: !devServer && !debug, // Create Apple startup images. `boolean` or `{ offset, background }` appleStartup: !devServer && !debug, // Create Opera Coast icon with offset 25%. `boolean` or `{ offset, background }` coast: false, // Create regular favicons. `boolean` favicons: true, // Create Firefox OS icons. `boolean` or `{ offset, background }` firefox: false, // Create Windows 8 tile icons. `boolean` or `{ background }` windows: !devServer && !debug, // Create Yandex browser icon. `boolean` or `{ background }` yandex: false, }, }, }), ); } return { context: projectRootPath, // Development or production build? mode: devServer || debug ? 'development' : 'production', // The main entry points for source files. entry: entries, // Supposed to run in a browser target: 'web', output: { // Output files are placed to this folder path: buildDirPath, // The file name template for the entry chunks filename: devServer ? `${assetsFilePrefix}[name].js` : `${assetsFilePrefix}[name].[chunkhash].js`, // The URL to the output directory resolved relative to the HTML page // This will be the origin, not including the path, because that will be used as a subdirectory for files. publicPath: `${assetsOrigin}/`, // The name of the exported library, e.g. the global variable name library: 'app', // How the library is exported? E.g. 'var', 'this' libraryTarget: 'var', }, module: { rules: getCommonRules({ tsConfigPath, debug, devServer, assetsFilePrefix, emitFile: true }), }, resolve: { // Add '.ts' and '.tsx' as resolvable extensions. extensions: ['.ts', '.tsx', '.js'], alias: { // The entry point will `require` this module for finding the website component _site: path.resolve(projectRootPath, sourceDir, siteFile), }, }, resolveLoader: { // Look from this library's node modules! 
modules: [ownModulesDirPath, modulesDirPath], }, // Behavior for polyfilling node modules node: { // The default value `true` seems not to work with RxJS // TODO: Take a look if this can be enabled setImmediate: false, }, // Enable sourcemaps for debugging webpack's output. devtool: devServer ? 'inline-source-map' : 'source-map', // Plugins plugins, }; } /** * Creates the Webpack 2 configuration for the back-end code compilation. * The options are documented at * https://webpack.js.org/configuration/ */ export function getBackendWebpackConfig(config: WebpackConfigOptions): webpack.Configuration { const { serverFile, databaseFile, siteFile, triggersFile } = config; const { sourceDir, buildDir, projectRootPath, devServer, debug, assetsRoot } = config; const { analyze, stageDir } = config; // Resolve modules, source, build and static paths const sourceDirPath = path.resolve(projectRootPath, sourceDir); const buildDirPath = path.resolve(projectRootPath, buildDir); const modulesDirPath = path.resolve(projectRootPath, 'node_modules'); const ownModulesDirPath = path.resolve(__dirname, 'node_modules'); const stageDirPath = path.resolve(projectRootPath, stageDir); // Use the tsconfig.json in the project folder (not in this library) const tsConfigPath = path.resolve(projectRootPath, './tsconfig.json'); // Target backend always to ES2018 const compilerOptions = { target: 'ES2017' } as const; // Determine the directory for the assets and the site const assetsRootUrl = url.parse(assetsRoot); const assetsPath = assetsRootUrl.pathname || '/'; const assetsDir = assetsPath.replace(/^\/+/, ''); const assetsFilePrefix = assetsDir && `${assetsDir}/`; // Generate the plugins const plugins: webpack.Plugin[] = [ // Perform type checking for TypeScript ...getCommonPlugins({ frontend: false, devServer, assetsFilePrefix, tsConfigPath, sourceDirPath, compilerOptions, }), /** * Prevent `pg` module to import `pg-native` binding library. */ new webpack.IgnorePlugin({ resourceRegExp: /^\.\/native$/, contextRegExp: /node_modules\/pg\/lib$/, }), ]; if (!devServer) { // Generate some stats for the bundles plugins.push(getBundleAnalyzerPlugin(analyze, path.resolve(stageDirPath, `report-backend.html`))); } // Entry points to be bundled const entries: Record<string, string> = { // Entry point for rendering the views on server-side server: require.resolve(devServer ? './bootstrap/local-server' : './bootstrap/server'), }; // Aliases that entry points will `require` const aliases: Record<string, string> = { _site: path.resolve(projectRootPath, sourceDir, siteFile), }; // Modules excluded from the bundle const externals = [ // No need to bundle AWS SDK for compilation, because it will be available in the Lambda node environment 'aws-sdk', ]; // If an API is defined, compile it as well if (serverFile) { // eslint-disable-next-line no-underscore-dangle aliases._service = path.resolve(projectRootPath, sourceDir, serverFile); } else { // API not available. Let the bundle to compile without it, but // raise error if attempting to `require` externals.push('_service'); } // If a database defined, compile it as well if (databaseFile) { // eslint-disable-next-line no-underscore-dangle aliases._db = path.resolve(projectRootPath, sourceDir, databaseFile); } else { // Database not available. 
Let the bundle to compile without it, but // raise error if attempting to `require` externals.push('_db'); } // If a triggers file is defined, compile it as well if (triggersFile) { // eslint-disable-next-line no-underscore-dangle aliases._triggers = path.resolve(projectRootPath, sourceDir, triggersFile); } else { // Triggers not available. Let the bundle to compile without it, but // raise error if attempting to `require` externals.push('_triggers'); } return { context: projectRootPath, // Development or production build? mode: devServer || debug ? 'development' : 'production', optimization: { // For better tracebacks, do not minify server-side code, // even in production. minimize: false, }, // Build for running in node environment, instead of web browser target: 'node', // The main entry points for source files. entry: entries, output: { // Output files are placed to this folder path: buildDirPath, // The file name template for the entry chunks filename: devServer ? '[name].js' : '[name].[hash].js', // The URL to the output directory resolved relative to the HTML page publicPath: `${assetsRoot}/`, // Export so for use in a Lambda function libraryTarget: 'commonjs2', }, module: { rules: getCommonRules({ devServer, debug, tsConfigPath, compilerOptions, assetsFilePrefix, emitFile: false, }), }, externals, resolve: { // Add '.ts' and '.tsx' as resolvable extensions. extensions: ['.ts', '.tsx', '.js'], alias: aliases, }, resolveLoader: { // Look from this library's node modules! modules: [ownModulesDirPath, modulesDirPath], }, // Enable sourcemaps for debugging webpack's output. devtool: 'source-map', // Plugins plugins, }; } function getCommonPlugins(options: { frontend: boolean; devServer: boolean; assetsFilePrefix: string; tsConfigPath: string; sourceDirPath: string; compilerOptions?: unknown; }): webpack.Plugin[] { const { frontend, devServer, assetsFilePrefix, tsConfigPath, sourceDirPath, compilerOptions } = options; const cssFilePrefix = `${assetsFilePrefix}css/`; return [ // https://github.com/faceyspacey/extract-css-chunks-webpack-plugin new ExtractCssChunks({ filename: devServer ? `${cssFilePrefix}[name].css` : `${cssFilePrefix}[contenthash].css`, chunkFilename: devServer ? `${cssFilePrefix}[id].css` : `${cssFilePrefix}[contenthash].css`, ignoreOrder: false, }), // Perform type checking for TypeScript new ForkTsCheckerWebpackPlugin({ typescript: { configFile: tsConfigPath, configOverwrite: { compilerOptions, }, }, // When running the dev server, the backend compilation will handle ESlinting eslint: frontend && devServer ? undefined : { files: path.join(sourceDirPath, '**', '*.{ts,tsx,js,jsx}'), }, }), // Prevent all the MomentJS locales to be imported by default. new webpack.ContextReplacementPlugin( /\bmoment[/\\]locale\b/, // Regular expression to match the files that should be imported /\ben.js/, ), ]; } function
(options: { assetsFilePrefix: string; debug: boolean; devServer: boolean; tsConfigPath: string; emitFile: boolean; compilerOptions?: unknown; }): webpack.RuleSetRule[] { const { tsConfigPath, compilerOptions, debug, devServer, assetsFilePrefix, emitFile } = options; return [ // Pre-process sourcemaps for scripts { test: /\.(jsx?|tsx?)$/, loader: 'source-map-loader', enforce: 'pre' as const, }, // Compile TypeScript files ('.ts' or '.tsx') { test: /\.tsx?$/, loader: 'ts-loader', options: { // Explicitly expect the tsconfig.json to be located at the project root configFile: tsConfigPath, // Disable type checker - use `fork-ts-checker-webpack-plugin` for that purpose instead transpileOnly: true, compilerOptions, }, }, // Extract stylesheets as separate CSS files { test: /\.css$/i, sideEffects: true, use: [ { loader: ExtractCssChunks.loader, options: { esModule: true, }, }, { loader: 'css-loader', options: { modules: { mode: 'local', // Auto-generated class names contain the original name on development localIdentName: debug || devServer ? '[local]--[hash:base64:5]' : '[hash:base64]', }, }, }, ], }, // Optimize image files and bundle them as files or data URIs { test: /\.(gif|png|jpe?g|svg)$/, use: [ { loader: 'url-loader', options: { // Max bytes to be converted to inline data URI limit: 100, // Asset files Files not emitted on server-side compilation emitFile, // If larger, then convert to a file instead name: `${assetsFilePrefix}images/[name].[hash].[ext]`, }, }, { loader: 'image-webpack-loader', options: { disable: debug || devServer, optipng: { optimizationLevel: 7, }, }, }, ], }, // Include font files either as data URIs or separate files { test: /\.(eot|ttf|otf|woff2?|svg)($|\?|#)/, loader: 'url-loader', options: { // Max bytes to be converted to inline data URI limit: 100, // Asset files Files not emitted on server-side compilation emitFile, // If larger, then convert to a file instead name: `${assetsFilePrefix}fonts/[name].[hash].[ext]`, }, }, ]; } function getBundleAnalyzerPlugin(enabled: boolean, filename: string) { return new BundleAnalyzerPlugin({ // Can be `server`, `static` or `disabled`. // In `server` mode analyzer will start HTTP server to show bundle report. // In `static` mode single HTML file with bundle report will be generated. // In `disabled` mode you can use this plugin to just generate Webpack Stats JSON file by setting `generateStatsFile` to `true`. analyzerMode: enabled ? 'static' : 'disabled', // Host that will be used in `server` mode to start HTTP server. analyzerHost: '127.0.0.1', // Port that will be used in `server` mode to start HTTP server. analyzerPort: 8888, // Path to bundle report file that will be generated in `static` mode. // Relative to bundles output directory. reportFilename: filename, // Module sizes to show in report by default. // Should be one of `stat`, `parsed` or `gzip`. // See "Definitions" section for more information. defaultSizes: 'parsed', // Automatically open report in default browser openAnalyzer: enabled, // If `true`, Webpack Stats JSON file will be generated in bundles output directory generateStatsFile: false, // Name of Webpack Stats JSON file that will be generated if `generateStatsFile` is `true`. // Relative to bundles output directory. statsFilename: 'stats.json', // Options for `stats.toJson()` method. // For example you can exclude sources of your modules from stats file with `source: false` option. 
// See more options here: https://github.com/webpack/webpack/blob/webpack-1/lib/Stats.js#L21 statsOptions: null, // Log level. Can be 'info', 'warn', 'error' or 'silent'. logLevel: 'info', }); }
getCommonRules
identifier_name
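Both factories return plain `webpack.Configuration` objects, and webpack accepts an array of configurations, so a consuming build script could compile the frontend and backend bundles in one run. A hypothetical wrapper along those lines (the function name and file layout are assumptions, not part of this module):

import * as webpack from 'webpack';
import { getBackendWebpackConfig, getFrontendWebpackConfig, WebpackConfigOptions } from './webpack';

// Hypothetical helper: combine both configurations for a single multi-compiler run,
// e.g. exported from the consuming project's webpack.config file.
export function getAllWebpackConfigs(options: WebpackConfigOptions): webpack.Configuration[] {
    return [getFrontendWebpackConfig(options), getBackendWebpackConfig(options)];
}

Building both bundles from one shared options object keeps paths, `assetsRoot`, and the dev-server flags consistent between the frontend and backend outputs.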
webpack.ts
/* eslint-disable no-underscore-dangle */ /* eslint-disable @typescript-eslint/no-var-requires */ import * as path from 'path'; import * as url from 'url'; import * as webpack from 'webpack'; import type { BroilerConfig } from './config'; import { executeSync } from './exec'; // Webpack plugins const ExtractCssChunks = require('extract-css-chunks-webpack-plugin'); const FaviconsWebpackPlugin = require('favicons-webpack-plugin'); const ForkTsCheckerWebpackPlugin = require('fork-ts-checker-webpack-plugin'); const HtmlWebpackPlugin = require('html-webpack-plugin'); const { BundleAnalyzerPlugin } = require('webpack-bundle-analyzer'); export interface WebpackConfigOptions extends BroilerConfig { devServer: boolean; analyze: boolean; } /** * Creates the Webpack 2 configuration for the front-end asset compilation. * The options are documented at * https://webpack.js.org/configuration/ */ export function getFrontendWebpackConfig(config: WebpackConfigOptions): webpack.Configuration { const { devServer, debug, iconFile, sourceDir, buildDir, stageDir, title, siteFile, projectRootPath, analyze, assetsRoot, serverRoot, } = config; // Resolve modules, source, build and static paths const sourceDirPath = path.resolve(projectRootPath, sourceDir); const stageDirPath = path.resolve(projectRootPath, stageDir); const buildDirPath = path.resolve(projectRootPath, buildDir); const modulesDirPath = path.resolve(projectRootPath, 'node_modules'); const ownModulesDirPath = path.resolve(__dirname, 'node_modules'); const tsConfigPath = path.resolve(projectRootPath, './tsconfig.json'); // Determine the directory for the assets and the site const assetsRootUrl = url.parse(assetsRoot); const assetsPath = assetsRootUrl.pathname || '/'; const assetsDir = assetsPath.replace(/^\/+/, ''); const assetsFilePrefix = assetsDir && `${assetsDir}/`; const assetsOrigin = `${assetsRootUrl.protocol}//${assetsRootUrl.host}`; const gitCommitHash = executeSync('git rev-parse HEAD'); const gitVersion = executeSync('git describe --always --dirty="-$(git diff-tree HEAD | md5 -q | head -c 8)"'); const gitBranch = executeSync('git rev-parse --abbrev-ref HEAD'); // Generate the plugins const plugins: webpack.Plugin[] = [ ...getCommonPlugins({ frontend: true, devServer, assetsFilePrefix, tsConfigPath, sourceDirPath }), // Create HTML plugins for each webpage new HtmlWebpackPlugin({ title, filename: devServer ? 'index.html' : 'index.[hash].html', template: path.resolve(__dirname, './res/index.html'), chunks: ['app'], // Insert tags for stylesheets and scripts inject: 'body', // No cache-busting needed, because hash is included in file names hash: false, }), /** * Replace "global variables" from the scripts with the constant values. */ new webpack.DefinePlugin({ // Allow using the GIT commit hash ID __COMMIT_HASH__: JSON.stringify(gitCommitHash), // Allow using the GIT version __VERSION__: JSON.stringify(gitVersion), // Allow using the GIT branch name __BRANCH__: JSON.stringify(gitBranch), }), ]; if (!devServer) { plugins.push( // Generate some stats for the bundles getBundleAnalyzerPlugin(analyze, path.resolve(stageDirPath, `report-frontend.html`)), ); } // Define the entry for the app const entries: Record<string, string[]> = { app: [require.resolve(devServer ? './bootstrap/local-site' : './bootstrap/site')], }; /** * If icon source file is provided, generate icons for the app. 
* For configuration, see https://github.com/jantimon/favicons-webpack-plugin */ if (iconFile) { plugins.push( new FaviconsWebpackPlugin({ // Your source logo logo: path.resolve(sourceDirPath, iconFile), // The prefix for all image files (might be a folder or a name) prefix: devServer ? `${assetsFilePrefix}icons/` : `${assetsFilePrefix}icons/[hash]/`, // Inject the html into the html-webpack-plugin inject: true, // Locate the cache folder inside the .broiler directory cache: path.resolve(stageDirPath, '.fwp-cache'), // The configuration passed to `favicons`: // https://github.com/itgalaxy/favicons#usage // NOTE: The most of the metadata is read automatically from package.json favicons: { // Your application's name. `string` appName: title, // TODO: Your application's description. `string` appDescription: null, // TODO: Your (or your developer's) name. `string` developerName: null, // TODO: Your (or your developer's) URL. `string` developerURL: null, // TODO: Your application's version string. `string` version: null, // Start URL when launching the application from a device. `string` start_url: serverRoot, // Print logs to console? `boolean` logging: false, /** * Which icons should be generated. * Platform Options: * - offset - offset in percentage * - shadow - drop shadow for Android icons, available online only * - background: * * false - use default * * true - force use default, e.g. set background for Android icons * * color - set background for the specified icons */ icons: { // Create Android homescreen icon. `boolean` or `{ offset, background, shadow }` android: !devServer && !debug, // Create Apple touch icons. `boolean` or `{ offset, background }` appleIcon: !devServer && !debug, // Create Apple startup images. `boolean` or `{ offset, background }` appleStartup: !devServer && !debug, // Create Opera Coast icon with offset 25%. `boolean` or `{ offset, background }` coast: false, // Create regular favicons. `boolean` favicons: true, // Create Firefox OS icons. `boolean` or `{ offset, background }` firefox: false, // Create Windows 8 tile icons. `boolean` or `{ background }` windows: !devServer && !debug, // Create Yandex browser icon. `boolean` or `{ background }` yandex: false, }, }, }), ); } return { context: projectRootPath, // Development or production build? mode: devServer || debug ? 'development' : 'production', // The main entry points for source files. entry: entries, // Supposed to run in a browser target: 'web', output: { // Output files are placed to this folder path: buildDirPath, // The file name template for the entry chunks filename: devServer ? `${assetsFilePrefix}[name].js` : `${assetsFilePrefix}[name].[chunkhash].js`, // The URL to the output directory resolved relative to the HTML page // This will be the origin, not including the path, because that will be used as a subdirectory for files. publicPath: `${assetsOrigin}/`, // The name of the exported library, e.g. the global variable name library: 'app', // How the library is exported? E.g. 'var', 'this' libraryTarget: 'var', }, module: { rules: getCommonRules({ tsConfigPath, debug, devServer, assetsFilePrefix, emitFile: true }), }, resolve: { // Add '.ts' and '.tsx' as resolvable extensions. extensions: ['.ts', '.tsx', '.js'], alias: { // The entry point will `require` this module for finding the website component _site: path.resolve(projectRootPath, sourceDir, siteFile), }, }, resolveLoader: { // Look from this library's node modules! 
modules: [ownModulesDirPath, modulesDirPath], }, // Behavior for polyfilling node modules node: { // The default value `true` seems not to work with RxJS // TODO: Take a look if this can be enabled setImmediate: false, }, // Enable sourcemaps for debugging webpack's output. devtool: devServer ? 'inline-source-map' : 'source-map', // Plugins plugins, }; } /** * Creates the Webpack 2 configuration for the back-end code compilation. * The options are documented at * https://webpack.js.org/configuration/ */ export function getBackendWebpackConfig(config: WebpackConfigOptions): webpack.Configuration { const { serverFile, databaseFile, siteFile, triggersFile } = config; const { sourceDir, buildDir, projectRootPath, devServer, debug, assetsRoot } = config; const { analyze, stageDir } = config; // Resolve modules, source, build and static paths const sourceDirPath = path.resolve(projectRootPath, sourceDir); const buildDirPath = path.resolve(projectRootPath, buildDir); const modulesDirPath = path.resolve(projectRootPath, 'node_modules'); const ownModulesDirPath = path.resolve(__dirname, 'node_modules'); const stageDirPath = path.resolve(projectRootPath, stageDir); // Use the tsconfig.json in the project folder (not in this library) const tsConfigPath = path.resolve(projectRootPath, './tsconfig.json'); // Target backend always to ES2018 const compilerOptions = { target: 'ES2017' } as const; // Determine the directory for the assets and the site const assetsRootUrl = url.parse(assetsRoot); const assetsPath = assetsRootUrl.pathname || '/'; const assetsDir = assetsPath.replace(/^\/+/, ''); const assetsFilePrefix = assetsDir && `${assetsDir}/`; // Generate the plugins const plugins: webpack.Plugin[] = [ // Perform type checking for TypeScript ...getCommonPlugins({ frontend: false, devServer, assetsFilePrefix, tsConfigPath, sourceDirPath, compilerOptions, }), /** * Prevent `pg` module to import `pg-native` binding library. */ new webpack.IgnorePlugin({ resourceRegExp: /^\.\/native$/, contextRegExp: /node_modules\/pg\/lib$/, }), ]; if (!devServer) { // Generate some stats for the bundles plugins.push(getBundleAnalyzerPlugin(analyze, path.resolve(stageDirPath, `report-backend.html`))); } // Entry points to be bundled const entries: Record<string, string> = { // Entry point for rendering the views on server-side server: require.resolve(devServer ? './bootstrap/local-server' : './bootstrap/server'), }; // Aliases that entry points will `require` const aliases: Record<string, string> = { _site: path.resolve(projectRootPath, sourceDir, siteFile), }; // Modules excluded from the bundle const externals = [ // No need to bundle AWS SDK for compilation, because it will be available in the Lambda node environment 'aws-sdk', ]; // If an API is defined, compile it as well if (serverFile)
else { // API not available. Let the bundle to compile without it, but // raise error if attempting to `require` externals.push('_service'); } // If a database defined, compile it as well if (databaseFile) { // eslint-disable-next-line no-underscore-dangle aliases._db = path.resolve(projectRootPath, sourceDir, databaseFile); } else { // Database not available. Let the bundle to compile without it, but // raise error if attempting to `require` externals.push('_db'); } // If a triggers file is defined, compile it as well if (triggersFile) { // eslint-disable-next-line no-underscore-dangle aliases._triggers = path.resolve(projectRootPath, sourceDir, triggersFile); } else { // Triggers not available. Let the bundle to compile without it, but // raise error if attempting to `require` externals.push('_triggers'); } return { context: projectRootPath, // Development or production build? mode: devServer || debug ? 'development' : 'production', optimization: { // For better tracebacks, do not minify server-side code, // even in production. minimize: false, }, // Build for running in node environment, instead of web browser target: 'node', // The main entry points for source files. entry: entries, output: { // Output files are placed to this folder path: buildDirPath, // The file name template for the entry chunks filename: devServer ? '[name].js' : '[name].[hash].js', // The URL to the output directory resolved relative to the HTML page publicPath: `${assetsRoot}/`, // Export so for use in a Lambda function libraryTarget: 'commonjs2', }, module: { rules: getCommonRules({ devServer, debug, tsConfigPath, compilerOptions, assetsFilePrefix, emitFile: false, }), }, externals, resolve: { // Add '.ts' and '.tsx' as resolvable extensions. extensions: ['.ts', '.tsx', '.js'], alias: aliases, }, resolveLoader: { // Look from this library's node modules! modules: [ownModulesDirPath, modulesDirPath], }, // Enable sourcemaps for debugging webpack's output. devtool: 'source-map', // Plugins plugins, }; } function getCommonPlugins(options: { frontend: boolean; devServer: boolean; assetsFilePrefix: string; tsConfigPath: string; sourceDirPath: string; compilerOptions?: unknown; }): webpack.Plugin[] { const { frontend, devServer, assetsFilePrefix, tsConfigPath, sourceDirPath, compilerOptions } = options; const cssFilePrefix = `${assetsFilePrefix}css/`; return [ // https://github.com/faceyspacey/extract-css-chunks-webpack-plugin new ExtractCssChunks({ filename: devServer ? `${cssFilePrefix}[name].css` : `${cssFilePrefix}[contenthash].css`, chunkFilename: devServer ? `${cssFilePrefix}[id].css` : `${cssFilePrefix}[contenthash].css`, ignoreOrder: false, }), // Perform type checking for TypeScript new ForkTsCheckerWebpackPlugin({ typescript: { configFile: tsConfigPath, configOverwrite: { compilerOptions, }, }, // When running the dev server, the backend compilation will handle ESlinting eslint: frontend && devServer ? undefined : { files: path.join(sourceDirPath, '**', '*.{ts,tsx,js,jsx}'), }, }), // Prevent all the MomentJS locales to be imported by default. 
new webpack.ContextReplacementPlugin( /\bmoment[/\\]locale\b/, // Regular expression to match the files that should be imported /\ben.js/, ), ]; } function getCommonRules(options: { assetsFilePrefix: string; debug: boolean; devServer: boolean; tsConfigPath: string; emitFile: boolean; compilerOptions?: unknown; }): webpack.RuleSetRule[] { const { tsConfigPath, compilerOptions, debug, devServer, assetsFilePrefix, emitFile } = options; return [ // Pre-process sourcemaps for scripts { test: /\.(jsx?|tsx?)$/, loader: 'source-map-loader', enforce: 'pre' as const, }, // Compile TypeScript files ('.ts' or '.tsx') { test: /\.tsx?$/, loader: 'ts-loader', options: { // Explicitly expect the tsconfig.json to be located at the project root configFile: tsConfigPath, // Disable type checker - use `fork-ts-checker-webpack-plugin` for that purpose instead transpileOnly: true, compilerOptions, }, }, // Extract stylesheets as separate CSS files { test: /\.css$/i, sideEffects: true, use: [ { loader: ExtractCssChunks.loader, options: { esModule: true, }, }, { loader: 'css-loader', options: { modules: { mode: 'local', // Auto-generated class names contain the original name on development localIdentName: debug || devServer ? '[local]--[hash:base64:5]' : '[hash:base64]', }, }, }, ], }, // Optimize image files and bundle them as files or data URIs { test: /\.(gif|png|jpe?g|svg)$/, use: [ { loader: 'url-loader', options: { // Max bytes to be converted to inline data URI limit: 100, // Asset files Files not emitted on server-side compilation emitFile, // If larger, then convert to a file instead name: `${assetsFilePrefix}images/[name].[hash].[ext]`, }, }, { loader: 'image-webpack-loader', options: { disable: debug || devServer, optipng: { optimizationLevel: 7, }, }, }, ], }, // Include font files either as data URIs or separate files { test: /\.(eot|ttf|otf|woff2?|svg)($|\?|#)/, loader: 'url-loader', options: { // Max bytes to be converted to inline data URI limit: 100, // Asset files Files not emitted on server-side compilation emitFile, // If larger, then convert to a file instead name: `${assetsFilePrefix}fonts/[name].[hash].[ext]`, }, }, ]; } function getBundleAnalyzerPlugin(enabled: boolean, filename: string) { return new BundleAnalyzerPlugin({ // Can be `server`, `static` or `disabled`. // In `server` mode analyzer will start HTTP server to show bundle report. // In `static` mode single HTML file with bundle report will be generated. // In `disabled` mode you can use this plugin to just generate Webpack Stats JSON file by setting `generateStatsFile` to `true`. analyzerMode: enabled ? 'static' : 'disabled', // Host that will be used in `server` mode to start HTTP server. analyzerHost: '127.0.0.1', // Port that will be used in `server` mode to start HTTP server. analyzerPort: 8888, // Path to bundle report file that will be generated in `static` mode. // Relative to bundles output directory. reportFilename: filename, // Module sizes to show in report by default. // Should be one of `stat`, `parsed` or `gzip`. // See "Definitions" section for more information. defaultSizes: 'parsed', // Automatically open report in default browser openAnalyzer: enabled, // If `true`, Webpack Stats JSON file will be generated in bundles output directory generateStatsFile: false, // Name of Webpack Stats JSON file that will be generated if `generateStatsFile` is `true`. // Relative to bundles output directory. statsFilename: 'stats.json', // Options for `stats.toJson()` method. 
// For example you can exclude sources of your modules from stats file with `source: false` option. // See more options here: https://github.com/webpack/webpack/blob/webpack-1/lib/Stats.js#L21 statsOptions: null, // Log level. Can be 'info', 'warn', 'error' or 'silent'. logLevel: 'info', }); }
{
  // eslint-disable-next-line no-underscore-dangle
  aliases._service = path.resolve(projectRootPath, sourceDir, serverFile);
}
conditional_block
calibration.py
### Calibration과 이미지 Merge에 사용되는 모듈 ### import numpy as np from PIL import Image # from skimage import color, viewer, img_as_float, img_as_ubyte, img_as_uint, data # from skimage.filters import gaussian # from skimage.color import rgb2gray # import matplotlib.image as mpimg import matplotlib.pylab as plt import seaborn as sns import os import cv2 from DuxCamera.ImgProcessing.basic import point2_distance, get_crosspt # BLOB 찾는 함수 # https://www.learnopencv.com/blob-detection-using-opencv-python-c/ # https://www.theteams.kr/teams/7191/post/70373 # https://docs.opencv.org/2.4/modules/features2d/doc/common_interfaces_of_feature_detectors.html?highlight=blob def simpleBlobDetect(img) : img_copy = np.copy(img) blob_info = [] # 추출한 blob 정보 # blob detection params = cv2.SimpleBlobDetector_Params() params.blobColor = 255 # 밝은 얼룩 추출 # params.minThreshold = 240 # params.maxThreshold = 255 params.filterByArea = True params.minArea = 10*10; params.maxArea = 200*200 params.filterByCircularity = True params.minCircularity = 0.8; # 원 = 1.0 # 사각형 = 0.785 params.filterByConvexity = False params.filterByInertia = True params.minInertiaRatio = 0.7; # 타원~원 = 0~1 # 줄 = 0 detector = cv2.SimpleBlobDetector_create(params) keypoints = detector.detect(img_copy) print('Detecting한 Blob개수 : ', len(keypoints)) # Blob labeling 수행 im_with_keypoints = cv2.drawKeypoints(img_copy, keypoints, np.array([]), (0, 0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS) for k in keypoints : x, y = k.pt x,y = int(x), int(y) print(k.pt, k.size,k.class_id) # 추출결과의 중심, 추출결과의 diameter (blob의 직경x) cv2.circle(img_copy, (x,y), 1, (155, 155, 155), 10) cv2.circle(img_copy, (x,y), int(k.size/2), (155, 155, 155), 10) blob_info.append([x,y,k.size]) # x,y, diameter 정보 blob_info = np.array(blob_info) # argmin, argmx 를 위해 numpy 사용 plt.figure(figsize=(15,15)) plt.subplot(121), plt.imshow(img, cmap='gray'), plt.title('origin_binary_img') plt.subplot(122), plt.imshow(img_copy, cmap='gray'), plt.title('Blob info') plt.show(); return blob_info # for quad camera # 무게중심(CX,CY) 찾기, 및 BLOB 사각형의 'ㄱ'부분 길이 구하기 (ref_square_w, ref_square_h), def find_centroid_for_quadcam(img, blob_info) : img_h, img_w = np.shape(img) img_copy = np.copy(img) # 무게중심 표시를 위한 img img_temp = np.zeros((img_h, img_w), dtype=np.uint8) # 25개의 blob중 가장자리 blob 으로 무게중심 찾기 위한 사각형 x_min_blob = blob_info[np.argmin(blob_info[::, 0])] # 모든 x에서 가장 작은 blob x_max_blob = blob_info[np.argmax(blob_info[::, 0])] y_min_blob = blob_info[np.argmin(blob_info[::, 1])] # 모든 x에서 가장 작은 blob y_max_blob = blob_info[np.argmax(blob_info[::, 1])] # int로 변경 x_min_blob = x_min_blob.astype(np.int) x_max_blob = x_max_blob.astype(np.int) y_min_blob = y_min_blob.astype(np.int) y_max_blob = y_max_blob.astype(np.int) print('x_min_blob : ', x_min_blob[0:2]) print('x_max_blob : ', x_max_blob[0:2]) print('y_min_blob : ', y_min_blob[0:2]) print('y_max_blob : ', y_max_blob[0:2]) # side blob point 표시 # cv2.circle(img_temp, (x_min_blob[0], x_min_blob[1]), 1, (155, 155, 155), 10) # cv2.circle(img_temp, (x_max_blob[0], x_max_blob[1]), 1, (155, 155, 155), 10) # cv2.circle(img_temp, (y_min_blob[0], y_min_blob[1]), 1, (155, 155, 155), 10) # cv2.circle(img_temp, (y_max_blob[0], y_max_blob[1]), 1, (155, 155, 155), 10) # 해당 side 포인트이 꼭지점을 이루는 사각형 그리기 pts = np.array([[x_max_blob[0],x_max_blob[1]], [y_min_blob[0],y_min_blob[1]], [x_min_blob[0],x_min_blob[1]], [y_max_blob[0],y_max_blob[1]]], np.int32) pts = pts.reshape((-1,1,2)) cv2.polylines(img_copy, [pts], isClosed = True, color = (155, 155, 155), thickness = 10) # 사각형 그리기 
cv2.fillPoly(img_temp, [pts], (155, 155, 155), cv2.LINE_AA) # 채워진 사각형 그리기 # cv2.fillPoly(img_copy, [pts], (155, 155, 155), cv2.LINE_AA) # 채워진 사각형 그리기 # img_temp의 무게중심 구하기 # contours, hierarchy = cv2.findContours(img_temp, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE) # for i in contours: # M = cv2.moments(i) # cX = int(M['m10'] / M['m00']) # cY = int(M['m01'] / M['m00']) # cv2.drawContours(img_temp, [i], 0, (100, 100, 100), 10) ## 두 선분의 교점으로 구하기 cX, cY = get_crosspt(x_min_blob[0:2], x_max_blob[0:2], y_min_blob[0:2], y_max_blob[0:2]) cX = int(cX) cY = int(cY)
print('Centroid : ', cX, cY) # ref_square에 구하기 'ㄱ'부분 길이 구하기 ref_square_w = point2_distance(y_min_blob[0:2], x_max_blob[0:2]) # 'ㄱ'의 'ㅡ'부분 ref_square_h = point2_distance(y_max_blob[0:2], x_max_blob[0:2]) # 'ㄱ'의 '|'부분 print('ref_square_w : ', ref_square_w) print('ref_square_h : ', ref_square_h) plt.figure(figsize=(20,10)) plt.subplot(121), plt.imshow(img_copy, cmap='gray'), plt.title('Centroid Point') plt.subplot(122), plt.imshow(img_temp, cmap='gray'), plt.title('Ref square from Side_Blob') plt.show(); return ((int(cX), int(cY)), (int(ref_square_w), int(ref_square_h)), ((x_max_blob[0], x_max_blob[1]), (y_min_blob[0], y_min_blob[1]), (x_min_blob[0], x_min_blob[1]), (y_max_blob[0], y_max_blob[1]))) # 25개 blob의 무게중심(cX, cY) # ref 사각형의 w,h # 사각형의 네 꼭지점 정보 # return[0] = (cX, cY) # 25 blob 사각형의 무게중심 # return[1] = (ref_square_w, ref_square_h) # 해당 사각형의 w,h # return [2] = (xmax, ymin, xmin, ymax) # ref 사각형의 w,h # 사각형의 네 꼭지점 정보 # for single camera # 무게중심(CX,CY) 찾기, 및 BLOB 사각형의 'ㄱ'부분 길이 구하기 (ref_square_w, ref_square_h), def find_centroid_for_singlecam(img, blob_info): img_h, img_w = np.shape(img) img_copy = np.copy(img) # 무게중심 표시를 위한 img img_temp = np.zeros((img_h, img_w), dtype=np.uint8) # 25개의 blob중 가장자리 blob 으로 무게중심 찾기 위한 사각형 # blob_info = [x,y,diameter] # find 5 ymin 5 ymax blob sorted_y_blob = blob_info[blob_info[::, 1].argsort()] # y기준 sort y_min_5_blob = sorted_y_blob[:5] # 모든 y에서 가장 작은 5개 blob 후보군 y_max_5_blob = sorted_y_blob[-5:] # 모든 y에서 가장 큰 5개 blob 후보군 x_max_blob_of_y_min = y_min_5_blob[np.argmax(y_min_5_blob[::, 0])] # (1) x_min_blob_of_y_min = y_min_5_blob[np.argmin(y_min_5_blob[::, 0])] # y min 5 blob 중에서 가장 작은 x blob # (2) x_min_blob_of_y_max = y_max_5_blob[np.argmin(y_max_5_blob[::, 0])] # y max 5 blob 중에서 가장 작은 x blob # (3) x_max_blob_of_y_max = y_max_5_blob[np.argmax(y_max_5_blob[::, 0])] # (4) # int로 변경 x_max_blob_of_y_min = x_max_blob_of_y_min.astype(np.int) x_min_blob_of_y_min = x_min_blob_of_y_min.astype(np.int) x_min_blob_of_y_max = x_min_blob_of_y_max.astype(np.int) x_max_blob_of_y_max = x_max_blob_of_y_max.astype(np.int) print('x_max_blob_of_y_min : ', x_max_blob_of_y_min[0:2]) print('x_min_blob_of_y_min : ', x_min_blob_of_y_min[0:2]) print('x_min_blob_of_y_max : ', x_min_blob_of_y_max[0:2]) print('x_max_blob_of_y_max : ', x_max_blob_of_y_max[0:2]) # side blob point 표시 # cv2.circle(img_temp, (x_min_blob[0], x_min_blob[1]), 1, (155, 155, 155), 10) # cv2.circle(img_temp, (x_max_blob[0], x_max_blob[1]), 1, (155, 155, 155), 10) # cv2.circle(img_temp, (y_min_blob[0], y_min_blob[1]), 1, (155, 155, 155), 10) # cv2.circle(img_temp, (y_max_blob[0], y_max_blob[1]), 1, (155, 155, 155), 10) # 해당 side 포인트이 꼭지점을 이루는 사각형 그리기 pts = np.array([[x_max_blob_of_y_min[0], x_max_blob_of_y_min[1]], [x_min_blob_of_y_min[0], x_min_blob_of_y_min[1]], [x_min_blob_of_y_max[0], x_min_blob_of_y_max[1]], [x_max_blob_of_y_max[0], x_max_blob_of_y_max[1]]], np.int32) pts = pts.reshape((-1, 1, 2)) cv2.polylines(img_copy, [pts], isClosed=True, color=(155, 155, 155), thickness=10) # 사각형 그리기 cv2.fillPoly(img_temp, [pts], (155, 155, 155), cv2.LINE_AA) # 채워진 사각형 그리기 # cv2.fillPoly(img_copy, [pts], (155, 155, 155), cv2.LINE_AA) # 채워진 사각형 그리기 # img_temp의 무게중심 구하기 # contours, hierarchy = cv2.findContours(img_temp, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE) # for i in contours: # M = cv2.moments(i) # cX = int(M['m10'] / M['m00']) # cY = int(M['m01'] / M['m00']) # # cv2.circle(img_temp, (cX, cY), 15, (100, 100, 100), -1) # cv2.circle(img_copy, (cX, cY), 15, (100, 100, 100), -1) # cv2.drawContours(img_temp, [i], 0, (100, 
100, 100), 10) ## 두 선분의 교점으로 구하기 cX, cY = get_crosspt(x_min_blob_of_y_min[0:2], x_max_blob_of_y_max[0:2], x_max_blob_of_y_min[0:2], x_min_blob_of_y_max[0:2]) cX = int(cX) cY = int(cY) cv2.circle(img_temp, (cX, cY), 15, (100, 100, 100), -1) cv2.circle(img_copy, (cX, cY), 15, (100, 100, 100), -1) print('Centroid : ', cX, cY) # ref_square에 구하기 'ㄱ'부분 길이 구하기 ref_square_w = point2_distance(x_max_blob_of_y_min[0:2], x_min_blob_of_y_min[0:2]) # 'ㄱ'의 'ㅡ'부분 # 1 - 2 ref_square_h = point2_distance(x_max_blob_of_y_min[0:2], x_max_blob_of_y_max[0:2]) # 'ㄱ'의 '|'부분 # 1 - 4 print('ref_square_w : ', ref_square_w) print('ref_square_h : ', ref_square_h) plt.figure(figsize=(20, 10)) plt.subplot(121), plt.imshow(img_copy, cmap='gray'), plt.title('Centroid Point') plt.subplot(122), plt.imshow(img_temp, cmap='gray'), plt.title('Ref square from Side_Blob') plt.show(); return ((int(cX), int(cY)), (int(ref_square_w), int(ref_square_h)), ( (x_max_blob_of_y_min[0], x_max_blob_of_y_min[1]), (x_min_blob_of_y_min[0], x_min_blob_of_y_min[1]), (x_min_blob_of_y_max[0], x_min_blob_of_y_max[1]), (x_max_blob_of_y_max[0], x_max_blob_of_y_max[1]))) # 25개 blob의 무게중심(cX, cY) # ref 사각형의 w,h # 사각형의 네 꼭지점 정보 # return[0] = (cX, cY) # 25 blob 사각형의 무게중심 # return[1] = (ref_square_w, ref_square_h) # 해당 사각형의 w,h # return [2] = (xmax, ymin, xmin, ymax) # ref 사각형의 w,h # 사각형의 네 꼭지점 정보 # 지정한 중점에서 theta만큼 이미지 돌리기 def img_affine(img, centroid, theta) : #### aFFINE 시 0~255 -> 0~1로 변경 img_copy = np.copy(img) # 회전하기 전에 center 표시 하여 얼마나 돌아갔는지 확인 img_copy = cv2.circle(img_copy, centroid, 1, (220, 220, 0), 30) img_copy = cv2.putText(img_copy, 'theta = ' + str(theta), centroid, cv2.FONT_HERSHEY_SIMPLEX, 4, (0, 0, 255), cv2.LINE_AA) # 회전 opcn cv ''' img_h, img_w = img.shape[0:2] # matrix = cv2.getRotationMatrix2D((img_w/2, img_h/2), theta, 1) matrix = cv2.getRotationMatrix2D(centroid, theta, 1) dst = cv2.warpAffine(img, matrix, (img_w, img_h)) # 0~1로 변경됨 ''' # 회전 pil img_h, img_w = img.shape[0:2] # pil 객체로 변경 dst = Image.fromarray(img.astype('uint8'), 'L') dst = dst.rotate(theta, center=centroid, expand=False, resample=Image.NEAREST) # theta만큼 회전 # 다시 numpy로 변경 dst = np.array(dst) plt.figure(figsize=(10,10)) plt.subplot(121), plt.imshow(img_copy, cmap='gray'), plt.title('Before_affine') plt.subplot(122), plt.imshow(dst, cmap='gray'), plt.title('After_affine') plt.show(); print('img. max : ', np.max(np.unique(img)), 'img. min : ', np.min(np.unique(img))) print('affine img. max : ', np.max(np.unique(dst)), 'affine img. min : ', np.min(np.unique(dst))) return dst # centroid를 중심으로 돌린 이미지를 설정한 기준으로 hw를 잘라 합성하기 위한 이미지를 만드는 함수 def img_cutting(img, centroids, shape = 1500) : # 2000 img_copy = np.copy(img) result_h, result_w = shape, shape center_x, center_y = centroids # 시작 인덱스 start_x = center_x - int(result_w / 2) start_y = center_y - int(result_h / 2) result = img_copy[start_y : start_y+result_h, start_x : start_x+result_w, ...] 
print(result.shape) return result # 4개의 쿼터 이미지 지정한 center와 theta로 affine하여 merge된 완성된 이미지 추출 def img_merge(img_list, centroids_list, theta_list) : # 1,2,3,4,로 정렬된 각 쿼터 img list, 각 쿼터 center정보, 각 쿼터 theta정보 q1_img, q2_img, q3_img, q4_img = img_list q1_theta, q2_theta, q3_theta, q4_theta = theta_list q1_centroid, q2_centroid, q3_centroid, q4_centroid = centroids_list print('before merge img range = [ {} {}]'.format(np.unique(q1_img)[0], np.unique(q1_img)[-1])) # q1 unique range 정보 q1_affine_img = img_affine(q1_img, q1_centroid, q1_theta) q2_affine_img = img_affine(q2_img, q2_centroid, q2_theta) q3_affine_img = img_affine(q3_img, q3_centroid, q3_theta) q4_affine_img = img_affine(q4_img, q4_centroid, q4_theta) q1_cut = img_cutting(q1_affine_img, q1_centroid) q2_cut = img_cutting(q2_affine_img, q2_centroid) q3_cut = img_cutting(q3_affine_img, q3_centroid) q4_cut = img_cutting(q4_affine_img, q4_centroid) # zeros 생성않하고 할 경우 uint8로 되어 [0-255]만 저장되어 255이상 값 overflow로 처리됨 => np.float32로 변경 merged_img = q1_cut.astype(np.float32) + q2_cut.astype(np.float32) + q3_cut.astype(np.float32) + q4_cut.astype(np.float32) print('after merge img range = [ {} {}]'.format(np.unique(merged_img)[0], np.unique(merged_img)[-1])) # merged range 정보 return merged_img
cv2.circle(img_temp, (cX, cY), 15, (100, 100, 100), -1)
cv2.circle(img_copy, (cX, cY), 15, (100, 100, 100), -1)
random_line_split
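The simpleBlobDetect routine in the record above is the entry point of the calibration flow, so a compact, self-contained version of that detection step is sketched below. It is a minimal sketch assuming a binary (0/255) grayscale input; the parameter values mirror the ones in the record, and the function name detect_bright_blobs is illustrative rather than part of the original module.

import cv2
import numpy as np

def detect_bright_blobs(binary_img: np.ndarray) -> np.ndarray:
    """Return an (N, 3) array of [x, y, diameter] for bright, round blobs."""
    params = cv2.SimpleBlobDetector_Params()
    params.blobColor = 255             # detect bright blobs on a dark background
    params.filterByArea = True
    params.minArea = 10 * 10           # reject blobs smaller than ~10 x 10 px
    params.maxArea = 200 * 200         # reject blobs larger than ~200 x 200 px
    params.filterByCircularity = True
    params.minCircularity = 0.8        # circle = 1.0, square is roughly 0.785
    params.filterByConvexity = False
    params.filterByInertia = True
    params.minInertiaRatio = 0.7       # closer to 1.0 means closer to a circle
    detector = cv2.SimpleBlobDetector_create(params)
    keypoints = detector.detect(binary_img)
    # keypoint.pt is the blob centre, keypoint.size its estimated diameter
    return np.array([[kp.pt[0], kp.pt[1], kp.size] for kp in keypoints])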
calibration.py
### Calibration과 이미지 Merge에 사용되는 모듈 ### import numpy as np from PIL import Image # from skimage import color, viewer, img_as_float, img_as_ubyte, img_as_uint, data # from skimage.filters import gaussian # from skimage.color import rgb2gray # import matplotlib.image as mpimg import matplotlib.pylab as plt import seaborn as sns import os import cv2 from DuxCamera.ImgProcessing.basic import point2_distance, get_crosspt # BLOB 찾는 함수 # https://www.learnopencv.com/blob-detection-using-opencv-python-c/ # https://www.theteams.kr/teams/7191/post/70373 # https://docs.opencv.org/2.4/modules/features2d/doc/common_interfaces_of_feature_detectors.html?highlight=blob def simpleBlobDetect(img) : img_copy = np.copy(img) blob_info = [] # 추출한 blob 정보 # blob detection params = cv2.SimpleBlobDetector_Params() params.blobColor = 255 # 밝은 얼룩 추출 # params.minThreshold = 240 # params.maxThreshold = 255 params.filterByArea = True params.minArea = 10*10; params.maxArea = 200*200 params.filterByCircularity = True params.minCircularity = 0.8; # 원 = 1.0 # 사각형 = 0.785 params.filterByConvexity = False params.filterByInertia = True params.minInertiaRatio = 0.7; # 타원~원 = 0~1 # 줄 = 0 detector = cv2.SimpleBlobDetector_create(params) keypoints = detector.detect(img_copy) print('Detecting한 Blob개수 : ', len(keypoints)) # Blob labeling 수행 im_with_keypoints = cv2.drawKeypoints(img_copy, keypoints, np.array([]), (0, 0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS) for k in keypoints : x, y = k.pt x,y = int(x), int(y) print(k.pt, k.size,k.class_i
plt.subplot(121), plt.imshow(img, cmap='gray'), plt.title('origin_binary_img') plt.subplot(122), plt.imshow(img_copy, cmap='gray'), plt.title('Blob info') plt.show(); return blob_info # for quad camera # 무게중심(CX,CY) 찾기, 및 BLOB 사각형의 'ㄱ'부분 길이 구하기 (ref_square_w, ref_square_h), def find_centroid_for_quadcam(img, blob_info) : img_h, img_w = np.shape(img) img_copy = np.copy(img) # 무게중심 표시를 위한 img img_temp = np.zeros((img_h, img_w), dtype=np.uint8) # 25개의 blob중 가장자리 blob 으로 무게중심 찾기 위한 사각형 x_min_blob = blob_info[np.argmin(blob_info[::, 0])] # 모든 x에서 가장 작은 blob x_max_blob = blob_info[np.argmax(blob_info[::, 0])] y_min_blob = blob_info[np.argmin(blob_info[::, 1])] # 모든 x에서 가장 작은 blob y_max_blob = blob_info[np.argmax(blob_info[::, 1])] # int로 변경 x_min_blob = x_min_blob.astype(np.int) x_max_blob = x_max_blob.astype(np.int) y_min_blob = y_min_blob.astype(np.int) y_max_blob = y_max_blob.astype(np.int) print('x_min_blob : ', x_min_blob[0:2]) print('x_max_blob : ', x_max_blob[0:2]) print('y_min_blob : ', y_min_blob[0:2]) print('y_max_blob : ', y_max_blob[0:2]) # side blob point 표시 # cv2.circle(img_temp, (x_min_blob[0], x_min_blob[1]), 1, (155, 155, 155), 10) # cv2.circle(img_temp, (x_max_blob[0], x_max_blob[1]), 1, (155, 155, 155), 10) # cv2.circle(img_temp, (y_min_blob[0], y_min_blob[1]), 1, (155, 155, 155), 10) # cv2.circle(img_temp, (y_max_blob[0], y_max_blob[1]), 1, (155, 155, 155), 10) # 해당 side 포인트이 꼭지점을 이루는 사각형 그리기 pts = np.array([[x_max_blob[0],x_max_blob[1]], [y_min_blob[0],y_min_blob[1]], [x_min_blob[0],x_min_blob[1]], [y_max_blob[0],y_max_blob[1]]], np.int32) pts = pts.reshape((-1,1,2)) cv2.polylines(img_copy, [pts], isClosed = True, color = (155, 155, 155), thickness = 10) # 사각형 그리기 cv2.fillPoly(img_temp, [pts], (155, 155, 155), cv2.LINE_AA) # 채워진 사각형 그리기 # cv2.fillPoly(img_copy, [pts], (155, 155, 155), cv2.LINE_AA) # 채워진 사각형 그리기 # img_temp의 무게중심 구하기 # contours, hierarchy = cv2.findContours(img_temp, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE) # for i in contours: # M = cv2.moments(i) # cX = int(M['m10'] / M['m00']) # cY = int(M['m01'] / M['m00']) # cv2.drawContours(img_temp, [i], 0, (100, 100, 100), 10) ## 두 선분의 교점으로 구하기 cX, cY = get_crosspt(x_min_blob[0:2], x_max_blob[0:2], y_min_blob[0:2], y_max_blob[0:2]) cX = int(cX) cY = int(cY) cv2.circle(img_temp, (cX, cY), 15, (100, 100, 100), -1) cv2.circle(img_copy, (cX, cY), 15, (100, 100, 100), -1) print('Centroid : ', cX, cY) # ref_square에 구하기 'ㄱ'부분 길이 구하기 ref_square_w = point2_distance(y_min_blob[0:2], x_max_blob[0:2]) # 'ㄱ'의 'ㅡ'부분 ref_square_h = point2_distance(y_max_blob[0:2], x_max_blob[0:2]) # 'ㄱ'의 '|'부분 print('ref_square_w : ', ref_square_w) print('ref_square_h : ', ref_square_h) plt.figure(figsize=(20,10)) plt.subplot(121), plt.imshow(img_copy, cmap='gray'), plt.title('Centroid Point') plt.subplot(122), plt.imshow(img_temp, cmap='gray'), plt.title('Ref square from Side_Blob') plt.show(); return ((int(cX), int(cY)), (int(ref_square_w), int(ref_square_h)), ((x_max_blob[0], x_max_blob[1]), (y_min_blob[0], y_min_blob[1]), (x_min_blob[0], x_min_blob[1]), (y_max_blob[0], y_max_blob[1]))) # 25개 blob의 무게중심(cX, cY) # ref 사각형의 w,h # 사각형의 네 꼭지점 정보 # return[0] = (cX, cY) # 25 blob 사각형의 무게중심 # return[1] = (ref_square_w, ref_square_h) # 해당 사각형의 w,h # return [2] = (xmax, ymin, xmin, ymax) # ref 사각형의 w,h # 사각형의 네 꼭지점 정보 # for single camera # 무게중심(CX,CY) 찾기, 및 BLOB 사각형의 'ㄱ'부분 길이 구하기 (ref_square_w, ref_square_h), def find_centroid_for_singlecam(img, blob_info): img_h, img_w = np.shape(img) img_copy = np.copy(img) # 무게중심 표시를 위한 img img_temp = np.zeros((img_h, 
img_w), dtype=np.uint8) # 25개의 blob중 가장자리 blob 으로 무게중심 찾기 위한 사각형 # blob_info = [x,y,diameter] # find 5 ymin 5 ymax blob sorted_y_blob = blob_info[blob_info[::, 1].argsort()] # y기준 sort y_min_5_blob = sorted_y_blob[:5] # 모든 y에서 가장 작은 5개 blob 후보군 y_max_5_blob = sorted_y_blob[-5:] # 모든 y에서 가장 큰 5개 blob 후보군 x_max_blob_of_y_min = y_min_5_blob[np.argmax(y_min_5_blob[::, 0])] # (1) x_min_blob_of_y_min = y_min_5_blob[np.argmin(y_min_5_blob[::, 0])] # y min 5 blob 중에서 가장 작은 x blob # (2) x_min_blob_of_y_max = y_max_5_blob[np.argmin(y_max_5_blob[::, 0])] # y max 5 blob 중에서 가장 작은 x blob # (3) x_max_blob_of_y_max = y_max_5_blob[np.argmax(y_max_5_blob[::, 0])] # (4) # int로 변경 x_max_blob_of_y_min = x_max_blob_of_y_min.astype(np.int) x_min_blob_of_y_min = x_min_blob_of_y_min.astype(np.int) x_min_blob_of_y_max = x_min_blob_of_y_max.astype(np.int) x_max_blob_of_y_max = x_max_blob_of_y_max.astype(np.int) print('x_max_blob_of_y_min : ', x_max_blob_of_y_min[0:2]) print('x_min_blob_of_y_min : ', x_min_blob_of_y_min[0:2]) print('x_min_blob_of_y_max : ', x_min_blob_of_y_max[0:2]) print('x_max_blob_of_y_max : ', x_max_blob_of_y_max[0:2]) # side blob point 표시 # cv2.circle(img_temp, (x_min_blob[0], x_min_blob[1]), 1, (155, 155, 155), 10) # cv2.circle(img_temp, (x_max_blob[0], x_max_blob[1]), 1, (155, 155, 155), 10) # cv2.circle(img_temp, (y_min_blob[0], y_min_blob[1]), 1, (155, 155, 155), 10) # cv2.circle(img_temp, (y_max_blob[0], y_max_blob[1]), 1, (155, 155, 155), 10) # 해당 side 포인트이 꼭지점을 이루는 사각형 그리기 pts = np.array([[x_max_blob_of_y_min[0], x_max_blob_of_y_min[1]], [x_min_blob_of_y_min[0], x_min_blob_of_y_min[1]], [x_min_blob_of_y_max[0], x_min_blob_of_y_max[1]], [x_max_blob_of_y_max[0], x_max_blob_of_y_max[1]]], np.int32) pts = pts.reshape((-1, 1, 2)) cv2.polylines(img_copy, [pts], isClosed=True, color=(155, 155, 155), thickness=10) # 사각형 그리기 cv2.fillPoly(img_temp, [pts], (155, 155, 155), cv2.LINE_AA) # 채워진 사각형 그리기 # cv2.fillPoly(img_copy, [pts], (155, 155, 155), cv2.LINE_AA) # 채워진 사각형 그리기 # img_temp의 무게중심 구하기 # contours, hierarchy = cv2.findContours(img_temp, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE) # for i in contours: # M = cv2.moments(i) # cX = int(M['m10'] / M['m00']) # cY = int(M['m01'] / M['m00']) # # cv2.circle(img_temp, (cX, cY), 15, (100, 100, 100), -1) # cv2.circle(img_copy, (cX, cY), 15, (100, 100, 100), -1) # cv2.drawContours(img_temp, [i], 0, (100, 100, 100), 10) ## 두 선분의 교점으로 구하기 cX, cY = get_crosspt(x_min_blob_of_y_min[0:2], x_max_blob_of_y_max[0:2], x_max_blob_of_y_min[0:2], x_min_blob_of_y_max[0:2]) cX = int(cX) cY = int(cY) cv2.circle(img_temp, (cX, cY), 15, (100, 100, 100), -1) cv2.circle(img_copy, (cX, cY), 15, (100, 100, 100), -1) print('Centroid : ', cX, cY) # ref_square에 구하기 'ㄱ'부분 길이 구하기 ref_square_w = point2_distance(x_max_blob_of_y_min[0:2], x_min_blob_of_y_min[0:2]) # 'ㄱ'의 'ㅡ'부분 # 1 - 2 ref_square_h = point2_distance(x_max_blob_of_y_min[0:2], x_max_blob_of_y_max[0:2]) # 'ㄱ'의 '|'부분 # 1 - 4 print('ref_square_w : ', ref_square_w) print('ref_square_h : ', ref_square_h) plt.figure(figsize=(20, 10)) plt.subplot(121), plt.imshow(img_copy, cmap='gray'), plt.title('Centroid Point') plt.subplot(122), plt.imshow(img_temp, cmap='gray'), plt.title('Ref square from Side_Blob') plt.show(); return ((int(cX), int(cY)), (int(ref_square_w), int(ref_square_h)), ( (x_max_blob_of_y_min[0], x_max_blob_of_y_min[1]), (x_min_blob_of_y_min[0], x_min_blob_of_y_min[1]), (x_min_blob_of_y_max[0], x_min_blob_of_y_max[1]), (x_max_blob_of_y_max[0], x_max_blob_of_y_max[1]))) # 25개 blob의 무게중심(cX, cY) # ref 사각형의 w,h # 사각형의 네 
꼭지점 정보 # return[0] = (cX, cY) # 25 blob 사각형의 무게중심 # return[1] = (ref_square_w, ref_square_h) # 해당 사각형의 w,h # return [2] = (xmax, ymin, xmin, ymax) # ref 사각형의 w,h # 사각형의 네 꼭지점 정보 # 지정한 중점에서 theta만큼 이미지 돌리기 def img_affine(img, centroid, theta) : #### aFFINE 시 0~255 -> 0~1로 변경 img_copy = np.copy(img) # 회전하기 전에 center 표시 하여 얼마나 돌아갔는지 확인 img_copy = cv2.circle(img_copy, centroid, 1, (220, 220, 0), 30) img_copy = cv2.putText(img_copy, 'theta = ' + str(theta), centroid, cv2.FONT_HERSHEY_SIMPLEX, 4, (0, 0, 255), cv2.LINE_AA) # 회전 opcn cv ''' img_h, img_w = img.shape[0:2] # matrix = cv2.getRotationMatrix2D((img_w/2, img_h/2), theta, 1) matrix = cv2.getRotationMatrix2D(centroid, theta, 1) dst = cv2.warpAffine(img, matrix, (img_w, img_h)) # 0~1로 변경됨 ''' # 회전 pil img_h, img_w = img.shape[0:2] # pil 객체로 변경 dst = Image.fromarray(img.astype('uint8'), 'L') dst = dst.rotate(theta, center=centroid, expand=False, resample=Image.NEAREST) # theta만큼 회전 # 다시 numpy로 변경 dst = np.array(dst) plt.figure(figsize=(10,10)) plt.subplot(121), plt.imshow(img_copy, cmap='gray'), plt.title('Before_affine') plt.subplot(122), plt.imshow(dst, cmap='gray'), plt.title('After_affine') plt.show(); print('img. max : ', np.max(np.unique(img)), 'img. min : ', np.min(np.unique(img))) print('affine img. max : ', np.max(np.unique(dst)), 'affine img. min : ', np.min(np.unique(dst))) return dst # centroid를 중심으로 돌린 이미지를 설정한 기준으로 hw를 잘라 합성하기 위한 이미지를 만드는 함수 def img_cutting(img, centroids, shape = 1500) : # 2000 img_copy = np.copy(img) result_h, result_w = shape, shape center_x, center_y = centroids # 시작 인덱스 start_x = center_x - int(result_w / 2) start_y = center_y - int(result_h / 2) result = img_copy[start_y : start_y+result_h, start_x : start_x+result_w, ...] print(result.shape) return result # 4개의 쿼터 이미지 지정한 center와 theta로 affine하여 merge된 완성된 이미지 추출 def img_merge(img_list, centroids_list, theta_list) : # 1,2,3,4,로 정렬된 각 쿼터 img list, 각 쿼터 center정보, 각 쿼터 theta정보 q1_img, q2_img, q3_img, q4_img = img_list q1_theta, q2_theta, q3_theta, q4_theta = theta_list q1_centroid, q2_centroid, q3_centroid, q4_centroid = centroids_list print('before merge img range = [ {} {}]'.format(np.unique(q1_img)[0], np.unique(q1_img)[-1])) # q1 unique range 정보 q1_affine_img = img_affine(q1_img, q1_centroid, q1_theta) q2_affine_img = img_affine(q2_img, q2_centroid, q2_theta) q3_affine_img = img_affine(q3_img, q3_centroid, q3_theta) q4_affine_img = img_affine(q4_img, q4_centroid, q4_theta) q1_cut = img_cutting(q1_affine_img, q1_centroid) q2_cut = img_cutting(q2_affine_img, q2_centroid) q3_cut = img_cutting(q3_affine_img, q3_centroid) q4_cut = img_cutting(q4_affine_img, q4_centroid) # zeros 생성않하고 할 경우 uint8로 되어 [0-255]만 저장되어 255이상 값 overflow로 처리됨 => np.float32로 변경 merged_img = q1_cut.astype(np.float32) + q2_cut.astype(np.float32) + q3_cut.astype(np.float32) + q4_cut.astype(np.float32) print('after merge img range = [ {} {}]'.format(np.unique(merged_img)[0], np.unique(merged_img)[-1])) # merged range 정보 return merged_img
d) # 추출결과의 중심, 추출결과의 diameter (blob의 직경x) cv2.circle(img_copy, (x,y), 1, (155, 155, 155), 10) cv2.circle(img_copy, (x,y), int(k.size/2), (155, 155, 155), 10) blob_info.append([x,y,k.size]) # x,y, diameter 정보 blob_info = np.array(blob_info) # argmin, argmx 를 위해 numpy 사용 plt.figure(figsize=(15,15))
conditional_block
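Both centroid helpers above (find_centroid_for_quadcam and find_centroid_for_singlecam) take the centre of the blob pattern as the intersection of the two diagonals through the outermost blobs, delegating the geometry to get_crosspt from DuxCamera.ImgProcessing.basic, which is not shown in these records. The sketch below is a hypothetical stand-in for that helper using the standard determinant form of line-line intersection; line_intersection is an assumed name, not the original API.

import numpy as np

def line_intersection(p1, p2, p3, p4):
    """Intersection of the line through p1, p2 with the line through p3, p4."""
    x1, y1 = p1
    x2, y2 = p2
    x3, y3 = p3
    x4, y4 = p4
    denom = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4)
    if abs(denom) < 1e-9:
        raise ValueError("lines are parallel; no unique intersection")
    det12 = x1 * y2 - y1 * x2
    det34 = x3 * y4 - y3 * x4
    px = (det12 * (x3 - x4) - (x1 - x2) * det34) / denom
    py = (det12 * (y3 - y4) - (y1 - y2) * det34) / denom
    return px, py

# The diagonals of the unit square cross at its centre.
assert np.allclose(line_intersection((0, 0), (1, 1), (1, 0), (0, 1)), (0.5, 0.5))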
calibration.py
### Calibration과 이미지 Merge에 사용되는 모듈 ### import numpy as np from PIL import Image # from skimage import color, viewer, img_as_float, img_as_ubyte, img_as_uint, data # from skimage.filters import gaussian # from skimage.color import rgb2gray # import matplotlib.image as mpimg import matplotlib.pylab as plt import seaborn as sns import os import cv2 from DuxCamera.ImgProcessing.basic import point2_distance, get_crosspt # BLOB 찾는 함수 # https://www.learnopencv.com/blob-detection-using-opencv-python-c/ # https://www.theteams.kr/teams/7191/post/70373 # https://docs.opencv.org/2.4/modules/features2d/doc/common_interfaces_of_feature_detectors.html?highlight=blob def simpleBlobDetect(img) : img_copy = np.copy(img) bl
am(img, blob_info) : img_h, img_w = np.shape(img) img_copy = np.copy(img) # 무게중심 표시를 위한 img img_temp = np.zeros((img_h, img_w), dtype=np.uint8) # 25개의 blob중 가장자리 blob 으로 무게중심 찾기 위한 사각형 x_min_blob = blob_info[np.argmin(blob_info[::, 0])] # 모든 x에서 가장 작은 blob x_max_blob = blob_info[np.argmax(blob_info[::, 0])] y_min_blob = blob_info[np.argmin(blob_info[::, 1])] # 모든 x에서 가장 작은 blob y_max_blob = blob_info[np.argmax(blob_info[::, 1])] # int로 변경 x_min_blob = x_min_blob.astype(np.int) x_max_blob = x_max_blob.astype(np.int) y_min_blob = y_min_blob.astype(np.int) y_max_blob = y_max_blob.astype(np.int) print('x_min_blob : ', x_min_blob[0:2]) print('x_max_blob : ', x_max_blob[0:2]) print('y_min_blob : ', y_min_blob[0:2]) print('y_max_blob : ', y_max_blob[0:2]) # side blob point 표시 # cv2.circle(img_temp, (x_min_blob[0], x_min_blob[1]), 1, (155, 155, 155), 10) # cv2.circle(img_temp, (x_max_blob[0], x_max_blob[1]), 1, (155, 155, 155), 10) # cv2.circle(img_temp, (y_min_blob[0], y_min_blob[1]), 1, (155, 155, 155), 10) # cv2.circle(img_temp, (y_max_blob[0], y_max_blob[1]), 1, (155, 155, 155), 10) # 해당 side 포인트이 꼭지점을 이루는 사각형 그리기 pts = np.array([[x_max_blob[0],x_max_blob[1]], [y_min_blob[0],y_min_blob[1]], [x_min_blob[0],x_min_blob[1]], [y_max_blob[0],y_max_blob[1]]], np.int32) pts = pts.reshape((-1,1,2)) cv2.polylines(img_copy, [pts], isClosed = True, color = (155, 155, 155), thickness = 10) # 사각형 그리기 cv2.fillPoly(img_temp, [pts], (155, 155, 155), cv2.LINE_AA) # 채워진 사각형 그리기 # cv2.fillPoly(img_copy, [pts], (155, 155, 155), cv2.LINE_AA) # 채워진 사각형 그리기 # img_temp의 무게중심 구하기 # contours, hierarchy = cv2.findContours(img_temp, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE) # for i in contours: # M = cv2.moments(i) # cX = int(M['m10'] / M['m00']) # cY = int(M['m01'] / M['m00']) # cv2.drawContours(img_temp, [i], 0, (100, 100, 100), 10) ## 두 선분의 교점으로 구하기 cX, cY = get_crosspt(x_min_blob[0:2], x_max_blob[0:2], y_min_blob[0:2], y_max_blob[0:2]) cX = int(cX) cY = int(cY) cv2.circle(img_temp, (cX, cY), 15, (100, 100, 100), -1) cv2.circle(img_copy, (cX, cY), 15, (100, 100, 100), -1) print('Centroid : ', cX, cY) # ref_square에 구하기 'ㄱ'부분 길이 구하기 ref_square_w = point2_distance(y_min_blob[0:2], x_max_blob[0:2]) # 'ㄱ'의 'ㅡ'부분 ref_square_h = point2_distance(y_max_blob[0:2], x_max_blob[0:2]) # 'ㄱ'의 '|'부분 print('ref_square_w : ', ref_square_w) print('ref_square_h : ', ref_square_h) plt.figure(figsize=(20,10)) plt.subplot(121), plt.imshow(img_copy, cmap='gray'), plt.title('Centroid Point') plt.subplot(122), plt.imshow(img_temp, cmap='gray'), plt.title('Ref square from Side_Blob') plt.show(); return ((int(cX), int(cY)), (int(ref_square_w), int(ref_square_h)), ((x_max_blob[0], x_max_blob[1]), (y_min_blob[0], y_min_blob[1]), (x_min_blob[0], x_min_blob[1]), (y_max_blob[0], y_max_blob[1]))) # 25개 blob의 무게중심(cX, cY) # ref 사각형의 w,h # 사각형의 네 꼭지점 정보 # return[0] = (cX, cY) # 25 blob 사각형의 무게중심 # return[1] = (ref_square_w, ref_square_h) # 해당 사각형의 w,h # return [2] = (xmax, ymin, xmin, ymax) # ref 사각형의 w,h # 사각형의 네 꼭지점 정보 # for single camera # 무게중심(CX,CY) 찾기, 및 BLOB 사각형의 'ㄱ'부분 길이 구하기 (ref_square_w, ref_square_h), def find_centroid_for_singlecam(img, blob_info): img_h, img_w = np.shape(img) img_copy = np.copy(img) # 무게중심 표시를 위한 img img_temp = np.zeros((img_h, img_w), dtype=np.uint8) # 25개의 blob중 가장자리 blob 으로 무게중심 찾기 위한 사각형 # blob_info = [x,y,diameter] # find 5 ymin 5 ymax blob sorted_y_blob = blob_info[blob_info[::, 1].argsort()] # y기준 sort y_min_5_blob = sorted_y_blob[:5] # 모든 y에서 가장 작은 5개 blob 후보군 y_max_5_blob = sorted_y_blob[-5:] # 모든 y에서 가장 큰 5개 blob 후보군 
x_max_blob_of_y_min = y_min_5_blob[np.argmax(y_min_5_blob[::, 0])] # (1) x_min_blob_of_y_min = y_min_5_blob[np.argmin(y_min_5_blob[::, 0])] # y min 5 blob 중에서 가장 작은 x blob # (2) x_min_blob_of_y_max = y_max_5_blob[np.argmin(y_max_5_blob[::, 0])] # y max 5 blob 중에서 가장 작은 x blob # (3) x_max_blob_of_y_max = y_max_5_blob[np.argmax(y_max_5_blob[::, 0])] # (4) # int로 변경 x_max_blob_of_y_min = x_max_blob_of_y_min.astype(np.int) x_min_blob_of_y_min = x_min_blob_of_y_min.astype(np.int) x_min_blob_of_y_max = x_min_blob_of_y_max.astype(np.int) x_max_blob_of_y_max = x_max_blob_of_y_max.astype(np.int) print('x_max_blob_of_y_min : ', x_max_blob_of_y_min[0:2]) print('x_min_blob_of_y_min : ', x_min_blob_of_y_min[0:2]) print('x_min_blob_of_y_max : ', x_min_blob_of_y_max[0:2]) print('x_max_blob_of_y_max : ', x_max_blob_of_y_max[0:2]) # side blob point 표시 # cv2.circle(img_temp, (x_min_blob[0], x_min_blob[1]), 1, (155, 155, 155), 10) # cv2.circle(img_temp, (x_max_blob[0], x_max_blob[1]), 1, (155, 155, 155), 10) # cv2.circle(img_temp, (y_min_blob[0], y_min_blob[1]), 1, (155, 155, 155), 10) # cv2.circle(img_temp, (y_max_blob[0], y_max_blob[1]), 1, (155, 155, 155), 10) # 해당 side 포인트이 꼭지점을 이루는 사각형 그리기 pts = np.array([[x_max_blob_of_y_min[0], x_max_blob_of_y_min[1]], [x_min_blob_of_y_min[0], x_min_blob_of_y_min[1]], [x_min_blob_of_y_max[0], x_min_blob_of_y_max[1]], [x_max_blob_of_y_max[0], x_max_blob_of_y_max[1]]], np.int32) pts = pts.reshape((-1, 1, 2)) cv2.polylines(img_copy, [pts], isClosed=True, color=(155, 155, 155), thickness=10) # 사각형 그리기 cv2.fillPoly(img_temp, [pts], (155, 155, 155), cv2.LINE_AA) # 채워진 사각형 그리기 # cv2.fillPoly(img_copy, [pts], (155, 155, 155), cv2.LINE_AA) # 채워진 사각형 그리기 # img_temp의 무게중심 구하기 # contours, hierarchy = cv2.findContours(img_temp, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE) # for i in contours: # M = cv2.moments(i) # cX = int(M['m10'] / M['m00']) # cY = int(M['m01'] / M['m00']) # # cv2.circle(img_temp, (cX, cY), 15, (100, 100, 100), -1) # cv2.circle(img_copy, (cX, cY), 15, (100, 100, 100), -1) # cv2.drawContours(img_temp, [i], 0, (100, 100, 100), 10) ## 두 선분의 교점으로 구하기 cX, cY = get_crosspt(x_min_blob_of_y_min[0:2], x_max_blob_of_y_max[0:2], x_max_blob_of_y_min[0:2], x_min_blob_of_y_max[0:2]) cX = int(cX) cY = int(cY) cv2.circle(img_temp, (cX, cY), 15, (100, 100, 100), -1) cv2.circle(img_copy, (cX, cY), 15, (100, 100, 100), -1) print('Centroid : ', cX, cY) # ref_square에 구하기 'ㄱ'부분 길이 구하기 ref_square_w = point2_distance(x_max_blob_of_y_min[0:2], x_min_blob_of_y_min[0:2]) # 'ㄱ'의 'ㅡ'부분 # 1 - 2 ref_square_h = point2_distance(x_max_blob_of_y_min[0:2], x_max_blob_of_y_max[0:2]) # 'ㄱ'의 '|'부분 # 1 - 4 print('ref_square_w : ', ref_square_w) print('ref_square_h : ', ref_square_h) plt.figure(figsize=(20, 10)) plt.subplot(121), plt.imshow(img_copy, cmap='gray'), plt.title('Centroid Point') plt.subplot(122), plt.imshow(img_temp, cmap='gray'), plt.title('Ref square from Side_Blob') plt.show(); return ((int(cX), int(cY)), (int(ref_square_w), int(ref_square_h)), ( (x_max_blob_of_y_min[0], x_max_blob_of_y_min[1]), (x_min_blob_of_y_min[0], x_min_blob_of_y_min[1]), (x_min_blob_of_y_max[0], x_min_blob_of_y_max[1]), (x_max_blob_of_y_max[0], x_max_blob_of_y_max[1]))) # 25개 blob의 무게중심(cX, cY) # ref 사각형의 w,h # 사각형의 네 꼭지점 정보 # return[0] = (cX, cY) # 25 blob 사각형의 무게중심 # return[1] = (ref_square_w, ref_square_h) # 해당 사각형의 w,h # return [2] = (xmax, ymin, xmin, ymax) # ref 사각형의 w,h # 사각형의 네 꼭지점 정보 # 지정한 중점에서 theta만큼 이미지 돌리기 def img_affine(img, centroid, theta) : #### aFFINE 시 0~255 -> 0~1로 변경 img_copy = np.copy(img) # 회전하기 
전에 center 표시 하여 얼마나 돌아갔는지 확인 img_copy = cv2.circle(img_copy, centroid, 1, (220, 220, 0), 30) img_copy = cv2.putText(img_copy, 'theta = ' + str(theta), centroid, cv2.FONT_HERSHEY_SIMPLEX, 4, (0, 0, 255), cv2.LINE_AA) # 회전 opcn cv ''' img_h, img_w = img.shape[0:2] # matrix = cv2.getRotationMatrix2D((img_w/2, img_h/2), theta, 1) matrix = cv2.getRotationMatrix2D(centroid, theta, 1) dst = cv2.warpAffine(img, matrix, (img_w, img_h)) # 0~1로 변경됨 ''' # 회전 pil img_h, img_w = img.shape[0:2] # pil 객체로 변경 dst = Image.fromarray(img.astype('uint8'), 'L') dst = dst.rotate(theta, center=centroid, expand=False, resample=Image.NEAREST) # theta만큼 회전 # 다시 numpy로 변경 dst = np.array(dst) plt.figure(figsize=(10,10)) plt.subplot(121), plt.imshow(img_copy, cmap='gray'), plt.title('Before_affine') plt.subplot(122), plt.imshow(dst, cmap='gray'), plt.title('After_affine') plt.show(); print('img. max : ', np.max(np.unique(img)), 'img. min : ', np.min(np.unique(img))) print('affine img. max : ', np.max(np.unique(dst)), 'affine img. min : ', np.min(np.unique(dst))) return dst # centroid를 중심으로 돌린 이미지를 설정한 기준으로 hw를 잘라 합성하기 위한 이미지를 만드는 함수 def img_cutting(img, centroids, shape = 1500) : # 2000 img_copy = np.copy(img) result_h, result_w = shape, shape center_x, center_y = centroids # 시작 인덱스 start_x = center_x - int(result_w / 2) start_y = center_y - int(result_h / 2) result = img_copy[start_y : start_y+result_h, start_x : start_x+result_w, ...] print(result.shape) return result # 4개의 쿼터 이미지 지정한 center와 theta로 affine하여 merge된 완성된 이미지 추출 def img_merge(img_list, centroids_list, theta_list) : # 1,2,3,4,로 정렬된 각 쿼터 img list, 각 쿼터 center정보, 각 쿼터 theta정보 q1_img, q2_img, q3_img, q4_img = img_list q1_theta, q2_theta, q3_theta, q4_theta = theta_list q1_centroid, q2_centroid, q3_centroid, q4_centroid = centroids_list print('before merge img range = [ {} {}]'.format(np.unique(q1_img)[0], np.unique(q1_img)[-1])) # q1 unique range 정보 q1_affine_img = img_affine(q1_img, q1_centroid, q1_theta) q2_affine_img = img_affine(q2_img, q2_centroid, q2_theta) q3_affine_img = img_affine(q3_img, q3_centroid, q3_theta) q4_affine_img = img_affine(q4_img, q4_centroid, q4_theta) q1_cut = img_cutting(q1_affine_img, q1_centroid) q2_cut = img_cutting(q2_affine_img, q2_centroid) q3_cut = img_cutting(q3_affine_img, q3_centroid) q4_cut = img_cutting(q4_affine_img, q4_centroid) # zeros 생성않하고 할 경우 uint8로 되어 [0-255]만 저장되어 255이상 값 overflow로 처리됨 => np.float32로 변경 merged_img = q1_cut.astype(np.float32) + q2_cut.astype(np.float32) + q3_cut.astype(np.float32) + q4_cut.astype(np.float32) print('after merge img range = [ {} {}]'.format(np.unique(merged_img)[0], np.unique(merged_img)[-1])) # merged range 정보 return merged_img
ob_info = [] # 추출한 blob 정보 # blob detection params = cv2.SimpleBlobDetector_Params() params.blobColor = 255 # 밝은 얼룩 추출 # params.minThreshold = 240 # params.maxThreshold = 255 params.filterByArea = True params.minArea = 10*10; params.maxArea = 200*200 params.filterByCircularity = True params.minCircularity = 0.8; # 원 = 1.0 # 사각형 = 0.785 params.filterByConvexity = False params.filterByInertia = True params.minInertiaRatio = 0.7; # 타원~원 = 0~1 # 줄 = 0 detector = cv2.SimpleBlobDetector_create(params) keypoints = detector.detect(img_copy) print('Detecting한 Blob개수 : ', len(keypoints)) # Blob labeling 수행 im_with_keypoints = cv2.drawKeypoints(img_copy, keypoints, np.array([]), (0, 0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS) for k in keypoints : x, y = k.pt x,y = int(x), int(y) print(k.pt, k.size,k.class_id) # 추출결과의 중심, 추출결과의 diameter (blob의 직경x) cv2.circle(img_copy, (x,y), 1, (155, 155, 155), 10) cv2.circle(img_copy, (x,y), int(k.size/2), (155, 155, 155), 10) blob_info.append([x,y,k.size]) # x,y, diameter 정보 blob_info = np.array(blob_info) # argmin, argmx 를 위해 numpy 사용 plt.figure(figsize=(15,15)) plt.subplot(121), plt.imshow(img, cmap='gray'), plt.title('origin_binary_img') plt.subplot(122), plt.imshow(img_copy, cmap='gray'), plt.title('Blob info') plt.show(); return blob_info # for quad camera # 무게중심(CX,CY) 찾기, 및 BLOB 사각형의 'ㄱ'부분 길이 구하기 (ref_square_w, ref_square_h), def find_centroid_for_quadc
identifier_body
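img_affine above rotates each quarter image around its detected centroid with Pillow instead of cv2.warpAffine, which an inline comment flags as changing the value range. Below is a minimal sketch of just that rotation step, assuming a single-channel uint8 image; rotate_about and the synthetic demo array are illustrative, not part of the original module.

import numpy as np
from PIL import Image

def rotate_about(img: np.ndarray, center: tuple, theta_deg: float) -> np.ndarray:
    """Rotate a grayscale image counter-clockwise by theta_deg around `center`."""
    pil_img = Image.fromarray(img.astype(np.uint8), mode="L")
    rotated = pil_img.rotate(theta_deg, center=center, expand=False,
                             resample=Image.NEAREST)
    return np.asarray(rotated)

# Usage on a synthetic 100 x 100 frame: output keeps the input size (expand=False).
demo = np.zeros((100, 100), dtype=np.uint8)
demo[40:60, 70:90] = 255
out = rotate_about(demo, center=(50, 50), theta_deg=90.0)
assert out.shape == demo.shape and out.dtype == np.uint8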
calibration.py
### Calibration과 이미지 Merge에 사용되는 모듈 ### import numpy as np from PIL import Image # from skimage import color, viewer, img_as_float, img_as_ubyte, img_as_uint, data # from skimage.filters import gaussian # from skimage.color import rgb2gray # import matplotlib.image as mpimg import matplotlib.pylab as plt import seaborn as sns import os import cv2 from DuxCamera.ImgProcessing.basic import point2_distance, get_crosspt # BLOB 찾는 함수 # https://www.learnopencv.com/blob-detection-using-opencv-python-c/ # https://www.theteams.kr/teams/7191/post/70373 # https://docs.opencv.org/2.4/modules/features2d/doc/common_interfaces_of_feature_detectors.html?highlight=blob def simpleBlobDetect(img) : img_copy = np.copy(img) blob_info = [] # 추출한 blob 정보 # blob detection params = cv2.SimpleBlobDetector_Params() params.blobColor = 255 # 밝은 얼룩 추출 # params.minThreshold = 240 # params.maxThreshold = 255 params.filterByArea = True params.minArea = 10*10; params.maxArea = 200*200 params.filterByCircularity = True params.minCircularity = 0.8; # 원 = 1.0 # 사각형 = 0.785 params.filterByConvexity = False params.filterByInertia = True params.minInertiaRatio = 0.7; # 타원~원 = 0~1 # 줄 = 0 detector = cv2.SimpleBlobDetector_create(params) keypoints = detector.detect(img_copy) print('Detecting한 Blob개수 : ', len(keypoints)) # Blob labeling 수행 im_with_keypoints = cv2.drawKeypoints(img_copy, keypoints, np.array([]), (0, 0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS) for k in keypoints : x, y = k.pt x,y = int(x), int(y) print(k.pt, k.size,k.class_id) # 추출결과의 중심, 추출결과의 diameter (blob의 직경x) cv2.circle(img_copy, (x,y), 1, (155, 155, 155), 10) cv2.circle(img_copy, (x,y), int(k.size/2), (155, 155, 155), 10) blob_info.append([x,y,k.size]) # x,y, diameter 정보 blob_info = np.array(blob_info) # argmin, argmx 를 위해 numpy 사용 plt.figure(figsize=(15,15)) plt.subplot(121), plt.imshow(img, cmap='gray'), plt.title('origin_binary_img') plt.subplot(122), plt.imshow(img_copy, cmap='gray'), plt.title('Blob info') plt.show(); return blob_info # for quad camera # 무게중심(CX,CY) 찾기, 및 BLOB 사각형의 'ㄱ'부분 길이 구하기 (ref_square_w, ref_square_h), def find_centroid_for_quadcam(img, blob_info) : img_h, img_w = np.shape(img) img_copy = np.copy(img) # 무게중심 표시를 위한 img img_temp = np.zeros((img_h, img_w), dtype=np.uint8) # 25개의 blob중 가장자리 blob 으로 무게중심 찾기 위한 사각형 x_min_blob = blob_info[np.argmin(blob_info[::, 0])] # 모든 x에서 가장 작은 blob x_max_blob = blob_info[np.argmax(blob_info[::, 0])] y_min_blob = blob_info[np.argmin(blob_info[::, 1])] # 모든 x에서 가장 작은 blob y_max_blob = blob_info[np.argmax(blob_info[::, 1])] # int로 변경 x_min_blob = x_min_blob.astype(np.int) x_max_blob = x_max_blob.astype(np.int) y_min_blob = y_min_blob.astype(np.int) y_max_blob = y_max_blob.astype(np.int) print('x_min_blob : ', x_min_blob[0:2]) print('x_max_blob : ', x_max_blob[0:2]) print('y_min_blob : ', y_min_blob[0:2]) print('y_max_blob : ', y_max_blob[0:2]) # side blob point 표시 # cv2.circle(img_temp, (x_min_blob[0], x_min_blob[1]), 1, (155, 155, 155), 10) # cv2.circle(img_temp, (x_max_blob[0], x_max_blob[1]), 1, (155, 155, 155), 10) # cv2.circle(img_temp, (y_min_blob[0], y_min_blob[1]), 1, (155, 155, 155), 10) # cv2.circle(img_temp, (y_max_blob[0], y_max_blob[1]), 1, (155, 155, 155), 10) # 해당 side 포인트이 꼭지점을 이루는 사각형 그리기 pts = np.array([[x_max_blob[0],x_max_blob[1]], [y_min_blob[0],y_min_blob[1]], [x_min_blob[0],x_min_blob[1]], [y_max_blob[0],y_max_blob[1]]], np.int32) pts = pts.reshape((-1,1,2)) cv2.polylines(img_copy, [pts], isClosed = True, color = (155, 155, 155), thickness = 10) # 사각형 그리기 
cv2.fillPoly(img_temp, [pts], (155, 155, 155), cv2.LINE_AA) # draw filled rectangle # cv2.fillPoly(img_copy, [pts], (155, 155, 155), cv2.LINE_AA) # draw filled rectangle # compute the centroid of img_temp # contours, hierarchy = cv2.findContours(img_temp, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE) # for i in contours: # M = cv2.moments(i) # cX = int(M['m10'] / M['m00']) # cY = int(M['m01'] / M['m00']) # cv2.drawContours(img_temp, [i], 0, (100, 100, 100), 10) ## compute it as the intersection of two line segments cX, cY = get_crosspt(x_min_blob[0:2], x_max_blob[0:2], y_min_blob[0:2], y_max_blob[0:2]) cX = int(cX) cY = int(cY) cv2.circle(img_temp, (cX, cY), 15, (100, 100, 100), -1) cv2.circle(img_copy, (cX, cY), 15, (100, 100, 100), -1) print('Centroid : ', cX, cY) # compute the lengths of the 'ㄱ'-shaped corner of ref_square ref_square_w = point2_distance(y_min_blob[0:2], x_max_blob[0:2]) # the horizontal ('ㅡ') part of the 'ㄱ' ref_square_h = point2_distance(y_max_blob[0:2], x_max_blob[0:2]) # the vertical ('|') part of the 'ㄱ' print('ref_square_w : ', ref_square_w) print('ref_square_h : ', ref_square_h) plt.figure(figsize=(20,10)) plt.subplot(121), plt.imshow(img_copy, cmap='gray'), plt.title('Centroid Point') plt.subplot(122), plt.imshow(img_temp, cmap='gray'), plt.title('Ref square from Side_Blob') plt.show(); return ((int(cX), int(cY)), (int(ref_square_w), int(ref_square_h)), ((x_max_blob[0], x_max_blob[1]), (y_min_blob[0], y_min_blob[1]), (x_min_blob[0], x_min_blob[1]), (y_max_blob[0], y_max_blob[1]))) # centroid (cX, cY) of the 25 blobs # w,h of the ref rectangle # the rectangle's four corner points # return[0] = (cX, cY) # centroid of the 25-blob rectangle # return[1] = (ref_square_w, ref_square_h) # w,h of that rectangle # return [2] = (xmax, ymin, xmin, ymax) # the ref rectangle's four corner points # for single camera # find the centroid (CX, CY) and the 'ㄱ'-corner lengths (ref_square_w, ref_square_h) of the blob rectangle, def find_centroid_for_singlecam(img, blob_info): img_h, img_w = np.shape(img) img_copy = np.copy(img) # image for marking the centroid img_temp = np.zeros((img_h, img_w), dtype=np.uint8) # rectangle built from the edge blobs of the 25 blobs, used to find the centroid # blob_info = [x,y,diameter] # find 5 ymin 5 ymax blob sorted_y_blob = blob_info[blob_info[::, 1].argsort()] # sort by y y_min_5_blob = sorted_y_blob[:5] # candidate set: the 5 blobs with the smallest y y_max_5_blob = sorted_y_blob[-5:] # candidate set: the 5 blobs with the largest y x_max_blob_of_y_min = y_min_5_blob[np.argmax(y_min_5_blob[::, 0])] # (1) x_min_blob_of_y_min = y_min_5_blob[np.argmin(y_min_5_blob[::, 0])] # blob with the smallest x among the 5 y-min blobs # (2) x_min_blob_of_y_max = y_max_5_blob[np.argmin(y_max_5_blob[::, 0])] # blob with the smallest x among the 5 y-max blobs # (3) x_max_blob_of_y_max = y_max_5_blob[np.argmax(y_max_5_blob[::, 0])] # (4) # convert to int x_max_blob_of_y_min = x_max_blob_of_y_min.astype(np.int) x_min_blob_of_y_min = x_min_blob_of_y_min.astype(np.int) x_min_blob_of_y_max = x_min_blob_of_y_max.astype(np.int) x_max_blob_of_y_max = x_max_blob_of_y_max.astype(np.int) print('x_max_blob_of_y_min : ', x_max_blob_of_y_min[0:2]) print('x_min_blob_of_y_min : ', x_min_blob_of_y_min[0:2]) print('x_min_blob_of_y_max : ', x_min_blob_of_y_max[0:2]) print('x_max_blob_of_y_max : ', x_max_blob_of_y_max[0:2]) # mark the side blob points # cv2.circle(img_temp, (x_min_blob[0], x_min_blob[1]), 1, (155, 155, 155), 10) # cv2.circle(img_temp, (x_max_blob[0], x_max_blob[1]), 1, (155, 155, 155), 10) # cv2.circle(img_temp, (y_min_blob[0], y_min_blob[1]), 1, (155, 155, 155), 10) # cv2.circle(img_temp, (y_max_blob[0], y_max_blob[1]), 1, (155, 155, 155), 10) # draw the rectangle whose corners are those side points pts = np.array([[x_max_blob_of_y_min[0], x_max_blob_of_y_min[1]], [x_min_blob_of_y_min[0], x_min_blob_of_y_min[1]], [x_min_blob_of_y_max[0], x_min_blob_of_y_max[1]], [x_max_blob_of_y_max[0],
x_max_blob_of_y_max[1]]], np.int32) pts = pts.reshape((-1, 1, 2)) cv2.polylines(img_copy, [pts], isClosed=True, color=(155, 155, 155), thickness=10) # draw rectangle cv2.fillPoly(img_temp, [pts], (155, 155, 155), cv2.LINE_AA) # draw filled rectangle # cv2.fillPoly(img_copy, [pts], (155, 155, 155), cv2.LINE_AA) # draw filled rectangle # compute the centroid of img_temp # contours, hierarchy = cv2.findContours(img_temp, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE) # for i in contours: # M = cv2.moments(i) # cX = int(M['m10'] / M['m00']) # cY = int(M['m01'] / M['m00']) # # cv2.circle(img_temp, (cX, cY), 15, (100, 100, 100), -1) # cv2.circle(img_copy, (cX, cY), 15, (100, 100, 100), -1) # cv2.drawContours(img_temp, [i], 0, (100, 100, 100), 10) ## compute it as the intersection of two line segments cX, cY = get_crosspt(x_min_blob_of_y_min[0:2], x_max_blob_of_y_max[0:2], x_max_blob_of_y_min[0:2], x_min_blob_of_y_max[0:2]) cX = int(cX) cY = int(cY) cv2.circle(img_temp, (cX, cY), 15, (100, 100, 100), -1) cv2.circle(img_copy, (cX, cY), 15, (100, 100, 100), -1) print('Centroid : ', cX, cY) # compute the lengths of the 'ㄱ'-shaped corner of ref_square ref_square_w = point2_distance(x_max_blob_of_y_min[0:2], x_min_blob_of_y_min[0:2]) # the horizontal ('ㅡ') part of the 'ㄱ' # 1 - 2 ref_square_h = point2_distance(x_max_blob_of_y_min[0:2], x_max_blob_of_y_max[0:2]) # the vertical ('|') part of the 'ㄱ' # 1 - 4 print('ref_square_w : ', ref_square_w) print('ref_square_h : ', ref_square_h) plt.figure(figsize=(20, 10)) plt.subplot(121), plt.imshow(img_copy, cmap='gray'), plt.title('Centroid Point') plt.subplot(122), plt.imshow(img_temp, cmap='gray'), plt.title('Ref square from Side_Blob') plt.show(); return ((int(cX), int(cY)), (int(ref_square_w), int(ref_square_h)), ( (x_max_blob_of_y_min[0], x_max_blob_of_y_min[1]), (x_min_blob_of_y_min[0], x_min_blob_of_y_min[1]), (x_min_blob_of_y_max[0], x_min_blob_of_y_max[1]), (x_max_blob_of_y_max[0], x_max_blob_of_y_max[1]))) # centroid (cX, cY) of the 25 blobs # w,h of the ref rectangle # the rectangle's four corner points # return[0] = (cX, cY) # centroid of the 25-blob rectangle # return[1] = (ref_square_w, ref_square_h) # w,h of that rectangle # return [2] = (xmax, ymin, xmin, ymax) # the ref rectangle's four corner points # rotate the image by theta around the specified center point def img_affine(img, centroid, theta) : #### when doing the affine, the range changes from 0~255 to 0~1 img_copy = np.copy(img) # mark the center before rotating so we can check how far it was rotated img_copy = cv2.circle(img_copy, centroid, 1, (220, 220, 0), 30) img_copy = cv2.putText(img_copy, 'theta = ' + str(theta), centroid, cv2.FONT_HERSHEY_SIMPLEX, 4, (0, 0, 255), cv2.LINE_AA) # rotation with OpenCV ''' img_h, img_w = img.shape[0:2] # matrix = cv2.getRotationMatrix2D((img_w/2, img_h/2), theta, 1) matrix = cv2.getRotationMatrix2D(centroid, theta, 1) dst = cv2.warpAffine(img, matrix, (img_w, img_h)) # range becomes 0~1 ''' # rotation with PIL img_h, img_w = img.shape[0:2] # convert to a PIL object dst = Image.fromarray(img.astype('uint8'), 'L') dst = dst.rotate(theta, center=centroid, expand=False, resample=Image.NEAREST) # rotate by theta # convert back to numpy dst = np.array(dst) plt.figure(figsize=(10,10)) plt.subplot(121), plt.imshow(img_copy, cmap='gray'), plt.title('Before_affine') pl
122), plt.imshow(dst, cmap='gray'), plt.title('After_affine') plt.show(); print('img. max : ', np.max(np.unique(img)), 'img. min : ', np.min(np.unique(img))) print('affine img. max : ', np.max(np.unique(dst)), 'affine img. min : ', np.min(np.unique(dst))) return dst # function that crops the image rotated about the centroid to the configured h/w to build the image used for merging def img_cutting(img, centroids, shape = 1500) : # 2000 img_copy = np.copy(img) result_h, result_w = shape, shape center_x, center_y = centroids # start indices start_x = center_x - int(result_w / 2) start_y = center_y - int(result_h / 2) result = img_copy[start_y : start_y+result_h, start_x : start_x+result_w, ...] print(result.shape) return result # affine the four quarter images with the given center and theta and extract the completed merged image def img_merge(img_list, centroids_list, theta_list) : # quarter image list ordered 1,2,3,4, each quarter's center info, each quarter's theta info q1_img, q2_img, q3_img, q4_img = img_list q1_theta, q2_theta, q3_theta, q4_theta = theta_list q1_centroid, q2_centroid, q3_centroid, q4_centroid = centroids_list print('before merge img range = [ {} {}]'.format(np.unique(q1_img)[0], np.unique(q1_img)[-1])) # q1 unique range info q1_affine_img = img_affine(q1_img, q1_centroid, q1_theta) q2_affine_img = img_affine(q2_img, q2_centroid, q2_theta) q3_affine_img = img_affine(q3_img, q3_centroid, q3_theta) q4_affine_img = img_affine(q4_img, q4_centroid, q4_theta) q1_cut = img_cutting(q1_affine_img, q1_centroid) q2_cut = img_cutting(q2_affine_img, q2_centroid) q3_cut = img_cutting(q3_affine_img, q3_centroid) q4_cut = img_cutting(q4_affine_img, q4_centroid) # without allocating a float array first, the result stays uint8, only [0-255] is stored and values above 255 overflow => convert to np.float32 merged_img = q1_cut.astype(np.float32) + q2_cut.astype(np.float32) + q3_cut.astype(np.float32) + q4_cut.astype(np.float32) print('after merge img range = [ {} {}]'.format(np.unique(merged_img)[0], np.unique(merged_img)[-1])) # merged range info return merged_img
t.subplot(
identifier_name
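# The centroid code above calls two helpers, get_crosspt and point2_distance, that are defined
# elsewhere in the original file and not shown in this excerpt. The sketch below is a minimal,
# assumed implementation (intersection of the two infinite lines and Euclidean distance); the
# real helpers may differ in their details.
import numpy as np

def get_crosspt(p1, p2, p3, p4):
    """Intersection point of the line through p1-p2 with the line through p3-p4."""
    x1, y1 = float(p1[0]), float(p1[1])
    x2, y2 = float(p2[0]), float(p2[1])
    x3, y3 = float(p3[0]), float(p3[1])
    x4, y4 = float(p4[0]), float(p4[1])
    denom = (x1 - x2) * (y3 - y4) - (y1 - y2) * (x3 - x4)
    if denom == 0:
        raise ValueError('lines are parallel, no unique intersection')
    px = ((x1 * y2 - y1 * x2) * (x3 - x4) - (x1 - x2) * (x3 * y4 - y3 * x4)) / denom
    py = ((x1 * y2 - y1 * x2) * (y3 - y4) - (y1 - y2) * (x3 * y4 - y3 * x4)) / denom
    return px, py

def point2_distance(p1, p2):
    """Euclidean distance between two (x, y) points."""
    return float(np.hypot(p1[0] - p2[0], p1[1] - p2[1]))

# Example: the diagonals of the unit square cross at (0.5, 0.5).
# get_crosspt((0, 0), (1, 1), (1, 0), (0, 1))  ->  (0.5, 0.5)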
ml_cms_heights.py
import cPickle import os import pandas as pd import pdb import numpy as np import logging import sys import calendar import matplotlib.pyplot as plt from math import ceil from sklearn.feature_selection import SelectKBest, f_classif, f_regression from sklearn.grid_search import GridSearchCV, RandomizedSearchCV from sklearn.learning_curve import learning_curve from sklearn.feature_selection import SelectFromModel from sklearn.ensemble.forest import RandomForestRegressor from sklearn.pipeline import Pipeline from sklearn.ensemble import RandomForestClassifier from sklearn.preprocessing import normalize from sklearn.metrics import confusion_matrix from sklearn.metrics import accuracy_score from sklearn.metrics import precision_score from sklearn.linear_model import LogisticRegression from sklearn import preprocessing from sklearn.svm import SVC from sklearn.linear_model import LogisticRegression from sklearn.cross_validation import cross_val_predict from sklearn import linear_model from sklearn.svm import SVR from sklearn.ensemble import ExtraTreesRegressor from ConfigParser import SafeConfigParser from sklearn.ensemble import BaggingRegressor from sklearn.tree import DecisionTreeRegressor from sklearn.cross_validation import train_test_split import constants import compute_stats import rgeo import utils import warnings warnings.filterwarnings("ignore", category=DeprecationWarning) # Logging cur_flname = os.path.splitext(os.path.basename(__file__))[0] LOG_FILENAME = constants.log_dir + os.sep + 'Log_' + cur_flname + '.txt' logging.basicConfig(filename=LOG_FILENAME, level=logging.INFO, filemode='w', format='%(asctime)s %(levelname)s %(module)s - %(funcName)s: %(message)s', datefmt="%m-%d %H:%M") # Logging levels are DEBUG, INFO, WARNING, ERROR, and CRITICAL # Output to screen logger = logging.getLogger(cur_flname) if not logger.handlers: logger.addHandler(logging.StreamHandler()) # loop_countries # ; Loop over countries x crops # train_ml_model # create_train_df # compute_ml_vars # ; create ml model # loop_forecasting # create_forecast_df # do_forecasting class MLCms: """ """ def __init__(self, config_file=''): # Parse config file self.parser = SafeConfigParser() self.parser.read(config_file) # machine learning specific variables self.classify = constants.DO_CLASSIFICATION # Regress or classify? 
self.vars_features = constants.fixed_vars self.vars_target = constants.ML_TARGETS if self.classify: self.var_target = constants.ML_TARGETS self.task = 'classification' self.model = RandomForestClassifier(n_estimators=2500, n_jobs=constants.ncpu, random_state=0) else: self.var_target = constants.ML_TARGETS self.task = 'regression' self.model = RandomForestRegressor(n_estimators=2500, n_jobs=constants.ncpu, random_state=0) # SVR() # Get path to input self.path_inp = constants.base_dir + os.sep + constants.name_inp_fl # Output directory is <dir>_<classification>_<2014> self.path_out_dir = constants.out_dir utils.make_dir_if_missing(self.path_out_dir) # Model pickle self.path_pickle_model = self.path_out_dir + os.sep + constants.model_pickle self.path_pickle_features = self.path_out_dir + os.sep + 'pickled_features' def output_model_importance(self, gs, name_gs, num_cols): """ :param gs: :param name_gs: :param num_cols: :return: """ rows_list = [] name_vars = [] feature_importance = gs.best_estimator_.named_steps[name_gs].feature_importances_ importances = 100.0 * (feature_importance / feature_importance.max()) std = np.std([tree.feature_importances_ for tree in self.model.estimators_], axis=0) indices = np.argsort(importances)[::-1] # Store feature ranking in a dataframe for f in range(num_cols): dict_results = {'Variable': self.vars_features[indices[f]], 'Importance': importances[indices[f]]} name_vars.append(self.vars_features[indices[f]]) rows_list.append(dict_results) df_results = pd.DataFrame(rows_list) num_cols = 10 if len(indices) > 10 else len(indices) # Plot upto a maximum of 10 features plot.plot_model_importance(num_bars=num_cols, xvals=importances[indices][:num_cols], std=std[indices][:num_cols], fname=self.task + '_importance_' + self.crop, title='Importance of variable (' + self.country + ' ' + self.crop_lname + ')', xlabel=name_vars[:num_cols], out_path=self.path_out_dir) df_results.to_csv(self.path_out_dir + os.sep + self.task + '_importance_' + self.crop + '.csv') def get_data(self): """ :return: """ df = pd.read_csv(self.path_inp) cols = [col for col in df.columns if col not in self.vars_features] # cols.extend(['DI', 'PI']) # Add information on PI and DI of soils # iterate over each row, get lat and lon # Find corresponding DI and PI lat_lons = zip(df['Long_round'], df['Lat_round']) vals_di = [] vals_pi = [] # for idx, (lon, lat) in enumerate(lat_lons): # print idx, len(lat_lons) # vals_pi.append(rgeo.get_value_at_point('C:\\Users\\ritvik\\Documents\\PhD\\Projects\\CMS\\Input\\Soils\\PI.tif', # lon, lat, replace_ras=False)) # vals_di.append(rgeo.get_value_at_point('C:\\Users\\ritvik\\Documents\\PhD\\Projects\\CMS\\Input\\Soils\\DI.tif', # lon, lat, replace_ras=False)) # # df['DI'] = vals_di # df['PI'] = vals_pi df = df[cols] data = df.as_matrix(columns=cols[1:]) target = df.as_matrix(columns=[self.var_target]).ravel() # Get training and testing splits splits = train_test_split(data, target, test_size=0.2) return cols, splits def train_ml_model(self): """ :return: """ logger.info('#########################################################################') logger.info('train_ml_model') logger.info('#########################################################################') ###################################################### # Load dataset ###################################################### cols, splits = self.get_data() data_train, data_test, target_train, target_test = splits # clf = ExtraTreesRegressor(500, n_jobs=constants.ncpu) # #clf = SVR(kernel='rbf', C=1e3, 
gamma=0.1) # #clf = skflow.TensorFlowDNNClassifier(hidden_units=[10, 20, 10], n_classes=3) # data = df_train.as_matrix(columns=cols[1:]) # convert dataframe column to matrix # #data = preprocessing.scale(data) # target = df_train.as_matrix(columns=[self.var_target]).ravel() # convert dataframe column to matrix # clf.fit(data, target) # # predict_val = clf.predict(after.as_matrix(columns=cols[1:])) # results = compute_stats.ols(predict_val.tolist(), after_target.tolist()) # print results.rsquared # import matplotlib.pyplot as plt # plt.scatter(after_target, predict_val) # plt.show() # pdb.set_trace() if not os.path.isfile(self.path_pickle_model): # For details in scikit workflow: See http://stackoverflow.com/questions/ # 35256876/ensuring-right-order-of-operations-in-random-forest-classification-in-scikit-lea # TODO Separate out a dataset so that even the grid search cv can be tested ############################ # Select features from model ############################ logger.info('Selecting important features from model') if self.classify: rf_feature_imp = ExtraTreesRegressor(150, n_jobs=constants.ncpu) else: rf_feature_imp = ExtraTreesRegressor(150, n_jobs=constants.ncpu) feat_selection = SelectFromModel(rf_feature_imp) pipeline = Pipeline([ ('fs', feat_selection), ('clf', self.model), ]) ################################# # Grid search for best parameters ################################# C_range = np.logspace(-2, 10, 13) gamma_range = np.logspace(-9, 3, 13) logger.info('Tuning hyperparameters') param_grid = { 'fs__threshold': ['mean', 'median'], 'fs__estimator__max_features': ['auto', 'log2'], 'clf__max_features': ['auto', 'log2'], 'clf__n_estimators': [1000, 2000] #'clf__gamma': np.logspace(-9, 3, 13), #'clf__C': np.logspace(-2, 10, 13) } gs = GridSearchCV(pipeline, param_grid=param_grid, verbose=2, n_jobs=constants.ncpu, error_score=np.nan) # Fir the data before getting the best parameter combination. Different data sets will have # different optimized parameter combinations, i.e. without data, there is no optimal parameter combination. 
gs.fit(data_train, target_train) logger.info(gs.best_params_) data_test = pd.DataFrame(data_test, columns=cols[1:]) # Update features that should be used in model selected_features = gs.best_estimator_.named_steps['fs'].transform([cols[1:]]) cols = selected_features[0] data_test = data_test[cols] # Update model with the best parameters learnt in the previous step self.model = gs.best_estimator_.named_steps['clf'] predict_val = self.model.predict(data_test) results = compute_stats.ols(predict_val.tolist(), target_test.tolist()) print results.rsquared print cols plt.scatter(target_test, predict_val) plt.show() pdb.set_trace() ################################################################### # Output and plot importance of model features, and learning curves ################################################################### self.output_model_importance(gs, 'clf', num_cols=len(cols[1:])) if constants.plot_model_importance: train_sizes, train_scores, test_scores = learning_curve(self.model, data, target, cv=k_fold, n_jobs=constants.ncpu) plot.plot_learning_curve(train_scores, test_scores, train_sizes=train_sizes, fname='learning_curve', ylim=(0.0, 1.01), title='Learning curves', out_path=self.path_out_dir) # Save the model to disk logger.info('Saving model and features as pickle on disk') with open(self.path_pickle_model, 'wb') as f: cPickle.dump(self.model, f) with open(self.path_pickle_features, 'wb') as f: cPickle.dump(self.vars_features, f) else: # Read model from pickle on disk with open(self.path_pickle_model, 'rb') as f: logger.info('Reading model from pickle on disk') self.model = cPickle.load(f) logger.info('Reading features from pickle on disk') self.vars_features = pd.read_pickle(self.path_pickle_features) return df_cc def do_forecasting(self, df_forecast, mon_names, available_target=False, name_target='yield'): """ 1. Does classification/regression based on already built model. 2. Plots confusion matrix for classification tasks, scatter plot for regression 3. Plots accuracy statistics for classification/regression :param df_forecast: :param mon_names: :param available_target: Is target array available? :param name_target: Name of target array (defaults to yield) :return: """ data = df_forecast.as_matrix(columns=self.vars_features) # convert dataframe column to matrix predicted = self.model.predict(data) if available_target: expected = df_forecast.as_matrix(columns=[name_target]).ravel() if not self.classify: # REGRESSION # Compute stats results = compute_stats.ols(predicted.tolist(), expected.tolist()) bias = compute_stats.bias(predicted, expected) rmse = compute_stats.rmse(predicted, expected) mae = compute_stats.mae(predicted, expected) # Plot! plot.plot_regression_scatter(expected, np.asarray(predicted), annotate=r'$r^{2}$ ' + '{:0.2f}'.format(results.rsquared) + '\n' + 'peak NDVI date: ' + self.time_peak_ndvi.strftime('%b %d'), xlabel='Expected yield', ylabel='Predicted yield', title=mon_names + ' ' + str(int(df_forecast[self.season].unique()[0])), fname=self.task + '_' + '_'.join([mon_names]) + '_' + self.crop, out_path=self.path_out_dir) # global expected vs predicted if self.debug: # any non-existing index will add row self.df_global.loc[len(self.df_global)] = [np.nanmean(expected), np.nanmean(predicted), mon_names, self.forecast_yr] return predicted, {'RMSE': rmse, 'MAE': mae, r'$r^{2}$': results.rsquared, 'Bias': bias} else: # CLASSIFICATION # Convert from crop condition class (e.g. 4) to string (e.g. exceptional)
else: return predicted, {'RMSE': np.nan, 'MAE': np.nan, r'$r^{2}$': np.nan, 'Bias': np.nan, 'Nash-Sutcliff': np.nan} def do_ml_model(): obj = MLCms(config_file='config_CMS.txt') obj.train_ml_model() if __name__ == '__main__': do_ml_model()
expected, predicted = compute_stats.remove_nans(expected, predicted) cm = confusion_matrix(expected, predicted, labels=self.dict_cc.keys()).T # Compute and plot class probabilities proba_cc = self.model.predict_proba(data) df_proba = pd.DataFrame(proba_cc, columns=self.dict_cc.values()) plot.plot_class_probabilities(df_proba, fname='proba_' + '_'.join([mon_names]) + '_' + self.crop, out_path=self.path_out_dir) # Plot confusion matrix plot.plot_confusion_matrix(cm, normalized=False, fname='cm_' + '_'.join([mon_names]) + '_' + self.crop, xlabel='True class', ylabel='Predicted class', ticks=self.dict_cc.values(), out_path=self.path_out_dir) # Normalize and plot confusion matrix cm_normalized = normalize(cm.astype(float), axis=1, norm='l1') plot.plot_confusion_matrix(cm_normalized, fname='norm_cm_' + '_'.join([mon_names]) + '_' + self.crop, xlabel='True class', ylabel='Predicted class', normalized=True, ticks=self.dict_cc.values(), out_path=self.path_out_dir) score_accuracy = accuracy_score(expected, predicted) * 100.0 score_precision = precision_score(expected, predicted, average='weighted') * 100.0 return predicted, {'Accuracy': score_accuracy, 'Precision': score_precision}
conditional_block
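# do_forecasting above takes its regression accuracy numbers (bias, RMSE, MAE) and NaN handling
# from a project-local compute_stats module that is not part of this excerpt. Below is a minimal
# sketch of what such helpers could look like; the actual module, including the ols() helper
# whose result exposes rsquared, may be implemented differently.
import numpy as np

def remove_nans(expected, predicted):
    # Drop positions where either array is NaN, keeping the two arrays aligned.
    expected = np.asarray(expected, dtype=float)
    predicted = np.asarray(predicted, dtype=float)
    mask = ~(np.isnan(expected) | np.isnan(predicted))
    return expected[mask], predicted[mask]

def bias(predicted, expected):
    # Mean signed error: positive means the model over-predicts on average.
    predicted, expected = np.asarray(predicted, float), np.asarray(expected, float)
    return float(np.nanmean(predicted - expected))

def rmse(predicted, expected):
    predicted, expected = np.asarray(predicted, float), np.asarray(expected, float)
    return float(np.sqrt(np.nanmean((predicted - expected) ** 2)))

def mae(predicted, expected):
    predicted, expected = np.asarray(predicted, float), np.asarray(expected, float)
    return float(np.nanmean(np.abs(predicted - expected)))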
ml_cms_heights.py
import cPickle import os import pandas as pd import pdb import numpy as np import logging import sys import calendar import matplotlib.pyplot as plt from math import ceil from sklearn.feature_selection import SelectKBest, f_classif, f_regression from sklearn.grid_search import GridSearchCV, RandomizedSearchCV from sklearn.learning_curve import learning_curve from sklearn.feature_selection import SelectFromModel from sklearn.ensemble.forest import RandomForestRegressor from sklearn.pipeline import Pipeline from sklearn.ensemble import RandomForestClassifier from sklearn.preprocessing import normalize from sklearn.metrics import confusion_matrix from sklearn.metrics import accuracy_score from sklearn.metrics import precision_score from sklearn.linear_model import LogisticRegression from sklearn import preprocessing from sklearn.svm import SVC from sklearn.linear_model import LogisticRegression from sklearn.cross_validation import cross_val_predict from sklearn import linear_model from sklearn.svm import SVR from sklearn.ensemble import ExtraTreesRegressor from ConfigParser import SafeConfigParser from sklearn.ensemble import BaggingRegressor from sklearn.tree import DecisionTreeRegressor from sklearn.cross_validation import train_test_split import constants import compute_stats import rgeo import utils import warnings warnings.filterwarnings("ignore", category=DeprecationWarning) # Logging cur_flname = os.path.splitext(os.path.basename(__file__))[0] LOG_FILENAME = constants.log_dir + os.sep + 'Log_' + cur_flname + '.txt' logging.basicConfig(filename=LOG_FILENAME, level=logging.INFO, filemode='w', format='%(asctime)s %(levelname)s %(module)s - %(funcName)s: %(message)s', datefmt="%m-%d %H:%M") # Logging levels are DEBUG, INFO, WARNING, ERROR, and CRITICAL # Output to screen logger = logging.getLogger(cur_flname) if not logger.handlers: logger.addHandler(logging.StreamHandler()) # loop_countries # ; Loop over countries x crops # train_ml_model # create_train_df # compute_ml_vars # ; create ml model # loop_forecasting # create_forecast_df # do_forecasting class MLCms: """ """ def __init__(self, config_file=''): # Parse config file self.parser = SafeConfigParser() self.parser.read(config_file) # machine learning specific variables self.classify = constants.DO_CLASSIFICATION # Regress or classify? 
self.vars_features = constants.fixed_vars self.vars_target = constants.ML_TARGETS if self.classify: self.var_target = constants.ML_TARGETS self.task = 'classification' self.model = RandomForestClassifier(n_estimators=2500, n_jobs=constants.ncpu, random_state=0) else: self.var_target = constants.ML_TARGETS self.task = 'regression' self.model = RandomForestRegressor(n_estimators=2500, n_jobs=constants.ncpu, random_state=0) # SVR() # Get path to input self.path_inp = constants.base_dir + os.sep + constants.name_inp_fl # Output directory is <dir>_<classification>_<2014> self.path_out_dir = constants.out_dir utils.make_dir_if_missing(self.path_out_dir) # Model pickle self.path_pickle_model = self.path_out_dir + os.sep + constants.model_pickle self.path_pickle_features = self.path_out_dir + os.sep + 'pickled_features' def output_model_importance(self, gs, name_gs, num_cols): """ :param gs: :param name_gs: :param num_cols: :return: """ rows_list = [] name_vars = [] feature_importance = gs.best_estimator_.named_steps[name_gs].feature_importances_ importances = 100.0 * (feature_importance / feature_importance.max()) std = np.std([tree.feature_importances_ for tree in self.model.estimators_], axis=0) indices = np.argsort(importances)[::-1] # Store feature ranking in a dataframe for f in range(num_cols): dict_results = {'Variable': self.vars_features[indices[f]], 'Importance': importances[indices[f]]} name_vars.append(self.vars_features[indices[f]]) rows_list.append(dict_results) df_results = pd.DataFrame(rows_list) num_cols = 10 if len(indices) > 10 else len(indices) # Plot upto a maximum of 10 features plot.plot_model_importance(num_bars=num_cols, xvals=importances[indices][:num_cols], std=std[indices][:num_cols], fname=self.task + '_importance_' + self.crop, title='Importance of variable (' + self.country + ' ' + self.crop_lname + ')', xlabel=name_vars[:num_cols], out_path=self.path_out_dir) df_results.to_csv(self.path_out_dir + os.sep + self.task + '_importance_' + self.crop + '.csv') def get_data(self): """ :return: """ df = pd.read_csv(self.path_inp) cols = [col for col in df.columns if col not in self.vars_features] # cols.extend(['DI', 'PI']) # Add information on PI and DI of soils # iterate over each row, get lat and lon # Find corresponding DI and PI lat_lons = zip(df['Long_round'], df['Lat_round']) vals_di = [] vals_pi = [] # for idx, (lon, lat) in enumerate(lat_lons): # print idx, len(lat_lons) # vals_pi.append(rgeo.get_value_at_point('C:\\Users\\ritvik\\Documents\\PhD\\Projects\\CMS\\Input\\Soils\\PI.tif', # lon, lat, replace_ras=False)) # vals_di.append(rgeo.get_value_at_point('C:\\Users\\ritvik\\Documents\\PhD\\Projects\\CMS\\Input\\Soils\\DI.tif', # lon, lat, replace_ras=False)) # # df['DI'] = vals_di # df['PI'] = vals_pi df = df[cols] data = df.as_matrix(columns=cols[1:]) target = df.as_matrix(columns=[self.var_target]).ravel() # Get training and testing splits splits = train_test_split(data, target, test_size=0.2) return cols, splits def train_ml_model(self):
def do_forecasting(self, df_forecast, mon_names, available_target=False, name_target='yield'): """ 1. Does classification/regression based on already built model. 2. Plots confusion matrix for classification tasks, scatter plot for regression 3. Plots accuracy statistics for classification/regression :param df_forecast: :param mon_names: :param available_target: Is target array available? :param name_target: Name of target array (defaults to yield) :return: """ data = df_forecast.as_matrix(columns=self.vars_features) # convert dataframe column to matrix predicted = self.model.predict(data) if available_target: expected = df_forecast.as_matrix(columns=[name_target]).ravel() if not self.classify: # REGRESSION # Compute stats results = compute_stats.ols(predicted.tolist(), expected.tolist()) bias = compute_stats.bias(predicted, expected) rmse = compute_stats.rmse(predicted, expected) mae = compute_stats.mae(predicted, expected) # Plot! plot.plot_regression_scatter(expected, np.asarray(predicted), annotate=r'$r^{2}$ ' + '{:0.2f}'.format(results.rsquared) + '\n' + 'peak NDVI date: ' + self.time_peak_ndvi.strftime('%b %d'), xlabel='Expected yield', ylabel='Predicted yield', title=mon_names + ' ' + str(int(df_forecast[self.season].unique()[0])), fname=self.task + '_' + '_'.join([mon_names]) + '_' + self.crop, out_path=self.path_out_dir) # global expected vs predicted if self.debug: # any non-existing index will add row self.df_global.loc[len(self.df_global)] = [np.nanmean(expected), np.nanmean(predicted), mon_names, self.forecast_yr] return predicted, {'RMSE': rmse, 'MAE': mae, r'$r^{2}$': results.rsquared, 'Bias': bias} else: # CLASSIFICATION # Convert from crop condition class (e.g. 4) to string (e.g. exceptional) expected, predicted = compute_stats.remove_nans(expected, predicted) cm = confusion_matrix(expected, predicted, labels=self.dict_cc.keys()).T # Compute and plot class probabilities proba_cc = self.model.predict_proba(data) df_proba = pd.DataFrame(proba_cc, columns=self.dict_cc.values()) plot.plot_class_probabilities(df_proba, fname='proba_' + '_'.join([mon_names]) + '_' + self.crop, out_path=self.path_out_dir) # Plot confusion matrix plot.plot_confusion_matrix(cm, normalized=False, fname='cm_' + '_'.join([mon_names]) + '_' + self.crop, xlabel='True class', ylabel='Predicted class', ticks=self.dict_cc.values(), out_path=self.path_out_dir) # Normalize and plot confusion matrix cm_normalized = normalize(cm.astype(float), axis=1, norm='l1') plot.plot_confusion_matrix(cm_normalized, fname='norm_cm_' + '_'.join([mon_names]) + '_' + self.crop, xlabel='True class', ylabel='Predicted class', normalized=True, ticks=self.dict_cc.values(), out_path=self.path_out_dir) score_accuracy = accuracy_score(expected, predicted) * 100.0 score_precision = precision_score(expected, predicted, average='weighted') * 100.0 return predicted, {'Accuracy': score_accuracy, 'Precision': score_precision} else: return predicted, {'RMSE': np.nan, 'MAE': np.nan, r'$r^{2}$': np.nan, 'Bias': np.nan, 'Nash-Sutcliff': np.nan} def do_ml_model(): obj = MLCms(config_file='config_CMS.txt') obj.train_ml_model() if __name__ == '__main__': do_ml_model()
""" :return: """ logger.info('#########################################################################') logger.info('train_ml_model') logger.info('#########################################################################') ###################################################### # Load dataset ###################################################### cols, splits = self.get_data() data_train, data_test, target_train, target_test = splits # clf = ExtraTreesRegressor(500, n_jobs=constants.ncpu) # #clf = SVR(kernel='rbf', C=1e3, gamma=0.1) # #clf = skflow.TensorFlowDNNClassifier(hidden_units=[10, 20, 10], n_classes=3) # data = df_train.as_matrix(columns=cols[1:]) # convert dataframe column to matrix # #data = preprocessing.scale(data) # target = df_train.as_matrix(columns=[self.var_target]).ravel() # convert dataframe column to matrix # clf.fit(data, target) # # predict_val = clf.predict(after.as_matrix(columns=cols[1:])) # results = compute_stats.ols(predict_val.tolist(), after_target.tolist()) # print results.rsquared # import matplotlib.pyplot as plt # plt.scatter(after_target, predict_val) # plt.show() # pdb.set_trace() if not os.path.isfile(self.path_pickle_model): # For details in scikit workflow: See http://stackoverflow.com/questions/ # 35256876/ensuring-right-order-of-operations-in-random-forest-classification-in-scikit-lea # TODO Separate out a dataset so that even the grid search cv can be tested ############################ # Select features from model ############################ logger.info('Selecting important features from model') if self.classify: rf_feature_imp = ExtraTreesRegressor(150, n_jobs=constants.ncpu) else: rf_feature_imp = ExtraTreesRegressor(150, n_jobs=constants.ncpu) feat_selection = SelectFromModel(rf_feature_imp) pipeline = Pipeline([ ('fs', feat_selection), ('clf', self.model), ]) ################################# # Grid search for best parameters ################################# C_range = np.logspace(-2, 10, 13) gamma_range = np.logspace(-9, 3, 13) logger.info('Tuning hyperparameters') param_grid = { 'fs__threshold': ['mean', 'median'], 'fs__estimator__max_features': ['auto', 'log2'], 'clf__max_features': ['auto', 'log2'], 'clf__n_estimators': [1000, 2000] #'clf__gamma': np.logspace(-9, 3, 13), #'clf__C': np.logspace(-2, 10, 13) } gs = GridSearchCV(pipeline, param_grid=param_grid, verbose=2, n_jobs=constants.ncpu, error_score=np.nan) # Fir the data before getting the best parameter combination. Different data sets will have # different optimized parameter combinations, i.e. without data, there is no optimal parameter combination. 
gs.fit(data_train, target_train) logger.info(gs.best_params_) data_test = pd.DataFrame(data_test, columns=cols[1:]) # Update features that should be used in model selected_features = gs.best_estimator_.named_steps['fs'].transform([cols[1:]]) cols = selected_features[0] data_test = data_test[cols] # Update model with the best parameters learnt in the previous step self.model = gs.best_estimator_.named_steps['clf'] predict_val = self.model.predict(data_test) results = compute_stats.ols(predict_val.tolist(), target_test.tolist()) print results.rsquared print cols plt.scatter(target_test, predict_val) plt.show() pdb.set_trace() ################################################################### # Output and plot importance of model features, and learning curves ################################################################### self.output_model_importance(gs, 'clf', num_cols=len(cols[1:])) if constants.plot_model_importance: train_sizes, train_scores, test_scores = learning_curve(self.model, data, target, cv=k_fold, n_jobs=constants.ncpu) plot.plot_learning_curve(train_scores, test_scores, train_sizes=train_sizes, fname='learning_curve', ylim=(0.0, 1.01), title='Learning curves', out_path=self.path_out_dir) # Save the model to disk logger.info('Saving model and features as pickle on disk') with open(self.path_pickle_model, 'wb') as f: cPickle.dump(self.model, f) with open(self.path_pickle_features, 'wb') as f: cPickle.dump(self.vars_features, f) else: # Read model from pickle on disk with open(self.path_pickle_model, 'rb') as f: logger.info('Reading model from pickle on disk') self.model = cPickle.load(f) logger.info('Reading features from pickle on disk') self.vars_features = pd.read_pickle(self.path_pickle_features) return df_cc
identifier_body
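# train_ml_model above wires feature selection and the final estimator into one Pipeline and
# tunes both through GridSearchCV, addressing prefixed parameter names ('fs__...', 'clf__...')
# to the corresponding step. The stand-alone sketch below reproduces that pattern with current
# scikit-learn module paths (the original imports the long-deprecated sklearn.grid_search and
# sklearn.cross_validation) and tiny settings on synthetic data so it runs quickly.
from sklearn.datasets import make_regression
from sklearn.ensemble import ExtraTreesRegressor, RandomForestRegressor
from sklearn.feature_selection import SelectFromModel
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.pipeline import Pipeline

X, y = make_regression(n_samples=300, n_features=20, n_informative=5, noise=10.0, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

pipeline = Pipeline([
    ('fs', SelectFromModel(ExtraTreesRegressor(n_estimators=50, random_state=0))),
    ('clf', RandomForestRegressor(n_estimators=100, random_state=0)),
])

param_grid = {
    'fs__threshold': ['mean', 'median'],    # how aggressively features are dropped
    'clf__max_features': ['sqrt', 'log2'],  # 'auto' has been removed in recent scikit-learn
    'clf__n_estimators': [100, 200],
}

gs = GridSearchCV(pipeline, param_grid=param_grid, cv=3, n_jobs=-1)
gs.fit(X_train, y_train)
print(gs.best_params_)
print('held-out R^2:', gs.best_estimator_.score(X_test, y_test))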
ml_cms_heights.py
import cPickle import os import pandas as pd import pdb import numpy as np import logging import sys import calendar import matplotlib.pyplot as plt from math import ceil from sklearn.feature_selection import SelectKBest, f_classif, f_regression from sklearn.grid_search import GridSearchCV, RandomizedSearchCV from sklearn.learning_curve import learning_curve from sklearn.feature_selection import SelectFromModel from sklearn.ensemble.forest import RandomForestRegressor from sklearn.pipeline import Pipeline from sklearn.ensemble import RandomForestClassifier from sklearn.preprocessing import normalize from sklearn.metrics import confusion_matrix from sklearn.metrics import accuracy_score from sklearn.metrics import precision_score from sklearn.linear_model import LogisticRegression from sklearn import preprocessing from sklearn.svm import SVC from sklearn.linear_model import LogisticRegression from sklearn.cross_validation import cross_val_predict from sklearn import linear_model from sklearn.svm import SVR from sklearn.ensemble import ExtraTreesRegressor from ConfigParser import SafeConfigParser from sklearn.ensemble import BaggingRegressor from sklearn.tree import DecisionTreeRegressor from sklearn.cross_validation import train_test_split import constants import compute_stats import rgeo import utils import warnings warnings.filterwarnings("ignore", category=DeprecationWarning) # Logging cur_flname = os.path.splitext(os.path.basename(__file__))[0] LOG_FILENAME = constants.log_dir + os.sep + 'Log_' + cur_flname + '.txt' logging.basicConfig(filename=LOG_FILENAME, level=logging.INFO, filemode='w', format='%(asctime)s %(levelname)s %(module)s - %(funcName)s: %(message)s', datefmt="%m-%d %H:%M") # Logging levels are DEBUG, INFO, WARNING, ERROR, and CRITICAL # Output to screen logger = logging.getLogger(cur_flname) if not logger.handlers: logger.addHandler(logging.StreamHandler()) # loop_countries # ; Loop over countries x crops # train_ml_model # create_train_df # compute_ml_vars # ; create ml model # loop_forecasting # create_forecast_df # do_forecasting class MLCms: """ """ def __init__(self, config_file=''): # Parse config file self.parser = SafeConfigParser() self.parser.read(config_file) # machine learning specific variables self.classify = constants.DO_CLASSIFICATION # Regress or classify? 
self.vars_features = constants.fixed_vars self.vars_target = constants.ML_TARGETS if self.classify: self.var_target = constants.ML_TARGETS self.task = 'classification' self.model = RandomForestClassifier(n_estimators=2500, n_jobs=constants.ncpu, random_state=0) else: self.var_target = constants.ML_TARGETS self.task = 'regression' self.model = RandomForestRegressor(n_estimators=2500, n_jobs=constants.ncpu, random_state=0) # SVR() # Get path to input self.path_inp = constants.base_dir + os.sep + constants.name_inp_fl # Output directory is <dir>_<classification>_<2014> self.path_out_dir = constants.out_dir utils.make_dir_if_missing(self.path_out_dir) # Model pickle self.path_pickle_model = self.path_out_dir + os.sep + constants.model_pickle self.path_pickle_features = self.path_out_dir + os.sep + 'pickled_features' def output_model_importance(self, gs, name_gs, num_cols): """ :param gs: :param name_gs: :param num_cols: :return: """ rows_list = [] name_vars = [] feature_importance = gs.best_estimator_.named_steps[name_gs].feature_importances_ importances = 100.0 * (feature_importance / feature_importance.max()) std = np.std([tree.feature_importances_ for tree in self.model.estimators_], axis=0) indices = np.argsort(importances)[::-1] # Store feature ranking in a dataframe for f in range(num_cols): dict_results = {'Variable': self.vars_features[indices[f]], 'Importance': importances[indices[f]]} name_vars.append(self.vars_features[indices[f]]) rows_list.append(dict_results) df_results = pd.DataFrame(rows_list) num_cols = 10 if len(indices) > 10 else len(indices) # Plot upto a maximum of 10 features plot.plot_model_importance(num_bars=num_cols, xvals=importances[indices][:num_cols], std=std[indices][:num_cols], fname=self.task + '_importance_' + self.crop, title='Importance of variable (' + self.country + ' ' + self.crop_lname + ')', xlabel=name_vars[:num_cols], out_path=self.path_out_dir) df_results.to_csv(self.path_out_dir + os.sep + self.task + '_importance_' + self.crop + '.csv') def get_data(self): """ :return: """ df = pd.read_csv(self.path_inp) cols = [col for col in df.columns if col not in self.vars_features] # cols.extend(['DI', 'PI']) # Add information on PI and DI of soils # iterate over each row, get lat and lon # Find corresponding DI and PI lat_lons = zip(df['Long_round'], df['Lat_round']) vals_di = [] vals_pi = [] # for idx, (lon, lat) in enumerate(lat_lons): # print idx, len(lat_lons) # vals_pi.append(rgeo.get_value_at_point('C:\\Users\\ritvik\\Documents\\PhD\\Projects\\CMS\\Input\\Soils\\PI.tif', # lon, lat, replace_ras=False)) # vals_di.append(rgeo.get_value_at_point('C:\\Users\\ritvik\\Documents\\PhD\\Projects\\CMS\\Input\\Soils\\DI.tif', # lon, lat, replace_ras=False)) # # df['DI'] = vals_di # df['PI'] = vals_pi df = df[cols] data = df.as_matrix(columns=cols[1:]) target = df.as_matrix(columns=[self.var_target]).ravel() # Get training and testing splits splits = train_test_split(data, target, test_size=0.2) return cols, splits def
(self): """ :return: """ logger.info('#########################################################################') logger.info('train_ml_model') logger.info('#########################################################################') ###################################################### # Load dataset ###################################################### cols, splits = self.get_data() data_train, data_test, target_train, target_test = splits # clf = ExtraTreesRegressor(500, n_jobs=constants.ncpu) # #clf = SVR(kernel='rbf', C=1e3, gamma=0.1) # #clf = skflow.TensorFlowDNNClassifier(hidden_units=[10, 20, 10], n_classes=3) # data = df_train.as_matrix(columns=cols[1:]) # convert dataframe column to matrix # #data = preprocessing.scale(data) # target = df_train.as_matrix(columns=[self.var_target]).ravel() # convert dataframe column to matrix # clf.fit(data, target) # # predict_val = clf.predict(after.as_matrix(columns=cols[1:])) # results = compute_stats.ols(predict_val.tolist(), after_target.tolist()) # print results.rsquared # import matplotlib.pyplot as plt # plt.scatter(after_target, predict_val) # plt.show() # pdb.set_trace() if not os.path.isfile(self.path_pickle_model): # For details in scikit workflow: See http://stackoverflow.com/questions/ # 35256876/ensuring-right-order-of-operations-in-random-forest-classification-in-scikit-lea # TODO Separate out a dataset so that even the grid search cv can be tested ############################ # Select features from model ############################ logger.info('Selecting important features from model') if self.classify: rf_feature_imp = ExtraTreesRegressor(150, n_jobs=constants.ncpu) else: rf_feature_imp = ExtraTreesRegressor(150, n_jobs=constants.ncpu) feat_selection = SelectFromModel(rf_feature_imp) pipeline = Pipeline([ ('fs', feat_selection), ('clf', self.model), ]) ################################# # Grid search for best parameters ################################# C_range = np.logspace(-2, 10, 13) gamma_range = np.logspace(-9, 3, 13) logger.info('Tuning hyperparameters') param_grid = { 'fs__threshold': ['mean', 'median'], 'fs__estimator__max_features': ['auto', 'log2'], 'clf__max_features': ['auto', 'log2'], 'clf__n_estimators': [1000, 2000] #'clf__gamma': np.logspace(-9, 3, 13), #'clf__C': np.logspace(-2, 10, 13) } gs = GridSearchCV(pipeline, param_grid=param_grid, verbose=2, n_jobs=constants.ncpu, error_score=np.nan) # Fir the data before getting the best parameter combination. Different data sets will have # different optimized parameter combinations, i.e. without data, there is no optimal parameter combination. 
gs.fit(data_train, target_train) logger.info(gs.best_params_) data_test = pd.DataFrame(data_test, columns=cols[1:]) # Update features that should be used in model selected_features = gs.best_estimator_.named_steps['fs'].transform([cols[1:]]) cols = selected_features[0] data_test = data_test[cols] # Update model with the best parameters learnt in the previous step self.model = gs.best_estimator_.named_steps['clf'] predict_val = self.model.predict(data_test) results = compute_stats.ols(predict_val.tolist(), target_test.tolist()) print results.rsquared print cols plt.scatter(target_test, predict_val) plt.show() pdb.set_trace() ################################################################### # Output and plot importance of model features, and learning curves ################################################################### self.output_model_importance(gs, 'clf', num_cols=len(cols[1:])) if constants.plot_model_importance: train_sizes, train_scores, test_scores = learning_curve(self.model, data, target, cv=k_fold, n_jobs=constants.ncpu) plot.plot_learning_curve(train_scores, test_scores, train_sizes=train_sizes, fname='learning_curve', ylim=(0.0, 1.01), title='Learning curves', out_path=self.path_out_dir) # Save the model to disk logger.info('Saving model and features as pickle on disk') with open(self.path_pickle_model, 'wb') as f: cPickle.dump(self.model, f) with open(self.path_pickle_features, 'wb') as f: cPickle.dump(self.vars_features, f) else: # Read model from pickle on disk with open(self.path_pickle_model, 'rb') as f: logger.info('Reading model from pickle on disk') self.model = cPickle.load(f) logger.info('Reading features from pickle on disk') self.vars_features = pd.read_pickle(self.path_pickle_features) return df_cc def do_forecasting(self, df_forecast, mon_names, available_target=False, name_target='yield'): """ 1. Does classification/regression based on already built model. 2. Plots confusion matrix for classification tasks, scatter plot for regression 3. Plots accuracy statistics for classification/regression :param df_forecast: :param mon_names: :param available_target: Is target array available? :param name_target: Name of target array (defaults to yield) :return: """ data = df_forecast.as_matrix(columns=self.vars_features) # convert dataframe column to matrix predicted = self.model.predict(data) if available_target: expected = df_forecast.as_matrix(columns=[name_target]).ravel() if not self.classify: # REGRESSION # Compute stats results = compute_stats.ols(predicted.tolist(), expected.tolist()) bias = compute_stats.bias(predicted, expected) rmse = compute_stats.rmse(predicted, expected) mae = compute_stats.mae(predicted, expected) # Plot! plot.plot_regression_scatter(expected, np.asarray(predicted), annotate=r'$r^{2}$ ' + '{:0.2f}'.format(results.rsquared) + '\n' + 'peak NDVI date: ' + self.time_peak_ndvi.strftime('%b %d'), xlabel='Expected yield', ylabel='Predicted yield', title=mon_names + ' ' + str(int(df_forecast[self.season].unique()[0])), fname=self.task + '_' + '_'.join([mon_names]) + '_' + self.crop, out_path=self.path_out_dir) # global expected vs predicted if self.debug: # any non-existing index will add row self.df_global.loc[len(self.df_global)] = [np.nanmean(expected), np.nanmean(predicted), mon_names, self.forecast_yr] return predicted, {'RMSE': rmse, 'MAE': mae, r'$r^{2}$': results.rsquared, 'Bias': bias} else: # CLASSIFICATION # Convert from crop condition class (e.g. 4) to string (e.g. 
exceptional) expected, predicted = compute_stats.remove_nans(expected, predicted) cm = confusion_matrix(expected, predicted, labels=self.dict_cc.keys()).T # Compute and plot class probabilities proba_cc = self.model.predict_proba(data) df_proba = pd.DataFrame(proba_cc, columns=self.dict_cc.values()) plot.plot_class_probabilities(df_proba, fname='proba_' + '_'.join([mon_names]) + '_' + self.crop, out_path=self.path_out_dir) # Plot confusion matrix plot.plot_confusion_matrix(cm, normalized=False, fname='cm_' + '_'.join([mon_names]) + '_' + self.crop, xlabel='True class', ylabel='Predicted class', ticks=self.dict_cc.values(), out_path=self.path_out_dir) # Normalize and plot confusion matrix cm_normalized = normalize(cm.astype(float), axis=1, norm='l1') plot.plot_confusion_matrix(cm_normalized, fname='norm_cm_' + '_'.join([mon_names]) + '_' + self.crop, xlabel='True class', ylabel='Predicted class', normalized=True, ticks=self.dict_cc.values(), out_path=self.path_out_dir) score_accuracy = accuracy_score(expected, predicted) * 100.0 score_precision = precision_score(expected, predicted, average='weighted') * 100.0 return predicted, {'Accuracy': score_accuracy, 'Precision': score_precision} else: return predicted, {'RMSE': np.nan, 'MAE': np.nan, r'$r^{2}$': np.nan, 'Bias': np.nan, 'Nash-Sutcliff': np.nan} def do_ml_model(): obj = MLCms(config_file='config_CMS.txt') obj.train_ml_model() if __name__ == '__main__': do_ml_model()
train_ml_model
identifier_name
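# output_model_importance above normalizes the fitted forest's feature_importances_ and derives
# a spread from the per-tree importances before plotting. The short sketch below shows that
# extraction and ranking step on a synthetic regression problem; the project-specific plot
# module and CSV output are left out.
import numpy as np
from sklearn.datasets import make_regression
from sklearn.ensemble import RandomForestRegressor

X, y = make_regression(n_samples=200, n_features=8, n_informative=3, random_state=0)
names = ['f%d' % i for i in range(X.shape[1])]

model = RandomForestRegressor(n_estimators=100, random_state=0).fit(X, y)

importances = 100.0 * (model.feature_importances_ / model.feature_importances_.max())
std = np.std([tree.feature_importances_ for tree in model.estimators_], axis=0)
order = np.argsort(importances)[::-1]

for idx in order:
    print('%-4s importance=%6.1f  std=%.3f' % (names[idx], importances[idx], std[idx]))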
ml_cms_heights.py
import cPickle import os import pandas as pd import pdb import numpy as np import logging import sys import calendar import matplotlib.pyplot as plt from math import ceil from sklearn.feature_selection import SelectKBest, f_classif, f_regression from sklearn.grid_search import GridSearchCV, RandomizedSearchCV from sklearn.learning_curve import learning_curve from sklearn.feature_selection import SelectFromModel from sklearn.ensemble.forest import RandomForestRegressor from sklearn.pipeline import Pipeline from sklearn.ensemble import RandomForestClassifier from sklearn.preprocessing import normalize from sklearn.metrics import confusion_matrix from sklearn.metrics import accuracy_score from sklearn.metrics import precision_score from sklearn.linear_model import LogisticRegression from sklearn import preprocessing from sklearn.svm import SVC from sklearn.linear_model import LogisticRegression from sklearn.cross_validation import cross_val_predict from sklearn import linear_model from sklearn.svm import SVR from sklearn.ensemble import ExtraTreesRegressor from ConfigParser import SafeConfigParser from sklearn.ensemble import BaggingRegressor from sklearn.tree import DecisionTreeRegressor from sklearn.cross_validation import train_test_split import constants import compute_stats import rgeo import utils import warnings warnings.filterwarnings("ignore", category=DeprecationWarning) # Logging cur_flname = os.path.splitext(os.path.basename(__file__))[0] LOG_FILENAME = constants.log_dir + os.sep + 'Log_' + cur_flname + '.txt' logging.basicConfig(filename=LOG_FILENAME, level=logging.INFO, filemode='w', format='%(asctime)s %(levelname)s %(module)s - %(funcName)s: %(message)s', datefmt="%m-%d %H:%M") # Logging levels are DEBUG, INFO, WARNING, ERROR, and CRITICAL # Output to screen logger = logging.getLogger(cur_flname) if not logger.handlers: logger.addHandler(logging.StreamHandler()) # loop_countries # ; Loop over countries x crops # train_ml_model # create_train_df # compute_ml_vars # ; create ml model # loop_forecasting
class MLCms: """ """ def __init__(self, config_file=''): # Parse config file self.parser = SafeConfigParser() self.parser.read(config_file) # machine learning specific variables self.classify = constants.DO_CLASSIFICATION # Regress or classify? self.vars_features = constants.fixed_vars self.vars_target = constants.ML_TARGETS if self.classify: self.var_target = constants.ML_TARGETS self.task = 'classification' self.model = RandomForestClassifier(n_estimators=2500, n_jobs=constants.ncpu, random_state=0) else: self.var_target = constants.ML_TARGETS self.task = 'regression' self.model = RandomForestRegressor(n_estimators=2500, n_jobs=constants.ncpu, random_state=0) # SVR() # Get path to input self.path_inp = constants.base_dir + os.sep + constants.name_inp_fl # Output directory is <dir>_<classification>_<2014> self.path_out_dir = constants.out_dir utils.make_dir_if_missing(self.path_out_dir) # Model pickle self.path_pickle_model = self.path_out_dir + os.sep + constants.model_pickle self.path_pickle_features = self.path_out_dir + os.sep + 'pickled_features' def output_model_importance(self, gs, name_gs, num_cols): """ :param gs: :param name_gs: :param num_cols: :return: """ rows_list = [] name_vars = [] feature_importance = gs.best_estimator_.named_steps[name_gs].feature_importances_ importances = 100.0 * (feature_importance / feature_importance.max()) std = np.std([tree.feature_importances_ for tree in self.model.estimators_], axis=0) indices = np.argsort(importances)[::-1] # Store feature ranking in a dataframe for f in range(num_cols): dict_results = {'Variable': self.vars_features[indices[f]], 'Importance': importances[indices[f]]} name_vars.append(self.vars_features[indices[f]]) rows_list.append(dict_results) df_results = pd.DataFrame(rows_list) num_cols = 10 if len(indices) > 10 else len(indices) # Plot upto a maximum of 10 features plot.plot_model_importance(num_bars=num_cols, xvals=importances[indices][:num_cols], std=std[indices][:num_cols], fname=self.task + '_importance_' + self.crop, title='Importance of variable (' + self.country + ' ' + self.crop_lname + ')', xlabel=name_vars[:num_cols], out_path=self.path_out_dir) df_results.to_csv(self.path_out_dir + os.sep + self.task + '_importance_' + self.crop + '.csv') def get_data(self): """ :return: """ df = pd.read_csv(self.path_inp) cols = [col for col in df.columns if col not in self.vars_features] # cols.extend(['DI', 'PI']) # Add information on PI and DI of soils # iterate over each row, get lat and lon # Find corresponding DI and PI lat_lons = zip(df['Long_round'], df['Lat_round']) vals_di = [] vals_pi = [] # for idx, (lon, lat) in enumerate(lat_lons): # print idx, len(lat_lons) # vals_pi.append(rgeo.get_value_at_point('C:\\Users\\ritvik\\Documents\\PhD\\Projects\\CMS\\Input\\Soils\\PI.tif', # lon, lat, replace_ras=False)) # vals_di.append(rgeo.get_value_at_point('C:\\Users\\ritvik\\Documents\\PhD\\Projects\\CMS\\Input\\Soils\\DI.tif', # lon, lat, replace_ras=False)) # # df['DI'] = vals_di # df['PI'] = vals_pi df = df[cols] data = df.as_matrix(columns=cols[1:]) target = df.as_matrix(columns=[self.var_target]).ravel() # Get training and testing splits splits = train_test_split(data, target, test_size=0.2) return cols, splits def train_ml_model(self): """ :return: """ logger.info('#########################################################################') logger.info('train_ml_model') logger.info('#########################################################################') ###################################################### # Load 
dataset ###################################################### cols, splits = self.get_data() data_train, data_test, target_train, target_test = splits # clf = ExtraTreesRegressor(500, n_jobs=constants.ncpu) # #clf = SVR(kernel='rbf', C=1e3, gamma=0.1) # #clf = skflow.TensorFlowDNNClassifier(hidden_units=[10, 20, 10], n_classes=3) # data = df_train.as_matrix(columns=cols[1:]) # convert dataframe column to matrix # #data = preprocessing.scale(data) # target = df_train.as_matrix(columns=[self.var_target]).ravel() # convert dataframe column to matrix # clf.fit(data, target) # # predict_val = clf.predict(after.as_matrix(columns=cols[1:])) # results = compute_stats.ols(predict_val.tolist(), after_target.tolist()) # print results.rsquared # import matplotlib.pyplot as plt # plt.scatter(after_target, predict_val) # plt.show() # pdb.set_trace() if not os.path.isfile(self.path_pickle_model): # For details in scikit workflow: See http://stackoverflow.com/questions/ # 35256876/ensuring-right-order-of-operations-in-random-forest-classification-in-scikit-lea # TODO Separate out a dataset so that even the grid search cv can be tested ############################ # Select features from model ############################ logger.info('Selecting important features from model') if self.classify: rf_feature_imp = ExtraTreesRegressor(150, n_jobs=constants.ncpu) else: rf_feature_imp = ExtraTreesRegressor(150, n_jobs=constants.ncpu) feat_selection = SelectFromModel(rf_feature_imp) pipeline = Pipeline([ ('fs', feat_selection), ('clf', self.model), ]) ################################# # Grid search for best parameters ################################# C_range = np.logspace(-2, 10, 13) gamma_range = np.logspace(-9, 3, 13) logger.info('Tuning hyperparameters') param_grid = { 'fs__threshold': ['mean', 'median'], 'fs__estimator__max_features': ['auto', 'log2'], 'clf__max_features': ['auto', 'log2'], 'clf__n_estimators': [1000, 2000] #'clf__gamma': np.logspace(-9, 3, 13), #'clf__C': np.logspace(-2, 10, 13) } gs = GridSearchCV(pipeline, param_grid=param_grid, verbose=2, n_jobs=constants.ncpu, error_score=np.nan) # Fir the data before getting the best parameter combination. Different data sets will have # different optimized parameter combinations, i.e. without data, there is no optimal parameter combination. 
gs.fit(data_train, target_train) logger.info(gs.best_params_) data_test = pd.DataFrame(data_test, columns=cols[1:]) # Update features that should be used in model selected_features = gs.best_estimator_.named_steps['fs'].transform([cols[1:]]) cols = selected_features[0] data_test = data_test[cols] # Update model with the best parameters learnt in the previous step self.model = gs.best_estimator_.named_steps['clf'] predict_val = self.model.predict(data_test) results = compute_stats.ols(predict_val.tolist(), target_test.tolist()) print results.rsquared print cols plt.scatter(target_test, predict_val) plt.show() pdb.set_trace() ################################################################### # Output and plot importance of model features, and learning curves ################################################################### self.output_model_importance(gs, 'clf', num_cols=len(cols[1:])) if constants.plot_model_importance: train_sizes, train_scores, test_scores = learning_curve(self.model, data, target, cv=k_fold, n_jobs=constants.ncpu) plot.plot_learning_curve(train_scores, test_scores, train_sizes=train_sizes, fname='learning_curve', ylim=(0.0, 1.01), title='Learning curves', out_path=self.path_out_dir) # Save the model to disk logger.info('Saving model and features as pickle on disk') with open(self.path_pickle_model, 'wb') as f: cPickle.dump(self.model, f) with open(self.path_pickle_features, 'wb') as f: cPickle.dump(self.vars_features, f) else: # Read model from pickle on disk with open(self.path_pickle_model, 'rb') as f: logger.info('Reading model from pickle on disk') self.model = cPickle.load(f) logger.info('Reading features from pickle on disk') self.vars_features = pd.read_pickle(self.path_pickle_features) return df_cc def do_forecasting(self, df_forecast, mon_names, available_target=False, name_target='yield'): """ 1. Does classification/regression based on already built model. 2. Plots confusion matrix for classification tasks, scatter plot for regression 3. Plots accuracy statistics for classification/regression :param df_forecast: :param mon_names: :param available_target: Is target array available? :param name_target: Name of target array (defaults to yield) :return: """ data = df_forecast.as_matrix(columns=self.vars_features) # convert dataframe column to matrix predicted = self.model.predict(data) if available_target: expected = df_forecast.as_matrix(columns=[name_target]).ravel() if not self.classify: # REGRESSION # Compute stats results = compute_stats.ols(predicted.tolist(), expected.tolist()) bias = compute_stats.bias(predicted, expected) rmse = compute_stats.rmse(predicted, expected) mae = compute_stats.mae(predicted, expected) # Plot! plot.plot_regression_scatter(expected, np.asarray(predicted), annotate=r'$r^{2}$ ' + '{:0.2f}'.format(results.rsquared) + '\n' + 'peak NDVI date: ' + self.time_peak_ndvi.strftime('%b %d'), xlabel='Expected yield', ylabel='Predicted yield', title=mon_names + ' ' + str(int(df_forecast[self.season].unique()[0])), fname=self.task + '_' + '_'.join([mon_names]) + '_' + self.crop, out_path=self.path_out_dir) # global expected vs predicted if self.debug: # any non-existing index will add row self.df_global.loc[len(self.df_global)] = [np.nanmean(expected), np.nanmean(predicted), mon_names, self.forecast_yr] return predicted, {'RMSE': rmse, 'MAE': mae, r'$r^{2}$': results.rsquared, 'Bias': bias} else: # CLASSIFICATION # Convert from crop condition class (e.g. 4) to string (e.g. 
exceptional) expected, predicted = compute_stats.remove_nans(expected, predicted) cm = confusion_matrix(expected, predicted, labels=self.dict_cc.keys()).T # Compute and plot class probabilities proba_cc = self.model.predict_proba(data) df_proba = pd.DataFrame(proba_cc, columns=self.dict_cc.values()) plot.plot_class_probabilities(df_proba, fname='proba_' + '_'.join([mon_names]) + '_' + self.crop, out_path=self.path_out_dir) # Plot confusion matrix plot.plot_confusion_matrix(cm, normalized=False, fname='cm_' + '_'.join([mon_names]) + '_' + self.crop, xlabel='True class', ylabel='Predicted class', ticks=self.dict_cc.values(), out_path=self.path_out_dir) # Normalize and plot confusion matrix cm_normalized = normalize(cm.astype(float), axis=1, norm='l1') plot.plot_confusion_matrix(cm_normalized, fname='norm_cm_' + '_'.join([mon_names]) + '_' + self.crop, xlabel='True class', ylabel='Predicted class', normalized=True, ticks=self.dict_cc.values(), out_path=self.path_out_dir) score_accuracy = accuracy_score(expected, predicted) * 100.0 score_precision = precision_score(expected, predicted, average='weighted') * 100.0 return predicted, {'Accuracy': score_accuracy, 'Precision': score_precision} else: return predicted, {'RMSE': np.nan, 'MAE': np.nan, r'$r^{2}$': np.nan, 'Bias': np.nan, 'Nash-Sutcliff': np.nan} def do_ml_model(): obj = MLCms(config_file='config_CMS.txt') obj.train_ml_model() if __name__ == '__main__': do_ml_model()
# create_forecast_df # do_forecasting
random_line_split
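# The training path above caches the fitted model and its feature list with cPickle so that later
# runs can skip retraining. The sketch below shows the same save/load round trip with the Python 3
# pickle module on a small throwaway model; the file names are placeholders, not the project's
# real paths from the constants module.
import pickle
from sklearn.datasets import make_regression
from sklearn.ensemble import RandomForestRegressor

X, y = make_regression(n_samples=100, n_features=5, random_state=0)
model = RandomForestRegressor(n_estimators=20, random_state=0).fit(X, y)
features = ['f0', 'f1', 'f2', 'f3', 'f4']

with open('model.pickle', 'wb') as f:      # hypothetical output path
    pickle.dump(model, f)
with open('features.pickle', 'wb') as f:   # hypothetical output path
    pickle.dump(features, f)

with open('model.pickle', 'rb') as f:
    restored = pickle.load(f)
print(restored.predict(X[:3]))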
aws.go
// Copyright © 2018 NAME HERE <EMAIL ADDRESS> // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package cmd import ( "bufio" "fmt" "log" "net" "os" "os/exec" "strconv" "strings" "github.com/spf13/cobra" "github.com/spf13/viper" ) var install, create, destroy bool var ec2IP string // awsCmd represents the aws command var awsCmd = &cobra.Command{ Use: "aws", Short: "Manages the infrastructure on AWS", Long: ` Create, delete and show current status of the deployment that is running on AWS. Kindly ensure that terraform is installed also.`, Args: cobra.NoArgs, Run: func(cmd *cobra.Command, args []string) { if install { // check if ansible is installed terr, err := exec.LookPath("ansible") if err != nil { log.Fatal("Ansible command not found, kindly check") } fmt.Printf("Found Ansible at %s\n", terr) rr, err := exec.Command("ansible", "--version").Output() if err != nil { log.Fatal(err) } fmt.Printf(string(rr)) //Start Kubernetes Installation //check if ansible host file exists if _, err := os.Stat("./kubespray/inventory/hosts"); err != nil { fmt.Println("./kubespray/inventory/host inventory file not found") os.Exit(1) } // Copy the configuraton files as indicated in the kubespray docs if _, err := os.Stat("./kubespray/inventory/awscluster"); err == nil { fmt.Println("Configuration folder already exists") } else { //os.MkdirAll("./kubespray/inventory/awscluster/group_vars", 0755) exec.Command("cp", "-rfp", "./kubespray/inventory/sample/", "./kubespray/inventory/awscluster/").Run() exec.Command("cp", "./kubespray/inventory/hosts", "./kubespray/inventory/awscluster/hosts").Run() //Enable load balancer api access and copy the kubeconfig file locally loadBalancerName, err := exec.Command("sh", "-c", "grep apiserver_loadbalancer_domain_name= ./kubespray/inventory/hosts | cut -d'=' -f2").CombinedOutput() if err != nil { fmt.Println("Problem getting the load balancer domain name", err) } else { //Make a copy of kubeconfig on Ansible host f, err := os.OpenFile("./kubespray/inventory/awscluster/group_vars/k8s-cluster.yml", os.O_APPEND|os.O_WRONLY, 0600) if err != nil { panic(err) } defer f.Close() fmt.Fprintf(f, "kubeconfig_localhost: true\n") g, err := os.OpenFile("./kubespray/inventory/awscluster/group_vars/all.yml", os.O_APPEND|os.O_WRONLY, 0600) if err != nil { panic(err) } defer g.Close() // Resolve Load Balancer Domain Name and pick the first IP s, _ := exec.Command("sh", "-c", "grep apiserver_loadbalancer_domain_name= ./kubespray/inventory/hosts | cut -d'=' -f2 | sed 's/\"//g'").CombinedOutput() // Convert the Domain name to string and strip all spaces so that Lookup does not return errors r := string(s) t := strings.TrimSpace(r) fmt.Println(t) node, err := net.LookupHost(t) if err != nil {
ec2IP := node[0] fmt.Println(node) DomainName := strings.TrimSpace(string(loadBalancerName)) loadBalancerDomainName := "apiserver_loadbalancer_domain_name: " + DomainName fmt.Fprintf(g, "#Set cloud provider to AWS\n") fmt.Fprintf(g, "cloud_provider: 'aws'\n") fmt.Fprintf(g, "#Load Balancer Configuration\n") fmt.Fprintf(g, "loadbalancer_apiserver_localhost: false\n") fmt.Fprintf(g, "%s\n", loadBalancerDomainName) fmt.Fprintf(g, "loadbalancer_apiserver:\n") fmt.Fprintf(g, " address: %s\n", ec2IP) fmt.Fprintf(g, " port: 6443\n") } } kubeSet := exec.Command("ansible-playbook", "-i", "./inventory/awscluster/hosts", "./cluster.yml", "--timeout=60", "-e ansible_user=centos", "-e bootstrap_os=centos", "-b", "--become-user=root", "--flush-cache") kubeSet.Dir = "./kubespray/" stdout, _ := kubeSet.StdoutPipe() kubeSet.Stderr = kubeSet.Stdout kubeSet.Start() scanner := bufio.NewScanner(stdout) for scanner.Scan() { m := scanner.Text() fmt.Println(m) //log.Printf(m) } kubeSet.Wait() os.Exit(0) } if destroy { // check if terraform is installed terr, err := exec.LookPath("terraform") if err != nil { log.Fatal("Terraform command not found, kindly check") } fmt.Printf("Found terraform at %s\n", terr) rr, err := exec.Command("terraform", "version").Output() if err != nil { log.Fatal(err) } fmt.Printf(string(rr)) // Remove ssh bastion file if _, err := os.Stat("./kubespray/ssh-bastion.conf"); err == nil { os.Remove("./kubespray/ssh-bastion.conf") } // Remove the cluster inventory folder err = os.RemoveAll("./kubespray/inventory/awscluster") if err != nil { fmt.Println(err) } // Check if credentials file exist, if it exists skip asking to input the AWS values if _, err := os.Stat("./kubespray/contrib/terraform/aws/credentials.tfvars"); err == nil { fmt.Println("Credentials file already exists, creation skipped") } else { fmt.Println("Please enter your AWS access key ID") var awsAccessKeyID string fmt.Scanln(&awsAccessKeyID) fmt.Println("Please enter your AWS SECRET ACCESS KEY") var awsSecretKey string fmt.Scanln(&awsSecretKey) fmt.Println("Please enter your AWS SSH Key Name") var awsAccessSSHKey string fmt.Scanln(&awsAccessSSHKey) fmt.Println("Please enter your AWS Default Region") var awsDefaultRegion string fmt.Scanln(&awsDefaultRegion) file, err := os.Create("./kubespray/contrib/terraform/aws/credentials.tfvars") if err != nil { log.Fatal("Cannot create file", err) } defer file.Close() fmt.Fprintf(file, "AWS_ACCESS_KEY_ID = %s\n", awsAccessKeyID) fmt.Fprintf(file, "AWS_SECRET_ACCESS_KEY = %s\n", awsSecretKey) fmt.Fprintf(file, "AWS_SSH_KEY_NAME = %s\n", awsAccessSSHKey) fmt.Fprintf(file, "AWS_DEFAULT_REGION = %s\n", awsDefaultRegion) } terrSet := exec.Command("terraform", "destroy", "-var-file=credentials.tfvars", "-force") terrSet.Dir = "./kubespray/contrib/terraform/aws/" stdout, _ := terrSet.StdoutPipe() terrSet.Stderr = terrSet.Stdout error := terrSet.Start() if error != nil { fmt.Println(error) } scanner := bufio.NewScanner(stdout) for scanner.Scan() { m := scanner.Text() fmt.Println(m) //log.Printf(m) } terrSet.Wait() os.Exit(0) } if create { // check if terraform is installed terr, err := exec.LookPath("terraform") if err != nil { log.Fatal("Terraform command not found, kindly check") } fmt.Printf("Found terraform at %s\n", terr) rr, err := exec.Command("terraform", "version").Output() if err != nil { log.Fatal(err) } fmt.Printf(string(rr)) // Check if credentials file exist, if it exists skip asking to input the AWS values if _, err := os.Stat("./kubespray/contrib/terraform/aws/credentials.tfvars"); 
err == nil { fmt.Println("Credentials file already exists, creation skipped") } else { //Read Configuration File viper.SetConfigName("config") viper.AddConfigPath(".") viper.AddConfigPath("/tk8") verr := viper.ReadInConfig() // Find and read the config file if verr != nil { // Handle errors reading the config file panic(fmt.Errorf("fatal error config file: %s", verr)) } awsAccessKeyID := viper.GetString("aws.aws_access_key_id") awsSecretKey := viper.GetString("aws.aws_secret_access_key") awsAccessSSHKey := viper.GetString("aws.aws_ssh_keypair") awsDefaultRegion := viper.GetString("aws.aws_default_region") file, err := os.Create("./kubespray/contrib/terraform/aws/credentials.tfvars") if err != nil { log.Fatal("Cannot create file", err) } defer file.Close() fmt.Fprintf(file, "AWS_ACCESS_KEY_ID = %s\n", strconv.Quote(awsAccessKeyID)) fmt.Fprintf(file, "AWS_SECRET_ACCESS_KEY = %s\n", strconv.Quote(awsSecretKey)) fmt.Fprintf(file, "AWS_SSH_KEY_NAME = %s\n", strconv.Quote(awsAccessSSHKey)) fmt.Fprintf(file, "AWS_DEFAULT_REGION = %s\n", strconv.Quote(awsDefaultRegion)) } // Remove tftvars file err = os.Remove("./kubespray/contrib/terraform/aws/terraform.tfvars") if err != nil { fmt.Println(err) } //Read Configuration File viper.SetConfigName("config") viper.AddConfigPath(".") verr := viper.ReadInConfig() // Find and read the config file if verr != nil { // Handle errors reading the config file panic(fmt.Errorf("fatal error config file: %s", verr)) } awsClusterName := viper.GetString("aws.clustername") awsVpcCidrBlock := viper.GetString("aws.aws_vpc_cidr_block") awsCidrSubnetsPrivate := viper.GetString("aws.aws_cidr_subnets_private") awsCidrSubnetsPublic := viper.GetString("aws.aws_cidr_subnets_public") awsBastionSize := viper.GetString("aws.aws_bastion_size") awsKubeMasterNum := viper.GetString("aws.aws_kube_master_num") awsKubeMasterSize := viper.GetString("aws.aws_kube_master_size") awsEtcdNum := viper.GetString("aws.aws_etcd_num") awsEtcdSize := viper.GetString("aws.aws_etcd_size") awsKubeWorkerNum := viper.GetString("aws.aws_kube_worker_num") awsKubeWorkerSize := viper.GetString("aws.aws_kube_worker_size") awsElbAPIPort := viper.GetString("aws.aws_elb_api_port") k8sSecureAPIPort := viper.GetString("aws.k8s_secure_api_port") kubeInsecureApiserverAddress := viper.GetString("aws.") tfile, err := os.Create("./kubespray/contrib/terraform/aws/terraform.tfvars") if err != nil { log.Fatal("Cannot create file", err) } defer tfile.Close() fmt.Fprintf(tfile, "aws_cluster_name = %s\n", strconv.Quote(awsClusterName)) fmt.Fprintf(tfile, "aws_vpc_cidr_block = %s\n", strconv.Quote(awsVpcCidrBlock)) fmt.Fprintf(tfile, "aws_cidr_subnets_private = %s\n", awsCidrSubnetsPrivate) fmt.Fprintf(tfile, "aws_cidr_subnets_public = %s\n", awsCidrSubnetsPublic) fmt.Fprintf(tfile, "aws_bastion_size = %s\n", strconv.Quote(awsBastionSize)) fmt.Fprintf(tfile, "aws_kube_master_num = %s\n", awsKubeMasterNum) fmt.Fprintf(tfile, "aws_kube_master_size = %s\n", strconv.Quote(awsKubeMasterSize)) fmt.Fprintf(tfile, "aws_etcd_num = %s\n", awsEtcdNum) fmt.Fprintf(tfile, "aws_etcd_size = %s\n", strconv.Quote(awsEtcdSize)) fmt.Fprintf(tfile, "aws_kube_worker_num = %s\n", awsKubeWorkerNum) fmt.Fprintf(tfile, "aws_kube_worker_size = %s\n", strconv.Quote(awsKubeWorkerSize)) fmt.Fprintf(tfile, "aws_elb_api_port = %s\n", awsElbAPIPort) fmt.Fprintf(tfile, "k8s_secure_api_port = %s\n", k8sSecureAPIPort) fmt.Fprintf(tfile, "kube_insecure_apiserver_address = %s\n", strconv.Quote(kubeInsecureApiserverAddress)) fmt.Fprintf(tfile, "default_tags = 
{\n") fmt.Fprintf(tfile, "# Env = 'devtest'\n") fmt.Fprintf(tfile, "# Product = 'kubernetes'\n") fmt.Fprintf(tfile, "}") //fmt.Println("Please enter your AWS access key ID") //var awsAccessKeyID string //fmt.Scanln(&awsAccessKeyID) //fmt.Println("Please enter your AWS SECRET ACCESS KEY") //var awsSecretKey string //fmt.Scanln(&awsSecretKey) //fmt.Println("Please enter your AWS SSH Key Name") //var awsAccessSSHKey string //fmt.Scanln(&awsAccessSSHKey) //fmt.Println("Please enter your AWS Default Region") //var awsDefaultRegion string //fmt.Scanln(&awsDefaultRegion) terrInit := exec.Command("terraform", "init") terrInit.Dir = "./kubespray/contrib/terraform/aws/" out, _ := terrInit.StdoutPipe() terrInit.Start() scanInit := bufio.NewScanner(out) for scanInit.Scan() { m := scanInit.Text() fmt.Println(m) //log.Printf(m) } terrInit.Wait() terrSet := exec.Command("terraform", "apply", "-var-file=credentials.tfvars", "-auto-approve") terrSet.Dir = "./kubespray/contrib/terraform/aws/" stdout, err := terrSet.StdoutPipe() terrSet.Stderr = terrSet.Stdout terrSet.Start() scanner := bufio.NewScanner(stdout) for scanner.Scan() { m := scanner.Text() fmt.Println(m) //log.Printf(m) } terrSet.Wait() os.Exit(0) } if len(args) == 0 { cmd.Help() os.Exit(0) } }, } func init() { clusterCmd.AddCommand(awsCmd) // Here you will define your flags and configuration settings. // Cobra supports Persistent Flags which will work for this command // and all subcommands, e.g.: // awsCmd.PersistentFlags().String("foo", "", "A help for foo") // Cobra supports local flags which will only run when this command // is called directly, e.g.: // awsCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle") awsCmd.Flags().BoolVarP(&install, "install", "i", false, "Install Kubernetes on the AWS infrastructure") // Flags to initiate the terraform installation awsCmd.Flags().BoolVarP(&create, "create", "c", false, "Deploy the AWS infrastructure using terraform") // Flag to destroy the AWS infrastructure using terraform awsCmd.Flags().BoolVarP(&destroy, "destroy", "d", false, "Destroy the AWS infrastructure") }
fmt.Println(err)
	os.Exit(1)
}
conditional_block
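The aws.go row above repeatedly uses the same output-streaming pattern around its ansible-playbook and terraform invocations: take the command's StdoutPipe, point Stderr at the same pipe, then read line by line with a bufio.Scanner until Wait returns. The following is a minimal standalone sketch of that pattern, not the project's code; the "terraform version" command is only an illustration.

// Sketch of the subprocess output-streaming pattern used above.
package main

import (
	"bufio"
	"fmt"
	"log"
	"os/exec"
)

func streamCommand(name string, args ...string) error {
	cmd := exec.Command(name, args...)
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return err
	}
	// Route stderr into the same pipe so one scanner sees both streams.
	cmd.Stderr = cmd.Stdout
	if err := cmd.Start(); err != nil {
		return err
	}
	scanner := bufio.NewScanner(stdout)
	for scanner.Scan() {
		fmt.Println(scanner.Text())
	}
	// Wait must come after the pipe has been drained.
	return cmd.Wait()
}

func main() {
	if err := streamCommand("terraform", "version"); err != nil {
		log.Fatal(err)
	}
}

Merging stderr into the stdout pipe keeps the example to a single scanner loop, which is the trade-off the original code makes as well; a production version would also check scanner.Err().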
aws.go
// Copyright © 2018 NAME HERE <EMAIL ADDRESS> // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package cmd import ( "bufio" "fmt" "log" "net" "os" "os/exec" "strconv" "strings" "github.com/spf13/cobra" "github.com/spf13/viper" ) var install, create, destroy bool var ec2IP string // awsCmd represents the aws command var awsCmd = &cobra.Command{ Use: "aws", Short: "Manages the infrastructure on AWS", Long: ` Create, delete and show current status of the deployment that is running on AWS. Kindly ensure that terraform is installed also.`, Args: cobra.NoArgs, Run: func(cmd *cobra.Command, args []string) { if install { // check if ansible is installed terr, err := exec.LookPath("ansible") if err != nil { log.Fatal("Ansible command not found, kindly check") } fmt.Printf("Found Ansible at %s\n", terr) rr, err := exec.Command("ansible", "--version").Output() if err != nil { log.Fatal(err) } fmt.Printf(string(rr)) //Start Kubernetes Installation //check if ansible host file exists if _, err := os.Stat("./kubespray/inventory/hosts"); err != nil { fmt.Println("./kubespray/inventory/host inventory file not found") os.Exit(1) } // Copy the configuraton files as indicated in the kubespray docs if _, err := os.Stat("./kubespray/inventory/awscluster"); err == nil { fmt.Println("Configuration folder already exists") } else { //os.MkdirAll("./kubespray/inventory/awscluster/group_vars", 0755) exec.Command("cp", "-rfp", "./kubespray/inventory/sample/", "./kubespray/inventory/awscluster/").Run() exec.Command("cp", "./kubespray/inventory/hosts", "./kubespray/inventory/awscluster/hosts").Run() //Enable load balancer api access and copy the kubeconfig file locally loadBalancerName, err := exec.Command("sh", "-c", "grep apiserver_loadbalancer_domain_name= ./kubespray/inventory/hosts | cut -d'=' -f2").CombinedOutput() if err != nil { fmt.Println("Problem getting the load balancer domain name", err) } else { //Make a copy of kubeconfig on Ansible host f, err := os.OpenFile("./kubespray/inventory/awscluster/group_vars/k8s-cluster.yml", os.O_APPEND|os.O_WRONLY, 0600) if err != nil { panic(err) } defer f.Close() fmt.Fprintf(f, "kubeconfig_localhost: true\n") g, err := os.OpenFile("./kubespray/inventory/awscluster/group_vars/all.yml", os.O_APPEND|os.O_WRONLY, 0600) if err != nil { panic(err) } defer g.Close() // Resolve Load Balancer Domain Name and pick the first IP s, _ := exec.Command("sh", "-c", "grep apiserver_loadbalancer_domain_name= ./kubespray/inventory/hosts | cut -d'=' -f2 | sed 's/\"//g'").CombinedOutput() // Convert the Domain name to string and strip all spaces so that Lookup does not return errors r := string(s) t := strings.TrimSpace(r) fmt.Println(t) node, err := net.LookupHost(t) if err != nil { fmt.Println(err) os.Exit(1) } ec2IP := node[0] fmt.Println(node) DomainName := strings.TrimSpace(string(loadBalancerName)) loadBalancerDomainName := "apiserver_loadbalancer_domain_name: " + DomainName fmt.Fprintf(g, "#Set cloud provider to AWS\n") fmt.Fprintf(g, "cloud_provider: 'aws'\n") fmt.Fprintf(g, "#Load 
Balancer Configuration\n") fmt.Fprintf(g, "loadbalancer_apiserver_localhost: false\n") fmt.Fprintf(g, "%s\n", loadBalancerDomainName) fmt.Fprintf(g, "loadbalancer_apiserver:\n") fmt.Fprintf(g, " address: %s\n", ec2IP) fmt.Fprintf(g, " port: 6443\n") } } kubeSet := exec.Command("ansible-playbook", "-i", "./inventory/awscluster/hosts", "./cluster.yml", "--timeout=60", "-e ansible_user=centos", "-e bootstrap_os=centos", "-b", "--become-user=root", "--flush-cache") kubeSet.Dir = "./kubespray/" stdout, _ := kubeSet.StdoutPipe() kubeSet.Stderr = kubeSet.Stdout kubeSet.Start() scanner := bufio.NewScanner(stdout) for scanner.Scan() { m := scanner.Text() fmt.Println(m) //log.Printf(m) } kubeSet.Wait() os.Exit(0) } if destroy { // check if terraform is installed terr, err := exec.LookPath("terraform") if err != nil { log.Fatal("Terraform command not found, kindly check") } fmt.Printf("Found terraform at %s\n", terr) rr, err := exec.Command("terraform", "version").Output() if err != nil { log.Fatal(err) } fmt.Printf(string(rr)) // Remove ssh bastion file if _, err := os.Stat("./kubespray/ssh-bastion.conf"); err == nil { os.Remove("./kubespray/ssh-bastion.conf") } // Remove the cluster inventory folder err = os.RemoveAll("./kubespray/inventory/awscluster") if err != nil { fmt.Println(err) } // Check if credentials file exist, if it exists skip asking to input the AWS values if _, err := os.Stat("./kubespray/contrib/terraform/aws/credentials.tfvars"); err == nil { fmt.Println("Credentials file already exists, creation skipped") } else { fmt.Println("Please enter your AWS access key ID") var awsAccessKeyID string fmt.Scanln(&awsAccessKeyID) fmt.Println("Please enter your AWS SECRET ACCESS KEY") var awsSecretKey string fmt.Scanln(&awsSecretKey) fmt.Println("Please enter your AWS SSH Key Name") var awsAccessSSHKey string fmt.Scanln(&awsAccessSSHKey) fmt.Println("Please enter your AWS Default Region") var awsDefaultRegion string fmt.Scanln(&awsDefaultRegion) file, err := os.Create("./kubespray/contrib/terraform/aws/credentials.tfvars") if err != nil { log.Fatal("Cannot create file", err) } defer file.Close() fmt.Fprintf(file, "AWS_ACCESS_KEY_ID = %s\n", awsAccessKeyID) fmt.Fprintf(file, "AWS_SECRET_ACCESS_KEY = %s\n", awsSecretKey) fmt.Fprintf(file, "AWS_SSH_KEY_NAME = %s\n", awsAccessSSHKey) fmt.Fprintf(file, "AWS_DEFAULT_REGION = %s\n", awsDefaultRegion) } terrSet := exec.Command("terraform", "destroy", "-var-file=credentials.tfvars", "-force") terrSet.Dir = "./kubespray/contrib/terraform/aws/" stdout, _ := terrSet.StdoutPipe() terrSet.Stderr = terrSet.Stdout error := terrSet.Start() if error != nil { fmt.Println(error) } scanner := bufio.NewScanner(stdout) for scanner.Scan() { m := scanner.Text() fmt.Println(m) //log.Printf(m) } terrSet.Wait() os.Exit(0) } if create { // check if terraform is installed terr, err := exec.LookPath("terraform") if err != nil { log.Fatal("Terraform command not found, kindly check") } fmt.Printf("Found terraform at %s\n", terr) rr, err := exec.Command("terraform", "version").Output() if err != nil { log.Fatal(err) } fmt.Printf(string(rr)) // Check if credentials file exist, if it exists skip asking to input the AWS values if _, err := os.Stat("./kubespray/contrib/terraform/aws/credentials.tfvars"); err == nil { fmt.Println("Credentials file already exists, creation skipped") } else { //Read Configuration File viper.SetConfigName("config") viper.AddConfigPath(".") viper.AddConfigPath("/tk8") verr := viper.ReadInConfig() // Find and read the config file if verr != nil { // 
Handle errors reading the config file panic(fmt.Errorf("fatal error config file: %s", verr)) } awsAccessKeyID := viper.GetString("aws.aws_access_key_id") awsSecretKey := viper.GetString("aws.aws_secret_access_key") awsAccessSSHKey := viper.GetString("aws.aws_ssh_keypair") awsDefaultRegion := viper.GetString("aws.aws_default_region") file, err := os.Create("./kubespray/contrib/terraform/aws/credentials.tfvars") if err != nil { log.Fatal("Cannot create file", err) } defer file.Close() fmt.Fprintf(file, "AWS_ACCESS_KEY_ID = %s\n", strconv.Quote(awsAccessKeyID)) fmt.Fprintf(file, "AWS_SECRET_ACCESS_KEY = %s\n", strconv.Quote(awsSecretKey)) fmt.Fprintf(file, "AWS_SSH_KEY_NAME = %s\n", strconv.Quote(awsAccessSSHKey)) fmt.Fprintf(file, "AWS_DEFAULT_REGION = %s\n", strconv.Quote(awsDefaultRegion)) } // Remove tftvars file err = os.Remove("./kubespray/contrib/terraform/aws/terraform.tfvars") if err != nil { fmt.Println(err) } //Read Configuration File viper.SetConfigName("config") viper.AddConfigPath(".") verr := viper.ReadInConfig() // Find and read the config file if verr != nil { // Handle errors reading the config file panic(fmt.Errorf("fatal error config file: %s", verr)) } awsClusterName := viper.GetString("aws.clustername") awsVpcCidrBlock := viper.GetString("aws.aws_vpc_cidr_block")
awsBastionSize := viper.GetString("aws.aws_bastion_size") awsKubeMasterNum := viper.GetString("aws.aws_kube_master_num") awsKubeMasterSize := viper.GetString("aws.aws_kube_master_size") awsEtcdNum := viper.GetString("aws.aws_etcd_num") awsEtcdSize := viper.GetString("aws.aws_etcd_size") awsKubeWorkerNum := viper.GetString("aws.aws_kube_worker_num") awsKubeWorkerSize := viper.GetString("aws.aws_kube_worker_size") awsElbAPIPort := viper.GetString("aws.aws_elb_api_port") k8sSecureAPIPort := viper.GetString("aws.k8s_secure_api_port") kubeInsecureApiserverAddress := viper.GetString("aws.") tfile, err := os.Create("./kubespray/contrib/terraform/aws/terraform.tfvars") if err != nil { log.Fatal("Cannot create file", err) } defer tfile.Close() fmt.Fprintf(tfile, "aws_cluster_name = %s\n", strconv.Quote(awsClusterName)) fmt.Fprintf(tfile, "aws_vpc_cidr_block = %s\n", strconv.Quote(awsVpcCidrBlock)) fmt.Fprintf(tfile, "aws_cidr_subnets_private = %s\n", awsCidrSubnetsPrivate) fmt.Fprintf(tfile, "aws_cidr_subnets_public = %s\n", awsCidrSubnetsPublic) fmt.Fprintf(tfile, "aws_bastion_size = %s\n", strconv.Quote(awsBastionSize)) fmt.Fprintf(tfile, "aws_kube_master_num = %s\n", awsKubeMasterNum) fmt.Fprintf(tfile, "aws_kube_master_size = %s\n", strconv.Quote(awsKubeMasterSize)) fmt.Fprintf(tfile, "aws_etcd_num = %s\n", awsEtcdNum) fmt.Fprintf(tfile, "aws_etcd_size = %s\n", strconv.Quote(awsEtcdSize)) fmt.Fprintf(tfile, "aws_kube_worker_num = %s\n", awsKubeWorkerNum) fmt.Fprintf(tfile, "aws_kube_worker_size = %s\n", strconv.Quote(awsKubeWorkerSize)) fmt.Fprintf(tfile, "aws_elb_api_port = %s\n", awsElbAPIPort) fmt.Fprintf(tfile, "k8s_secure_api_port = %s\n", k8sSecureAPIPort) fmt.Fprintf(tfile, "kube_insecure_apiserver_address = %s\n", strconv.Quote(kubeInsecureApiserverAddress)) fmt.Fprintf(tfile, "default_tags = {\n") fmt.Fprintf(tfile, "# Env = 'devtest'\n") fmt.Fprintf(tfile, "# Product = 'kubernetes'\n") fmt.Fprintf(tfile, "}") //fmt.Println("Please enter your AWS access key ID") //var awsAccessKeyID string //fmt.Scanln(&awsAccessKeyID) //fmt.Println("Please enter your AWS SECRET ACCESS KEY") //var awsSecretKey string //fmt.Scanln(&awsSecretKey) //fmt.Println("Please enter your AWS SSH Key Name") //var awsAccessSSHKey string //fmt.Scanln(&awsAccessSSHKey) //fmt.Println("Please enter your AWS Default Region") //var awsDefaultRegion string //fmt.Scanln(&awsDefaultRegion) terrInit := exec.Command("terraform", "init") terrInit.Dir = "./kubespray/contrib/terraform/aws/" out, _ := terrInit.StdoutPipe() terrInit.Start() scanInit := bufio.NewScanner(out) for scanInit.Scan() { m := scanInit.Text() fmt.Println(m) //log.Printf(m) } terrInit.Wait() terrSet := exec.Command("terraform", "apply", "-var-file=credentials.tfvars", "-auto-approve") terrSet.Dir = "./kubespray/contrib/terraform/aws/" stdout, err := terrSet.StdoutPipe() terrSet.Stderr = terrSet.Stdout terrSet.Start() scanner := bufio.NewScanner(stdout) for scanner.Scan() { m := scanner.Text() fmt.Println(m) //log.Printf(m) } terrSet.Wait() os.Exit(0) } if len(args) == 0 { cmd.Help() os.Exit(0) } }, } func init() { clusterCmd.AddCommand(awsCmd) // Here you will define your flags and configuration settings. 
// Cobra supports Persistent Flags which will work for this command // and all subcommands, e.g.: // awsCmd.PersistentFlags().String("foo", "", "A help for foo") // Cobra supports local flags which will only run when this command // is called directly, e.g.: // awsCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle") awsCmd.Flags().BoolVarP(&install, "install", "i", false, "Install Kubernetes on the AWS infrastructure") // Flags to initiate the terraform installation awsCmd.Flags().BoolVarP(&create, "create", "c", false, "Deploy the AWS infrastructure using terraform") // Flag to destroy the AWS infrastructure using terraform awsCmd.Flags().BoolVarP(&destroy, "destroy", "d", false, "Destroy the AWS infrastructure") }
awsCidrSubnetsPrivate := viper.GetString("aws.aws_cidr_subnets_private")
awsCidrSubnetsPublic := viper.GetString("aws.aws_cidr_subnets_public")
random_line_split
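The create branch shown in this row writes credentials.tfvars and terraform.tfvars with fmt.Fprintf, wrapping string-typed variables in strconv.Quote while leaving numeric and list values bare. A minimal sketch of that distinction follows; the output path and the concrete values are illustrative only.

// Sketch of the tfvars-writing pattern from the create branch above.
package main

import (
	"fmt"
	"log"
	"os"
	"strconv"
)

func main() {
	f, err := os.Create("terraform.tfvars") // hypothetical output path
	if err != nil {
		log.Fatal("Cannot create file", err)
	}
	defer f.Close()

	// Strings are quoted so Terraform sees valid HCL string literals.
	fmt.Fprintf(f, "aws_cluster_name = %s\n", strconv.Quote("kubernetes"))
	// Numbers and list literals are written as-is.
	fmt.Fprintf(f, "aws_kube_master_num = %s\n", "3")
	fmt.Fprintf(f, "aws_cidr_subnets_public = %s\n", `["10.250.224.0/20"]`)
}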
aws.go
// Copyright © 2018 NAME HERE <EMAIL ADDRESS> // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package cmd import ( "bufio" "fmt" "log" "net" "os" "os/exec" "strconv" "strings" "github.com/spf13/cobra" "github.com/spf13/viper" ) var install, create, destroy bool var ec2IP string // awsCmd represents the aws command var awsCmd = &cobra.Command{ Use: "aws", Short: "Manages the infrastructure on AWS", Long: ` Create, delete and show current status of the deployment that is running on AWS. Kindly ensure that terraform is installed also.`, Args: cobra.NoArgs, Run: func(cmd *cobra.Command, args []string) { if install { // check if ansible is installed terr, err := exec.LookPath("ansible") if err != nil { log.Fatal("Ansible command not found, kindly check") } fmt.Printf("Found Ansible at %s\n", terr) rr, err := exec.Command("ansible", "--version").Output() if err != nil { log.Fatal(err) } fmt.Printf(string(rr)) //Start Kubernetes Installation //check if ansible host file exists if _, err := os.Stat("./kubespray/inventory/hosts"); err != nil { fmt.Println("./kubespray/inventory/host inventory file not found") os.Exit(1) } // Copy the configuraton files as indicated in the kubespray docs if _, err := os.Stat("./kubespray/inventory/awscluster"); err == nil { fmt.Println("Configuration folder already exists") } else { //os.MkdirAll("./kubespray/inventory/awscluster/group_vars", 0755) exec.Command("cp", "-rfp", "./kubespray/inventory/sample/", "./kubespray/inventory/awscluster/").Run() exec.Command("cp", "./kubespray/inventory/hosts", "./kubespray/inventory/awscluster/hosts").Run() //Enable load balancer api access and copy the kubeconfig file locally loadBalancerName, err := exec.Command("sh", "-c", "grep apiserver_loadbalancer_domain_name= ./kubespray/inventory/hosts | cut -d'=' -f2").CombinedOutput() if err != nil { fmt.Println("Problem getting the load balancer domain name", err) } else { //Make a copy of kubeconfig on Ansible host f, err := os.OpenFile("./kubespray/inventory/awscluster/group_vars/k8s-cluster.yml", os.O_APPEND|os.O_WRONLY, 0600) if err != nil { panic(err) } defer f.Close() fmt.Fprintf(f, "kubeconfig_localhost: true\n") g, err := os.OpenFile("./kubespray/inventory/awscluster/group_vars/all.yml", os.O_APPEND|os.O_WRONLY, 0600) if err != nil { panic(err) } defer g.Close() // Resolve Load Balancer Domain Name and pick the first IP s, _ := exec.Command("sh", "-c", "grep apiserver_loadbalancer_domain_name= ./kubespray/inventory/hosts | cut -d'=' -f2 | sed 's/\"//g'").CombinedOutput() // Convert the Domain name to string and strip all spaces so that Lookup does not return errors r := string(s) t := strings.TrimSpace(r) fmt.Println(t) node, err := net.LookupHost(t) if err != nil { fmt.Println(err) os.Exit(1) } ec2IP := node[0] fmt.Println(node) DomainName := strings.TrimSpace(string(loadBalancerName)) loadBalancerDomainName := "apiserver_loadbalancer_domain_name: " + DomainName fmt.Fprintf(g, "#Set cloud provider to AWS\n") fmt.Fprintf(g, "cloud_provider: 'aws'\n") fmt.Fprintf(g, "#Load 
Balancer Configuration\n") fmt.Fprintf(g, "loadbalancer_apiserver_localhost: false\n") fmt.Fprintf(g, "%s\n", loadBalancerDomainName) fmt.Fprintf(g, "loadbalancer_apiserver:\n") fmt.Fprintf(g, " address: %s\n", ec2IP) fmt.Fprintf(g, " port: 6443\n") } } kubeSet := exec.Command("ansible-playbook", "-i", "./inventory/awscluster/hosts", "./cluster.yml", "--timeout=60", "-e ansible_user=centos", "-e bootstrap_os=centos", "-b", "--become-user=root", "--flush-cache") kubeSet.Dir = "./kubespray/" stdout, _ := kubeSet.StdoutPipe() kubeSet.Stderr = kubeSet.Stdout kubeSet.Start() scanner := bufio.NewScanner(stdout) for scanner.Scan() { m := scanner.Text() fmt.Println(m) //log.Printf(m) } kubeSet.Wait() os.Exit(0) } if destroy { // check if terraform is installed terr, err := exec.LookPath("terraform") if err != nil { log.Fatal("Terraform command not found, kindly check") } fmt.Printf("Found terraform at %s\n", terr) rr, err := exec.Command("terraform", "version").Output() if err != nil { log.Fatal(err) } fmt.Printf(string(rr)) // Remove ssh bastion file if _, err := os.Stat("./kubespray/ssh-bastion.conf"); err == nil { os.Remove("./kubespray/ssh-bastion.conf") } // Remove the cluster inventory folder err = os.RemoveAll("./kubespray/inventory/awscluster") if err != nil { fmt.Println(err) } // Check if credentials file exist, if it exists skip asking to input the AWS values if _, err := os.Stat("./kubespray/contrib/terraform/aws/credentials.tfvars"); err == nil { fmt.Println("Credentials file already exists, creation skipped") } else { fmt.Println("Please enter your AWS access key ID") var awsAccessKeyID string fmt.Scanln(&awsAccessKeyID) fmt.Println("Please enter your AWS SECRET ACCESS KEY") var awsSecretKey string fmt.Scanln(&awsSecretKey) fmt.Println("Please enter your AWS SSH Key Name") var awsAccessSSHKey string fmt.Scanln(&awsAccessSSHKey) fmt.Println("Please enter your AWS Default Region") var awsDefaultRegion string fmt.Scanln(&awsDefaultRegion) file, err := os.Create("./kubespray/contrib/terraform/aws/credentials.tfvars") if err != nil { log.Fatal("Cannot create file", err) } defer file.Close() fmt.Fprintf(file, "AWS_ACCESS_KEY_ID = %s\n", awsAccessKeyID) fmt.Fprintf(file, "AWS_SECRET_ACCESS_KEY = %s\n", awsSecretKey) fmt.Fprintf(file, "AWS_SSH_KEY_NAME = %s\n", awsAccessSSHKey) fmt.Fprintf(file, "AWS_DEFAULT_REGION = %s\n", awsDefaultRegion) } terrSet := exec.Command("terraform", "destroy", "-var-file=credentials.tfvars", "-force") terrSet.Dir = "./kubespray/contrib/terraform/aws/" stdout, _ := terrSet.StdoutPipe() terrSet.Stderr = terrSet.Stdout error := terrSet.Start() if error != nil { fmt.Println(error) } scanner := bufio.NewScanner(stdout) for scanner.Scan() { m := scanner.Text() fmt.Println(m) //log.Printf(m) } terrSet.Wait() os.Exit(0) } if create { // check if terraform is installed terr, err := exec.LookPath("terraform") if err != nil { log.Fatal("Terraform command not found, kindly check") } fmt.Printf("Found terraform at %s\n", terr) rr, err := exec.Command("terraform", "version").Output() if err != nil { log.Fatal(err) } fmt.Printf(string(rr)) // Check if credentials file exist, if it exists skip asking to input the AWS values if _, err := os.Stat("./kubespray/contrib/terraform/aws/credentials.tfvars"); err == nil { fmt.Println("Credentials file already exists, creation skipped") } else { //Read Configuration File viper.SetConfigName("config") viper.AddConfigPath(".") viper.AddConfigPath("/tk8") verr := viper.ReadInConfig() // Find and read the config file if verr != nil { // 
Handle errors reading the config file panic(fmt.Errorf("fatal error config file: %s", verr)) } awsAccessKeyID := viper.GetString("aws.aws_access_key_id") awsSecretKey := viper.GetString("aws.aws_secret_access_key") awsAccessSSHKey := viper.GetString("aws.aws_ssh_keypair") awsDefaultRegion := viper.GetString("aws.aws_default_region") file, err := os.Create("./kubespray/contrib/terraform/aws/credentials.tfvars") if err != nil { log.Fatal("Cannot create file", err) } defer file.Close() fmt.Fprintf(file, "AWS_ACCESS_KEY_ID = %s\n", strconv.Quote(awsAccessKeyID)) fmt.Fprintf(file, "AWS_SECRET_ACCESS_KEY = %s\n", strconv.Quote(awsSecretKey)) fmt.Fprintf(file, "AWS_SSH_KEY_NAME = %s\n", strconv.Quote(awsAccessSSHKey)) fmt.Fprintf(file, "AWS_DEFAULT_REGION = %s\n", strconv.Quote(awsDefaultRegion)) } // Remove tftvars file err = os.Remove("./kubespray/contrib/terraform/aws/terraform.tfvars") if err != nil { fmt.Println(err) } //Read Configuration File viper.SetConfigName("config") viper.AddConfigPath(".") verr := viper.ReadInConfig() // Find and read the config file if verr != nil { // Handle errors reading the config file panic(fmt.Errorf("fatal error config file: %s", verr)) } awsClusterName := viper.GetString("aws.clustername") awsVpcCidrBlock := viper.GetString("aws.aws_vpc_cidr_block") awsCidrSubnetsPrivate := viper.GetString("aws.aws_cidr_subnets_private") awsCidrSubnetsPublic := viper.GetString("aws.aws_cidr_subnets_public") awsBastionSize := viper.GetString("aws.aws_bastion_size") awsKubeMasterNum := viper.GetString("aws.aws_kube_master_num") awsKubeMasterSize := viper.GetString("aws.aws_kube_master_size") awsEtcdNum := viper.GetString("aws.aws_etcd_num") awsEtcdSize := viper.GetString("aws.aws_etcd_size") awsKubeWorkerNum := viper.GetString("aws.aws_kube_worker_num") awsKubeWorkerSize := viper.GetString("aws.aws_kube_worker_size") awsElbAPIPort := viper.GetString("aws.aws_elb_api_port") k8sSecureAPIPort := viper.GetString("aws.k8s_secure_api_port") kubeInsecureApiserverAddress := viper.GetString("aws.") tfile, err := os.Create("./kubespray/contrib/terraform/aws/terraform.tfvars") if err != nil { log.Fatal("Cannot create file", err) } defer tfile.Close() fmt.Fprintf(tfile, "aws_cluster_name = %s\n", strconv.Quote(awsClusterName)) fmt.Fprintf(tfile, "aws_vpc_cidr_block = %s\n", strconv.Quote(awsVpcCidrBlock)) fmt.Fprintf(tfile, "aws_cidr_subnets_private = %s\n", awsCidrSubnetsPrivate) fmt.Fprintf(tfile, "aws_cidr_subnets_public = %s\n", awsCidrSubnetsPublic) fmt.Fprintf(tfile, "aws_bastion_size = %s\n", strconv.Quote(awsBastionSize)) fmt.Fprintf(tfile, "aws_kube_master_num = %s\n", awsKubeMasterNum) fmt.Fprintf(tfile, "aws_kube_master_size = %s\n", strconv.Quote(awsKubeMasterSize)) fmt.Fprintf(tfile, "aws_etcd_num = %s\n", awsEtcdNum) fmt.Fprintf(tfile, "aws_etcd_size = %s\n", strconv.Quote(awsEtcdSize)) fmt.Fprintf(tfile, "aws_kube_worker_num = %s\n", awsKubeWorkerNum) fmt.Fprintf(tfile, "aws_kube_worker_size = %s\n", strconv.Quote(awsKubeWorkerSize)) fmt.Fprintf(tfile, "aws_elb_api_port = %s\n", awsElbAPIPort) fmt.Fprintf(tfile, "k8s_secure_api_port = %s\n", k8sSecureAPIPort) fmt.Fprintf(tfile, "kube_insecure_apiserver_address = %s\n", strconv.Quote(kubeInsecureApiserverAddress)) fmt.Fprintf(tfile, "default_tags = {\n") fmt.Fprintf(tfile, "# Env = 'devtest'\n") fmt.Fprintf(tfile, "# Product = 'kubernetes'\n") fmt.Fprintf(tfile, "}") //fmt.Println("Please enter your AWS access key ID") //var awsAccessKeyID string //fmt.Scanln(&awsAccessKeyID) //fmt.Println("Please enter your AWS SECRET 
ACCESS KEY") //var awsSecretKey string //fmt.Scanln(&awsSecretKey) //fmt.Println("Please enter your AWS SSH Key Name") //var awsAccessSSHKey string //fmt.Scanln(&awsAccessSSHKey) //fmt.Println("Please enter your AWS Default Region") //var awsDefaultRegion string //fmt.Scanln(&awsDefaultRegion) terrInit := exec.Command("terraform", "init") terrInit.Dir = "./kubespray/contrib/terraform/aws/" out, _ := terrInit.StdoutPipe() terrInit.Start() scanInit := bufio.NewScanner(out) for scanInit.Scan() { m := scanInit.Text() fmt.Println(m) //log.Printf(m) } terrInit.Wait() terrSet := exec.Command("terraform", "apply", "-var-file=credentials.tfvars", "-auto-approve") terrSet.Dir = "./kubespray/contrib/terraform/aws/" stdout, err := terrSet.StdoutPipe() terrSet.Stderr = terrSet.Stdout terrSet.Start() scanner := bufio.NewScanner(stdout) for scanner.Scan() { m := scanner.Text() fmt.Println(m) //log.Printf(m) } terrSet.Wait() os.Exit(0) } if len(args) == 0 { cmd.Help() os.Exit(0) } }, } func init() {
clusterCmd.AddCommand(awsCmd)

	// Here you will define your flags and configuration settings.

	// Cobra supports Persistent Flags which will work for this command
	// and all subcommands, e.g.:
	// awsCmd.PersistentFlags().String("foo", "", "A help for foo")

	// Cobra supports local flags which will only run when this command
	// is called directly, e.g.:
	// awsCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")

	awsCmd.Flags().BoolVarP(&install, "install", "i", false, "Install Kubernetes on the AWS infrastructure")
	// Flags to initiate the terraform installation
	awsCmd.Flags().BoolVarP(&create, "create", "c", false, "Deploy the AWS infrastructure using terraform")
	// Flag to destroy the AWS infrastructure using terraform
	awsCmd.Flags().BoolVarP(&destroy, "destroy", "d", false, "Destroy the AWS infrastructure")
}
identifier_body
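The identifier_body target in this row is the init() function that wires the aws subcommand into Cobra: attach the command to its parent and register three boolean flags bound to package-level variables. Below is a standalone sketch of that wiring under the assumption of a hypothetical "tk8" root command, since the real clusterCmd is defined elsewhere in the repository.

// Standalone sketch of the Cobra wiring performed by init() above.
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

var install, create, destroy bool

func main() {
	awsCmd := &cobra.Command{
		Use:   "aws",
		Short: "Manages the infrastructure on AWS",
		Run: func(cmd *cobra.Command, args []string) {
			fmt.Println("install:", install, "create:", create, "destroy:", destroy)
		},
	}
	awsCmd.Flags().BoolVarP(&install, "install", "i", false, "Install Kubernetes on the AWS infrastructure")
	awsCmd.Flags().BoolVarP(&create, "create", "c", false, "Deploy the AWS infrastructure using terraform")
	awsCmd.Flags().BoolVarP(&destroy, "destroy", "d", false, "Destroy the AWS infrastructure")

	rootCmd := &cobra.Command{Use: "tk8"} // hypothetical stand-in for clusterCmd
	rootCmd.AddCommand(awsCmd)
	rootCmd.Execute()
}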
aws.go
// Copyright © 2018 NAME HERE <EMAIL ADDRESS> // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package cmd import ( "bufio" "fmt" "log" "net" "os" "os/exec" "strconv" "strings" "github.com/spf13/cobra" "github.com/spf13/viper" ) var install, create, destroy bool var ec2IP string // awsCmd represents the aws command var awsCmd = &cobra.Command{ Use: "aws", Short: "Manages the infrastructure on AWS", Long: ` Create, delete and show current status of the deployment that is running on AWS. Kindly ensure that terraform is installed also.`, Args: cobra.NoArgs, Run: func(cmd *cobra.Command, args []string) { if install { // check if ansible is installed terr, err := exec.LookPath("ansible") if err != nil { log.Fatal("Ansible command not found, kindly check") } fmt.Printf("Found Ansible at %s\n", terr) rr, err := exec.Command("ansible", "--version").Output() if err != nil { log.Fatal(err) } fmt.Printf(string(rr)) //Start Kubernetes Installation //check if ansible host file exists if _, err := os.Stat("./kubespray/inventory/hosts"); err != nil { fmt.Println("./kubespray/inventory/host inventory file not found") os.Exit(1) } // Copy the configuraton files as indicated in the kubespray docs if _, err := os.Stat("./kubespray/inventory/awscluster"); err == nil { fmt.Println("Configuration folder already exists") } else { //os.MkdirAll("./kubespray/inventory/awscluster/group_vars", 0755) exec.Command("cp", "-rfp", "./kubespray/inventory/sample/", "./kubespray/inventory/awscluster/").Run() exec.Command("cp", "./kubespray/inventory/hosts", "./kubespray/inventory/awscluster/hosts").Run() //Enable load balancer api access and copy the kubeconfig file locally loadBalancerName, err := exec.Command("sh", "-c", "grep apiserver_loadbalancer_domain_name= ./kubespray/inventory/hosts | cut -d'=' -f2").CombinedOutput() if err != nil { fmt.Println("Problem getting the load balancer domain name", err) } else { //Make a copy of kubeconfig on Ansible host f, err := os.OpenFile("./kubespray/inventory/awscluster/group_vars/k8s-cluster.yml", os.O_APPEND|os.O_WRONLY, 0600) if err != nil { panic(err) } defer f.Close() fmt.Fprintf(f, "kubeconfig_localhost: true\n") g, err := os.OpenFile("./kubespray/inventory/awscluster/group_vars/all.yml", os.O_APPEND|os.O_WRONLY, 0600) if err != nil { panic(err) } defer g.Close() // Resolve Load Balancer Domain Name and pick the first IP s, _ := exec.Command("sh", "-c", "grep apiserver_loadbalancer_domain_name= ./kubespray/inventory/hosts | cut -d'=' -f2 | sed 's/\"//g'").CombinedOutput() // Convert the Domain name to string and strip all spaces so that Lookup does not return errors r := string(s) t := strings.TrimSpace(r) fmt.Println(t) node, err := net.LookupHost(t) if err != nil { fmt.Println(err) os.Exit(1) } ec2IP := node[0] fmt.Println(node) DomainName := strings.TrimSpace(string(loadBalancerName)) loadBalancerDomainName := "apiserver_loadbalancer_domain_name: " + DomainName fmt.Fprintf(g, "#Set cloud provider to AWS\n") fmt.Fprintf(g, "cloud_provider: 'aws'\n") fmt.Fprintf(g, "#Load 
Balancer Configuration\n") fmt.Fprintf(g, "loadbalancer_apiserver_localhost: false\n") fmt.Fprintf(g, "%s\n", loadBalancerDomainName) fmt.Fprintf(g, "loadbalancer_apiserver:\n") fmt.Fprintf(g, " address: %s\n", ec2IP) fmt.Fprintf(g, " port: 6443\n") } } kubeSet := exec.Command("ansible-playbook", "-i", "./inventory/awscluster/hosts", "./cluster.yml", "--timeout=60", "-e ansible_user=centos", "-e bootstrap_os=centos", "-b", "--become-user=root", "--flush-cache") kubeSet.Dir = "./kubespray/" stdout, _ := kubeSet.StdoutPipe() kubeSet.Stderr = kubeSet.Stdout kubeSet.Start() scanner := bufio.NewScanner(stdout) for scanner.Scan() { m := scanner.Text() fmt.Println(m) //log.Printf(m) } kubeSet.Wait() os.Exit(0) } if destroy { // check if terraform is installed terr, err := exec.LookPath("terraform") if err != nil { log.Fatal("Terraform command not found, kindly check") } fmt.Printf("Found terraform at %s\n", terr) rr, err := exec.Command("terraform", "version").Output() if err != nil { log.Fatal(err) } fmt.Printf(string(rr)) // Remove ssh bastion file if _, err := os.Stat("./kubespray/ssh-bastion.conf"); err == nil { os.Remove("./kubespray/ssh-bastion.conf") } // Remove the cluster inventory folder err = os.RemoveAll("./kubespray/inventory/awscluster") if err != nil { fmt.Println(err) } // Check if credentials file exist, if it exists skip asking to input the AWS values if _, err := os.Stat("./kubespray/contrib/terraform/aws/credentials.tfvars"); err == nil { fmt.Println("Credentials file already exists, creation skipped") } else { fmt.Println("Please enter your AWS access key ID") var awsAccessKeyID string fmt.Scanln(&awsAccessKeyID) fmt.Println("Please enter your AWS SECRET ACCESS KEY") var awsSecretKey string fmt.Scanln(&awsSecretKey) fmt.Println("Please enter your AWS SSH Key Name") var awsAccessSSHKey string fmt.Scanln(&awsAccessSSHKey) fmt.Println("Please enter your AWS Default Region") var awsDefaultRegion string fmt.Scanln(&awsDefaultRegion) file, err := os.Create("./kubespray/contrib/terraform/aws/credentials.tfvars") if err != nil { log.Fatal("Cannot create file", err) } defer file.Close() fmt.Fprintf(file, "AWS_ACCESS_KEY_ID = %s\n", awsAccessKeyID) fmt.Fprintf(file, "AWS_SECRET_ACCESS_KEY = %s\n", awsSecretKey) fmt.Fprintf(file, "AWS_SSH_KEY_NAME = %s\n", awsAccessSSHKey) fmt.Fprintf(file, "AWS_DEFAULT_REGION = %s\n", awsDefaultRegion) } terrSet := exec.Command("terraform", "destroy", "-var-file=credentials.tfvars", "-force") terrSet.Dir = "./kubespray/contrib/terraform/aws/" stdout, _ := terrSet.StdoutPipe() terrSet.Stderr = terrSet.Stdout error := terrSet.Start() if error != nil { fmt.Println(error) } scanner := bufio.NewScanner(stdout) for scanner.Scan() { m := scanner.Text() fmt.Println(m) //log.Printf(m) } terrSet.Wait() os.Exit(0) } if create { // check if terraform is installed terr, err := exec.LookPath("terraform") if err != nil { log.Fatal("Terraform command not found, kindly check") } fmt.Printf("Found terraform at %s\n", terr) rr, err := exec.Command("terraform", "version").Output() if err != nil { log.Fatal(err) } fmt.Printf(string(rr)) // Check if credentials file exist, if it exists skip asking to input the AWS values if _, err := os.Stat("./kubespray/contrib/terraform/aws/credentials.tfvars"); err == nil { fmt.Println("Credentials file already exists, creation skipped") } else { //Read Configuration File viper.SetConfigName("config") viper.AddConfigPath(".") viper.AddConfigPath("/tk8") verr := viper.ReadInConfig() // Find and read the config file if verr != nil { // 
Handle errors reading the config file panic(fmt.Errorf("fatal error config file: %s", verr)) } awsAccessKeyID := viper.GetString("aws.aws_access_key_id") awsSecretKey := viper.GetString("aws.aws_secret_access_key") awsAccessSSHKey := viper.GetString("aws.aws_ssh_keypair") awsDefaultRegion := viper.GetString("aws.aws_default_region") file, err := os.Create("./kubespray/contrib/terraform/aws/credentials.tfvars") if err != nil { log.Fatal("Cannot create file", err) } defer file.Close() fmt.Fprintf(file, "AWS_ACCESS_KEY_ID = %s\n", strconv.Quote(awsAccessKeyID)) fmt.Fprintf(file, "AWS_SECRET_ACCESS_KEY = %s\n", strconv.Quote(awsSecretKey)) fmt.Fprintf(file, "AWS_SSH_KEY_NAME = %s\n", strconv.Quote(awsAccessSSHKey)) fmt.Fprintf(file, "AWS_DEFAULT_REGION = %s\n", strconv.Quote(awsDefaultRegion)) } // Remove tftvars file err = os.Remove("./kubespray/contrib/terraform/aws/terraform.tfvars") if err != nil { fmt.Println(err) } //Read Configuration File viper.SetConfigName("config") viper.AddConfigPath(".") verr := viper.ReadInConfig() // Find and read the config file if verr != nil { // Handle errors reading the config file panic(fmt.Errorf("fatal error config file: %s", verr)) } awsClusterName := viper.GetString("aws.clustername") awsVpcCidrBlock := viper.GetString("aws.aws_vpc_cidr_block") awsCidrSubnetsPrivate := viper.GetString("aws.aws_cidr_subnets_private") awsCidrSubnetsPublic := viper.GetString("aws.aws_cidr_subnets_public") awsBastionSize := viper.GetString("aws.aws_bastion_size") awsKubeMasterNum := viper.GetString("aws.aws_kube_master_num") awsKubeMasterSize := viper.GetString("aws.aws_kube_master_size") awsEtcdNum := viper.GetString("aws.aws_etcd_num") awsEtcdSize := viper.GetString("aws.aws_etcd_size") awsKubeWorkerNum := viper.GetString("aws.aws_kube_worker_num") awsKubeWorkerSize := viper.GetString("aws.aws_kube_worker_size") awsElbAPIPort := viper.GetString("aws.aws_elb_api_port") k8sSecureAPIPort := viper.GetString("aws.k8s_secure_api_port") kubeInsecureApiserverAddress := viper.GetString("aws.") tfile, err := os.Create("./kubespray/contrib/terraform/aws/terraform.tfvars") if err != nil { log.Fatal("Cannot create file", err) } defer tfile.Close() fmt.Fprintf(tfile, "aws_cluster_name = %s\n", strconv.Quote(awsClusterName)) fmt.Fprintf(tfile, "aws_vpc_cidr_block = %s\n", strconv.Quote(awsVpcCidrBlock)) fmt.Fprintf(tfile, "aws_cidr_subnets_private = %s\n", awsCidrSubnetsPrivate) fmt.Fprintf(tfile, "aws_cidr_subnets_public = %s\n", awsCidrSubnetsPublic) fmt.Fprintf(tfile, "aws_bastion_size = %s\n", strconv.Quote(awsBastionSize)) fmt.Fprintf(tfile, "aws_kube_master_num = %s\n", awsKubeMasterNum) fmt.Fprintf(tfile, "aws_kube_master_size = %s\n", strconv.Quote(awsKubeMasterSize)) fmt.Fprintf(tfile, "aws_etcd_num = %s\n", awsEtcdNum) fmt.Fprintf(tfile, "aws_etcd_size = %s\n", strconv.Quote(awsEtcdSize)) fmt.Fprintf(tfile, "aws_kube_worker_num = %s\n", awsKubeWorkerNum) fmt.Fprintf(tfile, "aws_kube_worker_size = %s\n", strconv.Quote(awsKubeWorkerSize)) fmt.Fprintf(tfile, "aws_elb_api_port = %s\n", awsElbAPIPort) fmt.Fprintf(tfile, "k8s_secure_api_port = %s\n", k8sSecureAPIPort) fmt.Fprintf(tfile, "kube_insecure_apiserver_address = %s\n", strconv.Quote(kubeInsecureApiserverAddress)) fmt.Fprintf(tfile, "default_tags = {\n") fmt.Fprintf(tfile, "# Env = 'devtest'\n") fmt.Fprintf(tfile, "# Product = 'kubernetes'\n") fmt.Fprintf(tfile, "}") //fmt.Println("Please enter your AWS access key ID") //var awsAccessKeyID string //fmt.Scanln(&awsAccessKeyID) //fmt.Println("Please enter your AWS SECRET 
ACCESS KEY") //var awsSecretKey string //fmt.Scanln(&awsSecretKey) //fmt.Println("Please enter your AWS SSH Key Name") //var awsAccessSSHKey string //fmt.Scanln(&awsAccessSSHKey) //fmt.Println("Please enter your AWS Default Region") //var awsDefaultRegion string //fmt.Scanln(&awsDefaultRegion) terrInit := exec.Command("terraform", "init") terrInit.Dir = "./kubespray/contrib/terraform/aws/" out, _ := terrInit.StdoutPipe() terrInit.Start() scanInit := bufio.NewScanner(out) for scanInit.Scan() { m := scanInit.Text() fmt.Println(m) //log.Printf(m) } terrInit.Wait() terrSet := exec.Command("terraform", "apply", "-var-file=credentials.tfvars", "-auto-approve") terrSet.Dir = "./kubespray/contrib/terraform/aws/" stdout, err := terrSet.StdoutPipe() terrSet.Stderr = terrSet.Stdout terrSet.Start() scanner := bufio.NewScanner(stdout) for scanner.Scan() { m := scanner.Text() fmt.Println(m) //log.Printf(m) } terrSet.Wait() os.Exit(0) } if len(args) == 0 { cmd.Help() os.Exit(0) } }, } func i
) { clusterCmd.AddCommand(awsCmd) // Here you will define your flags and configuration settings. // Cobra supports Persistent Flags which will work for this command // and all subcommands, e.g.: // awsCmd.PersistentFlags().String("foo", "", "A help for foo") // Cobra supports local flags which will only run when this command // is called directly, e.g.: // awsCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle") awsCmd.Flags().BoolVarP(&install, "install", "i", false, "Install Kubernetes on the AWS infrastructure") // Flags to initiate the terraform installation awsCmd.Flags().BoolVarP(&create, "create", "c", false, "Deploy the AWS infrastructure using terraform") // Flag to destroy the AWS infrastructure using terraform awsCmd.Flags().BoolVarP(&destroy, "destroy", "d", false, "Destroy the AWS infrastructure") }
nit(
identifier_name
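This row's aws.go content also shows the Viper lookup flow used by the create branch: register the config name and search paths, read the file once, then pull individual keys out of the "aws" section. A minimal sketch of that flow, assuming the same config name, search paths, and keys that appear in the code above; nothing here is specific to this repository beyond those literals.

// Minimal sketch of the Viper configuration lookup used in the create branch.
package main

import (
	"fmt"

	"github.com/spf13/viper"
)

func main() {
	viper.SetConfigName("config") // matches config.yaml, config.toml, ...
	viper.AddConfigPath(".")
	viper.AddConfigPath("/tk8")
	if err := viper.ReadInConfig(); err != nil {
		panic(fmt.Errorf("fatal error config file: %s", err))
	}

	clusterName := viper.GetString("aws.clustername")
	masterNum := viper.GetString("aws.aws_kube_master_num")
	fmt.Println(clusterName, masterNum)
}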
raft.go
package raft // // this is an outline of the API that raft must expose to // the service (or tester). see comments below for // each of these functions for more details. // // rf = Make(...) // create a new Raft server. // rf.Start(command interface{}) (index, Term, isleader) // start agreement on a new log entry // rf.GetState() (Term, isLeader) // ask a Raft for its current Term, and whether it thinks it is leader // ApplyMsg // each time a new entry is committed to the log, each Raft peer // should send an ApplyMsg to the service (or tester) // in the same server. // import ( "bytes" "encoding/gob" "math/rand" "sync" "time" ) import "labrpc" // import "bytes" // import "encoding/gob" const ( STATE_FOLLOWER = 0 STATE_LEADER = 1 STATE_CANDIDATE = 2 SEND_ENTRY_INTERVAL = 50 * time.Millisecond ) ///////tell leader that the commited log can be applied. // as each Raft peer becomes aware that successive log Entries are // committed, the peer should send an ApplyMsg to the service (or // tester) on the same server, via the applyCh passed to Make(). // type ApplyMsg struct { CommandIndex int // index of log array. Command interface{} UseSnapshot bool // ignore for lab2; only used in lab3 Snapshot []byte // ignore for lab2; only used in lab3 CommandValid bool } type logEntries struct { LogIndex int Term int Log interface{} } // // A Go object implementing a single Raft peer. // type Raft struct { mtx sync.Mutex peers []*labrpc.ClientEnd persister *Persister me int // index into peers[] // Your data here. // Look at the paper's Figure 2 for a description of what // state a Raft server must maintain. // persistent state for all state currentTerm int voteFor int log []logEntries // volatile state: first commit -> then apply commitIndex int // init to 0: the index that have been commited. lastApplied int // init to 0: the index that have been applied. // only for leaders nextIndex []int // init to [leader last applied log index + 1] matchIndex []int // init to 0; for each server, index of highest log entry known to be replicated // other fields state int beenVotedCount int // channels chanHeartBeat chan int chanBecomeLeader chan int chanCommit chan int chanVoteOther chan int chanApply chan ApplyMsg } // return currentTerm and whether this server // believes it is the leader. func (rf *Raft) GetState() (int, bool) { // Your code here. //rf.mtx.Lock() //defer rf.mtx.Unlock() return rf.currentTerm, rf.state == STATE_LEADER } // // save Raft's persistent state to stable storage, // where it can later be retrieved after a crash and restart. // see paper's Figure 2 for a description of what should be persistent. // func (rf *Raft) persist() { // Your code here. // Example: // w := new(bytes.Buffer) // e := gob.NewEncoder(w) // e.Encode(rf.xxx) // e.Encode(rf.yyy) // data := w.Bytes() // rf.persister.SaveRaftState(data) w := new(bytes.Buffer) e := gob.NewEncoder(w) e.Encode(rf.currentTerm) e.Encode(rf.voteFor) e.Encode(rf.log) data := w.Bytes() rf.persister.SaveRaftState(data) } // // restore previously persisted state. // func (rf *Raft) readPersist(data []byte) { // Your code here. // Example: // r := bytes.NewBuffer(data) // d := gob.NewDecoder(r) // d.Decode(&rf.xxx) // d.Decode(&rf.yyy) r := bytes.NewBuffer(data) d := gob.NewDecoder(r) d.Decode(&rf.currentTerm) d.Decode(&rf.voteFor) d.Decode(&rf.log) } // example RequestVote RPC arguments structure. type RequestVoteArgs struct { // Your data here. Term int CandidateId int LastLogIndex int LastLogTerm int } // example RequestVote RPC reply structure. 
type RequestVoteReply struct { // Your data here. Term int VoteGranted bool } type RequestAppendEntriesArgs struct { Term int LeaderId int PrevLogIndex int PrevLogTerm int Entries []logEntries LeaderCommitIndex int /// prevLogIndex, CommitIndex. } type RequestAppendEntriesReply struct { Term int Success bool NextIndex int // used to tell leader that follower's next empty log, or leader can decrease one each time. } // // example RequestVote RPC handler. // // rpc request/response should check term to convert self to follower; // should check peer's log info to vote peer. func (rf *Raft) RequestVote(args RequestVoteArgs, reply *RequestVoteReply) { //fmt.Printf("[::RequestVote]\n") // Your code here. rf.mtx.Lock() defer rf.mtx.Unlock() defer rf.persist() reply.VoteGranted = false // case 1: check term if args.Term < rf.currentTerm { reply.Term = rf.currentTerm return } if args.Term > rf.currentTerm { // set term to max. and then maybe become leader. rf.currentTerm = args.Term rf.state = STATE_FOLLOWER rf.voteFor = -1 } reply.Term = rf.currentTerm // case 2: check log isNewer := false if args.LastLogTerm == rf.log[len(rf.log)-1].Term { isNewer = args.LastLogIndex >= rf.log[len(rf.log)-1].LogIndex } else { isNewer = args.LastLogTerm > rf.log[len(rf.log)-1].Term } if (rf.voteFor == -1 || rf.voteFor == args.CandidateId) && isNewer { rf.chanVoteOther <- 1 rf.state = STATE_FOLLOWER reply.VoteGranted = true rf.voteFor = args.CandidateId } } func (rf *Raft) RequestAppendEntries(args RequestAppendEntriesArgs, reply *RequestAppendEntriesReply) { // Your code here. // Q: should candidate append entries? //fmt.Println("[::RequestAppendEntries]", args) rf.mtx.Lock() defer rf.mtx.Unlock() defer rf.persist() // case 1: check term reply.Success = false if args.Term < rf.currentTerm { reply.Term = rf.currentTerm reply.NextIndex = rf.log[len(rf.log)-1].LogIndex + 1 return } rf.chanHeartBeat <- 1 if args.Term > rf.currentTerm { rf.currentTerm = args.Term rf.state = STATE_FOLLOWER rf.voteFor = -1 } reply.Term = args.Term // case 2: check log number if args.PrevLogIndex > rf.log[len(rf.log)-1].LogIndex { reply.NextIndex = rf.log[len(rf.log)-1].LogIndex + 1 return } // case 3: check log term. decrease one each time... if args.PrevLogIndex > 0 { term := rf.log[args.PrevLogIndex].Term if args.PrevLogTerm != term { for i := args.PrevLogIndex - 1; i >= 0; i-- { if rf.log[i].Term != term { reply.NextIndex = i + 1 break } } return } } // step4: success: copy the log. if args.PrevLogIndex < 0 { } else { rf.log = rf.log[:args.PrevLogIndex+1] rf.log = append(rf.log, args.Entries...) reply.Success = true reply.NextIndex = rf.log[len(rf.log)-1].LogIndex + 1 } if args.LeaderCommitIndex > rf.commitIndex { last := rf.log[len(rf.log)-1].LogIndex if args.LeaderCommitIndex > last { rf.commitIndex = last } else { rf.commitIndex = args.LeaderCommitIndex } rf.chanCommit <- 1 } return } // // example code to send a RequestVote RPC to a server. // server is the index of the target server in rf.peers[]. // expects RPC arguments in args. // fills in *reply with RPC reply, so caller should // pass &reply. // the types of the args and reply passed to Call() must be // the same as the types of the arguments declared in the // handler function (including whether they are pointers). // // returns true if labrpc says the RPC was delivered. 
// // if you're having trouble getting RPC to work, check that you've // capitalized all field names in structs passed over RPC, and // that the caller passes the address of the reply struct with &, not // the struct itself. // func (rf *Raft) sendRequestVote(server int, args RequestVoteArgs, reply *RequestVoteReply) bool { ok := rf.peers[server].Call("Raft.RequestVote", args, reply) rf.mtx.Lock() defer rf.mtx.Unlock() if ok { if rf.state != STATE_CANDIDATE { return ok } if args.Term != rf.currentTerm { // consider the current term's reply return ok } if reply.Term > rf.currentTerm { rf.currentTerm = reply.Term rf.state = STATE_FOLLOWER rf.voteFor = -1 rf.persist() } if reply.VoteGranted { rf.beenVotedCount++ if rf.state == STATE_CANDIDATE && rf.beenVotedCount > len(rf.peers)/2 { rf.state = STATE_FOLLOWER // ... rf.chanBecomeLeader <- 1 } } } return ok } func (rf *Raft) sendAppendEntries(server int, args RequestAppendEntriesArgs, reply *RequestAppendEntriesReply) bool { //fmt.Printf("[sendAppendEntries][who=%v][term=%v]\n", rf.me, args.Term) ok := rf.peers[server].Call("Raft.RequestAppendEntries", args, reply) rf.mtx.Lock() defer rf.mtx.Unlock() if ok { if rf.state != STATE_LEADER { return ok } if args.Term != rf.currentTerm { return ok } if reply.Term > rf.currentTerm { rf.currentTerm = reply.Term rf.state = STATE_FOLLOWER rf.voteFor = -1 rf.persist() return ok } if reply.Success { if len(args.Entries) > 0 { rf.nextIndex[server] = args.Entries[len(args.Entries)-1].LogIndex + 1 rf.matchIndex[server] = rf.nextIndex[server] - 1 } } else { rf.nextIndex[server] = reply.NextIndex } } return ok } func (rf *Raft) broadcastRequestVote() { var args RequestVoteArgs rf.mtx.Lock() args.Term = rf.currentTerm args.CandidateId = rf.me args.LastLogTerm = rf.log[len(rf.log)-1].Term args.LastLogIndex = rf.log[len(rf.log)-1].LogIndex rf.mtx.Unlock() //fmt.Printf("[broadcastRequestVote][Candidate = %v]\n", rf.me) for i := range rf.peers
} func (rf *Raft) broadcastAppendEntries() { //fmt.Printf("[::broadcastAppendEntries][Candidate = %v]", rf.me) rf.mtx.Lock() defer rf.mtx.Unlock() N := rf.commitIndex last := rf.log[len(rf.log)-1].LogIndex // step1: iterate all commitLog for i := rf.commitIndex + 1; i <= last; i++ { num := 1 for j := range rf.peers { if j != rf.me && rf.matchIndex[j] >= i && rf.log[i].Term == rf.currentTerm { num++ } } // replicated in majority of node. if 2*num > len(rf.peers) { N = i } } // step2: we can apply these logs. if N != rf.commitIndex { rf.commitIndex = N rf.chanCommit <- 1 /// majority of nodes have commited, then we can move applyIndex // } for i := range rf.peers { if i != rf.me && rf.state == STATE_LEADER { if rf.nextIndex[i] > 0 { // step3: nextIndex[node i] until the end. var args RequestAppendEntriesArgs args.Term = rf.currentTerm args.LeaderId = rf.me args.PrevLogIndex = rf.nextIndex[i] - 1 args.PrevLogTerm = rf.log[args.PrevLogIndex].Term args.Entries = make([]logEntries, len(rf.log[args.PrevLogIndex+1:])) copy(args.Entries, rf.log[args.PrevLogIndex+1:]) args.LeaderCommitIndex = rf.commitIndex go func(i int, args RequestAppendEntriesArgs) { var reply RequestAppendEntriesReply rf.sendAppendEntries(i, args, &reply) }(i, args) } } } } // // the service using Raft (e.g. a k/v server) wants to start // agreement on the next command to be appended to Raft's log. if this // server isn't the leader, returns false. otherwise start the // agreement and return immediately. there is no guarantee that this // command will ever be committed to the Raft log, since the leader // may fail or lose an election. // // the first return value is the index that the command will appear at // if it's ever committed. the second return value is the current // term. the third return value is true if this server believes it is // the leader. // func (rf *Raft) Start(command interface{}) (int, int, bool) { rf.mtx.Lock() defer rf.mtx.Unlock() index := -1 term := rf.currentTerm isLeader := rf.state == STATE_LEADER if isLeader { index = rf.log[len(rf.log)-1].LogIndex + 1 rf.log = append(rf.log, logEntries{Term: term, Log: command, LogIndex: index}) // append new entry from client rf.persist() } return index, term, isLeader } // // the tester calls Kill() when a Raft instance won't // be needed again. you are not required to do anything // in Kill(), but it might be convenient to (for example) // turn off debug output from this instance. // func (rf *Raft) Kill() { // Your code here, if desired. } func (rf *Raft) working() { for { switch rf.state { case STATE_LEADER: rf.broadcastAppendEntries() time.Sleep(SEND_ENTRY_INTERVAL) case STATE_FOLLOWER: select { case <-rf.chanHeartBeat: // RequestAppendEntries case <-rf.chanVoteOther: // RequestVote case <-time.After(time.Duration(rand.Int63()%333+500) * time.Millisecond): rf.state = STATE_CANDIDATE } case STATE_CANDIDATE: rf.mtx.Lock() rf.currentTerm++ rf.voteFor = rf.me // voteFor changed with currentTerm. rf.beenVotedCount = 1 rf.persist() rf.mtx.Unlock() go rf.broadcastRequestVote() select { case <-time.After(time.Duration(rand.Int63()%333+500) * time.Millisecond): case <-rf.chanHeartBeat: rf.state = STATE_FOLLOWER case <-rf.chanBecomeLeader: rf.mtx.Lock() rf.state = STATE_LEADER rf.nextIndex = make([]int, len(rf.peers)) rf.matchIndex = make([]int, len(rf.peers)) for i := range rf.peers { rf.nextIndex[i] = rf.log[len(rf.log)-1].LogIndex + 1 // initialize. rf.matchIndex[i] = 0 } rf.mtx.Unlock() } } } } // // the service or tester wants to create a Raft server. 
the ports // of all the Raft servers (including this one) are in peers[]. this // server's port is peers[me]. all the servers' peers[] arrays // have the same order. persister is a place for this server to // save its persistent state, and also initially holds the most // recent saved state, if any. applyCh is a channel on which the // tester or service expects Raft to send ApplyMsg messages. // Make() must return quickly, so it should start goroutines // for any long-running work. // func Make(peers []*labrpc.ClientEnd, me int, persister *Persister, applyCh chan ApplyMsg) *Raft { rf := &Raft{} rf.peers = peers rf.persister = persister rf.me = me // Your initialization code here. rf.state = STATE_FOLLOWER rf.currentTerm = 0 rf.voteFor = -1 rf.log = append(rf.log, logEntries{Term: 0}) rf.commitIndex = 0 rf.lastApplied = 0 rf.chanBecomeLeader = make(chan int, 100) rf.chanCommit = make(chan int, 100) rf.chanHeartBeat = make(chan int, 100) rf.chanVoteOther = make(chan int, 100) rf.chanApply = applyCh // initialize from state persisted before a crash rf.readPersist(persister.ReadRaftState()) go rf.working() go func() { for { select { case <-rf.chanCommit: rf.mtx.Lock() commitIndex := rf.commitIndex for i := rf.lastApplied + 1; i <= commitIndex; i++ { msg := ApplyMsg{CommandIndex: i, Command: rf.log[i].Log, CommandValid: true} applyCh <- msg rf.lastApplied = i } rf.mtx.Unlock() } } }() return rf }
{ if i != rf.me && rf.state == STATE_CANDIDATE { go func(i int) { var reply RequestVoteReply rf.sendRequestVote(i, args, &reply) }(i) } }
conditional_block
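The leader loop in the raft.go suffix above advances commitIndex to the highest entry that is replicated on a majority of peers and whose term equals the leader's current term, then signals chanCommit. Below is a minimal, standalone Go sketch of that counting rule; the entry type, the matchIndex slice, and the highestCommittable name are illustrative stand-ins for the fields used in the record, not part of the dataset itself.

package main

import "fmt"

// entry mirrors just the Term field of the logEntries struct in the raft.go rows.
type entry struct{ Term int }

// highestCommittable returns the largest index i above commitIndex such that a
// majority of peers report matchIndex >= i and log[i].Term equals currentTerm,
// the same rule the leader applies before signalling a commit. The leader's own
// copy always counts as one replica.
func highestCommittable(log []entry, matchIndex []int, me, commitIndex, currentTerm int) int {
    n := commitIndex
    for i := commitIndex + 1; i < len(log); i++ {
        count := 1 // the leader itself
        for peer, m := range matchIndex {
            if peer != me && m >= i && log[i].Term == currentTerm {
                count++
            }
        }
        if 2*count > len(matchIndex) {
            n = i
        }
    }
    return n
}

func main() {
    log := []entry{{0}, {1}, {1}, {2}, {2}} // index 0 is the dummy entry
    matchIndex := []int{0, 4, 3, 1, 0}      // five peers; peer 0 is the leader
    // Entry 3 is on three of five peers in the current term, entry 4 only on two,
    // so the highest committable index is 3.
    fmt.Println(highestCommittable(log, matchIndex, 0, 0, 2)) // 3
}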
raft.go
package raft // // this is an outline of the API that raft must expose to // the service (or tester). see comments below for // each of these functions for more details. // // rf = Make(...) // create a new Raft server. // rf.Start(command interface{}) (index, Term, isleader) // start agreement on a new log entry // rf.GetState() (Term, isLeader) // ask a Raft for its current Term, and whether it thinks it is leader // ApplyMsg // each time a new entry is committed to the log, each Raft peer // should send an ApplyMsg to the service (or tester) // in the same server. // import ( "bytes" "encoding/gob" "math/rand" "sync" "time" ) import "labrpc" // import "bytes" // import "encoding/gob" const ( STATE_FOLLOWER = 0 STATE_LEADER = 1 STATE_CANDIDATE = 2 SEND_ENTRY_INTERVAL = 50 * time.Millisecond ) ///////tell leader that the commited log can be applied. // as each Raft peer becomes aware that successive log Entries are // committed, the peer should send an ApplyMsg to the service (or // tester) on the same server, via the applyCh passed to Make(). // type ApplyMsg struct { CommandIndex int // index of log array. Command interface{} UseSnapshot bool // ignore for lab2; only used in lab3 Snapshot []byte // ignore for lab2; only used in lab3 CommandValid bool } type logEntries struct { LogIndex int Term int Log interface{} } // // A Go object implementing a single Raft peer. // type Raft struct { mtx sync.Mutex peers []*labrpc.ClientEnd persister *Persister me int // index into peers[] // Your data here. // Look at the paper's Figure 2 for a description of what // state a Raft server must maintain. // persistent state for all state currentTerm int voteFor int log []logEntries // volatile state: first commit -> then apply commitIndex int // init to 0: the index that have been commited. lastApplied int // init to 0: the index that have been applied. // only for leaders nextIndex []int // init to [leader last applied log index + 1] matchIndex []int // init to 0; for each server, index of highest log entry known to be replicated // other fields state int beenVotedCount int // channels chanHeartBeat chan int chanBecomeLeader chan int chanCommit chan int chanVoteOther chan int chanApply chan ApplyMsg } // return currentTerm and whether this server // believes it is the leader. func (rf *Raft) GetState() (int, bool) { // Your code here. //rf.mtx.Lock() //defer rf.mtx.Unlock() return rf.currentTerm, rf.state == STATE_LEADER } // // save Raft's persistent state to stable storage, // where it can later be retrieved after a crash and restart. // see paper's Figure 2 for a description of what should be persistent. // func (rf *Raft) persist() { // Your code here. // Example: // w := new(bytes.Buffer) // e := gob.NewEncoder(w) // e.Encode(rf.xxx) // e.Encode(rf.yyy) // data := w.Bytes() // rf.persister.SaveRaftState(data) w := new(bytes.Buffer) e := gob.NewEncoder(w) e.Encode(rf.currentTerm) e.Encode(rf.voteFor) e.Encode(rf.log) data := w.Bytes() rf.persister.SaveRaftState(data) } // // restore previously persisted state. // func (rf *Raft) readPersist(data []byte) { // Your code here. // Example: // r := bytes.NewBuffer(data) // d := gob.NewDecoder(r) // d.Decode(&rf.xxx) // d.Decode(&rf.yyy) r := bytes.NewBuffer(data) d := gob.NewDecoder(r) d.Decode(&rf.currentTerm) d.Decode(&rf.voteFor) d.Decode(&rf.log) } // example RequestVote RPC arguments structure. type RequestVoteArgs struct { // Your data here. Term int CandidateId int LastLogIndex int LastLogTerm int } // example RequestVote RPC reply structure. 
type RequestVoteReply struct { // Your data here. Term int VoteGranted bool } type RequestAppendEntriesArgs struct { Term int LeaderId int PrevLogIndex int PrevLogTerm int Entries []logEntries LeaderCommitIndex int /// prevLogIndex, CommitIndex. } type RequestAppendEntriesReply struct { Term int Success bool NextIndex int // used to tell leader that follower's next empty log, or leader can decrease one each time. } // // example RequestVote RPC handler. // // rpc request/response should check term to convert self to follower; // should check peer's log info to vote peer. func (rf *Raft) RequestVote(args RequestVoteArgs, reply *RequestVoteReply) { //fmt.Printf("[::RequestVote]\n") // Your code here. rf.mtx.Lock() defer rf.mtx.Unlock() defer rf.persist() reply.VoteGranted = false // case 1: check term if args.Term < rf.currentTerm { reply.Term = rf.currentTerm return } if args.Term > rf.currentTerm { // set term to max. and then maybe become leader. rf.currentTerm = args.Term rf.state = STATE_FOLLOWER rf.voteFor = -1 } reply.Term = rf.currentTerm // case 2: check log isNewer := false if args.LastLogTerm == rf.log[len(rf.log)-1].Term { isNewer = args.LastLogIndex >= rf.log[len(rf.log)-1].LogIndex } else { isNewer = args.LastLogTerm > rf.log[len(rf.log)-1].Term } if (rf.voteFor == -1 || rf.voteFor == args.CandidateId) && isNewer { rf.chanVoteOther <- 1 rf.state = STATE_FOLLOWER reply.VoteGranted = true rf.voteFor = args.CandidateId } } func (rf *Raft) RequestAppendEntries(args RequestAppendEntriesArgs, reply *RequestAppendEntriesReply) { // Your code here. // Q: should candidate append entries? //fmt.Println("[::RequestAppendEntries]", args) rf.mtx.Lock() defer rf.mtx.Unlock() defer rf.persist() // case 1: check term reply.Success = false if args.Term < rf.currentTerm { reply.Term = rf.currentTerm reply.NextIndex = rf.log[len(rf.log)-1].LogIndex + 1 return } rf.chanHeartBeat <- 1 if args.Term > rf.currentTerm { rf.currentTerm = args.Term rf.state = STATE_FOLLOWER rf.voteFor = -1 } reply.Term = args.Term // case 2: check log number if args.PrevLogIndex > rf.log[len(rf.log)-1].LogIndex { reply.NextIndex = rf.log[len(rf.log)-1].LogIndex + 1 return } // case 3: check log term. decrease one each time... if args.PrevLogIndex > 0 { term := rf.log[args.PrevLogIndex].Term if args.PrevLogTerm != term { for i := args.PrevLogIndex - 1; i >= 0; i-- { if rf.log[i].Term != term { reply.NextIndex = i + 1 break } } return } } // step4: success: copy the log. if args.PrevLogIndex < 0 { } else { rf.log = rf.log[:args.PrevLogIndex+1] rf.log = append(rf.log, args.Entries...) reply.Success = true reply.NextIndex = rf.log[len(rf.log)-1].LogIndex + 1 } if args.LeaderCommitIndex > rf.commitIndex { last := rf.log[len(rf.log)-1].LogIndex if args.LeaderCommitIndex > last { rf.commitIndex = last } else { rf.commitIndex = args.LeaderCommitIndex } rf.chanCommit <- 1 } return } // // example code to send a RequestVote RPC to a server. // server is the index of the target server in rf.peers[]. // expects RPC arguments in args. // fills in *reply with RPC reply, so caller should // pass &reply. // the types of the args and reply passed to Call() must be // the same as the types of the arguments declared in the // handler function (including whether they are pointers). // // returns true if labrpc says the RPC was delivered. 
// // if you're having trouble getting RPC to work, check that you've // capitalized all field names in structs passed over RPC, and // that the caller passes the address of the reply struct with &, not // the struct itself. // func (rf *Raft) sendRequestVote(server int, args RequestVoteArgs, reply *RequestVoteReply) bool { ok := rf.peers[server].Call("Raft.RequestVote", args, reply) rf.mtx.Lock() defer rf.mtx.Unlock() if ok { if rf.state != STATE_CANDIDATE { return ok } if args.Term != rf.currentTerm { // consider the current term's reply return ok } if reply.Term > rf.currentTerm { rf.currentTerm = reply.Term rf.state = STATE_FOLLOWER rf.voteFor = -1 rf.persist() } if reply.VoteGranted { rf.beenVotedCount++ if rf.state == STATE_CANDIDATE && rf.beenVotedCount > len(rf.peers)/2 { rf.state = STATE_FOLLOWER // ... rf.chanBecomeLeader <- 1 } } } return ok } func (rf *Raft) sendAppendEntries(server int, args RequestAppendEntriesArgs, reply *RequestAppendEntriesReply) bool { //fmt.Printf("[sendAppendEntries][who=%v][term=%v]\n", rf.me, args.Term) ok := rf.peers[server].Call("Raft.RequestAppendEntries", args, reply) rf.mtx.Lock() defer rf.mtx.Unlock() if ok { if rf.state != STATE_LEADER { return ok } if args.Term != rf.currentTerm { return ok } if reply.Term > rf.currentTerm { rf.currentTerm = reply.Term rf.state = STATE_FOLLOWER rf.voteFor = -1 rf.persist() return ok } if reply.Success { if len(args.Entries) > 0 { rf.nextIndex[server] = args.Entries[len(args.Entries)-1].LogIndex + 1 rf.matchIndex[server] = rf.nextIndex[server] - 1 } } else { rf.nextIndex[server] = reply.NextIndex } } return ok } func (rf *Raft) broadcastRequestVote() { var args RequestVoteArgs rf.mtx.Lock() args.Term = rf.currentTerm args.CandidateId = rf.me args.LastLogTerm = rf.log[len(rf.log)-1].Term args.LastLogIndex = rf.log[len(rf.log)-1].LogIndex rf.mtx.Unlock() //fmt.Printf("[broadcastRequestVote][Candidate = %v]\n", rf.me) for i := range rf.peers { if i != rf.me && rf.state == STATE_CANDIDATE { go func(i int) { var reply RequestVoteReply rf.sendRequestVote(i, args, &reply) }(i) } } } func (rf *Raft) broadcastAppendEntries() { //fmt.Printf("[::broadcastAppendEntries][Candidate = %v]", rf.me) rf.mtx.Lock() defer rf.mtx.Unlock() N := rf.commitIndex last := rf.log[len(rf.log)-1].LogIndex // step1: iterate all commitLog for i := rf.commitIndex + 1; i <= last; i++ { num := 1 for j := range rf.peers { if j != rf.me && rf.matchIndex[j] >= i && rf.log[i].Term == rf.currentTerm { num++ } } // replicated in majority of node. if 2*num > len(rf.peers) { N = i } } // step2: we can apply these logs. if N != rf.commitIndex { rf.commitIndex = N rf.chanCommit <- 1 /// majority of nodes have commited, then we can move applyIndex // } for i := range rf.peers { if i != rf.me && rf.state == STATE_LEADER { if rf.nextIndex[i] > 0 { // step3: nextIndex[node i] until the end. var args RequestAppendEntriesArgs args.Term = rf.currentTerm args.LeaderId = rf.me args.PrevLogIndex = rf.nextIndex[i] - 1 args.PrevLogTerm = rf.log[args.PrevLogIndex].Term args.Entries = make([]logEntries, len(rf.log[args.PrevLogIndex+1:])) copy(args.Entries, rf.log[args.PrevLogIndex+1:]) args.LeaderCommitIndex = rf.commitIndex go func(i int, args RequestAppendEntriesArgs) { var reply RequestAppendEntriesReply rf.sendAppendEntries(i, args, &reply) }(i, args) } } } } // // the service using Raft (e.g. a k/v server) wants to start // agreement on the next command to be appended to Raft's log. if this // server isn't the leader, returns false. 
otherwise start the // agreement and return immediately. there is no guarantee that this // command will ever be committed to the Raft log, since the leader // may fail or lose an election. // // the first return value is the index that the command will appear at // if it's ever committed. the second return value is the current // term. the third return value is true if this server believes it is // the leader. // func (rf *Raft) Start(command interface{}) (int, int, bool) { rf.mtx.Lock() defer rf.mtx.Unlock() index := -1 term := rf.currentTerm isLeader := rf.state == STATE_LEADER if isLeader { index = rf.log[len(rf.log)-1].LogIndex + 1 rf.log = append(rf.log, logEntries{Term: term, Log: command, LogIndex: index}) // append new entry from client rf.persist() } return index, term, isLeader } // // the tester calls Kill() when a Raft instance won't // be needed again. you are not required to do anything // in Kill(), but it might be convenient to (for example) // turn off debug output from this instance. // func (rf *Raft) Kill()
func (rf *Raft) working() { for { switch rf.state { case STATE_LEADER: rf.broadcastAppendEntries() time.Sleep(SEND_ENTRY_INTERVAL) case STATE_FOLLOWER: select { case <-rf.chanHeartBeat: // RequestAppendEntries case <-rf.chanVoteOther: // RequestVote case <-time.After(time.Duration(rand.Int63()%333+500) * time.Millisecond): rf.state = STATE_CANDIDATE } case STATE_CANDIDATE: rf.mtx.Lock() rf.currentTerm++ rf.voteFor = rf.me // voteFor changed with currentTerm. rf.beenVotedCount = 1 rf.persist() rf.mtx.Unlock() go rf.broadcastRequestVote() select { case <-time.After(time.Duration(rand.Int63()%333+500) * time.Millisecond): case <-rf.chanHeartBeat: rf.state = STATE_FOLLOWER case <-rf.chanBecomeLeader: rf.mtx.Lock() rf.state = STATE_LEADER rf.nextIndex = make([]int, len(rf.peers)) rf.matchIndex = make([]int, len(rf.peers)) for i := range rf.peers { rf.nextIndex[i] = rf.log[len(rf.log)-1].LogIndex + 1 // initialize. rf.matchIndex[i] = 0 } rf.mtx.Unlock() } } } } // // the service or tester wants to create a Raft server. the ports // of all the Raft servers (including this one) are in peers[]. this // server's port is peers[me]. all the servers' peers[] arrays // have the same order. persister is a place for this server to // save its persistent state, and also initially holds the most // recent saved state, if any. applyCh is a channel on which the // tester or service expects Raft to send ApplyMsg messages. // Make() must return quickly, so it should start goroutines // for any long-running work. // func Make(peers []*labrpc.ClientEnd, me int, persister *Persister, applyCh chan ApplyMsg) *Raft { rf := &Raft{} rf.peers = peers rf.persister = persister rf.me = me // Your initialization code here. rf.state = STATE_FOLLOWER rf.currentTerm = 0 rf.voteFor = -1 rf.log = append(rf.log, logEntries{Term: 0}) rf.commitIndex = 0 rf.lastApplied = 0 rf.chanBecomeLeader = make(chan int, 100) rf.chanCommit = make(chan int, 100) rf.chanHeartBeat = make(chan int, 100) rf.chanVoteOther = make(chan int, 100) rf.chanApply = applyCh // initialize from state persisted before a crash rf.readPersist(persister.ReadRaftState()) go rf.working() go func() { for { select { case <-rf.chanCommit: rf.mtx.Lock() commitIndex := rf.commitIndex for i := rf.lastApplied + 1; i <= commitIndex; i++ { msg := ApplyMsg{CommandIndex: i, Command: rf.log[i].Log, CommandValid: true} applyCh <- msg rf.lastApplied = i } rf.mtx.Unlock() } } }() return rf }
{ // Your code here, if desired. }
identifier_body
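Each row in this dump stores one fill-in-the-middle sample: a file name, the text before and after an extracted span (prefix and suffix), the span itself (middle), and a fim_type label such as the identifier_body above, which holds the body of Kill(). As a rough sketch of how a consumer might stitch a sample back into source text; the record type and the reconstruct helper are hypothetical names, not part of any loader shipped with this data:

package main

import (
    "fmt"
    "strings"
)

// record mirrors the columns visible in the rows of this dump.
type record struct {
    FileName string
    Prefix   string
    Suffix   string
    Middle   string
    FimType  string
}

// reconstruct rebuilds the original source text: the middle slots back in
// between the prefix and the suffix.
func reconstruct(r record) string {
    return r.Prefix + r.Middle + r.Suffix
}

func main() {
    r := record{
        FileName: "raft.go",
        Prefix:   "func (rf *Raft) Kill() ",
        Middle:   "{\n\t// Your code here, if desired.\n}",
        Suffix:   "\n\nfunc (rf *Raft) working() {",
        FimType:  "identifier_body",
    }
    rebuilt := reconstruct(r)
    fmt.Println(r.FileName, r.FimType)
    fmt.Println(strings.Contains(rebuilt, "// Your code here, if desired.")) // true
}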
raft.go
package raft // // this is an outline of the API that raft must expose to // the service (or tester). see comments below for // each of these functions for more details. // // rf = Make(...) // create a new Raft server. // rf.Start(command interface{}) (index, Term, isleader) // start agreement on a new log entry // rf.GetState() (Term, isLeader) // ask a Raft for its current Term, and whether it thinks it is leader // ApplyMsg // each time a new entry is committed to the log, each Raft peer // should send an ApplyMsg to the service (or tester) // in the same server. // import ( "bytes" "encoding/gob" "math/rand" "sync" "time" ) import "labrpc" // import "bytes" // import "encoding/gob" const ( STATE_FOLLOWER = 0 STATE_LEADER = 1 STATE_CANDIDATE = 2 SEND_ENTRY_INTERVAL = 50 * time.Millisecond ) ///////tell leader that the commited log can be applied. // as each Raft peer becomes aware that successive log Entries are // committed, the peer should send an ApplyMsg to the service (or // tester) on the same server, via the applyCh passed to Make(). // type ApplyMsg struct { CommandIndex int // index of log array. Command interface{} UseSnapshot bool // ignore for lab2; only used in lab3 Snapshot []byte // ignore for lab2; only used in lab3 CommandValid bool } type logEntries struct { LogIndex int Term int Log interface{} } // // A Go object implementing a single Raft peer. // type Raft struct { mtx sync.Mutex peers []*labrpc.ClientEnd persister *Persister me int // index into peers[] // Your data here. // Look at the paper's Figure 2 for a description of what // state a Raft server must maintain. // persistent state for all state currentTerm int voteFor int log []logEntries // volatile state: first commit -> then apply commitIndex int // init to 0: the index that have been commited. lastApplied int // init to 0: the index that have been applied. // only for leaders nextIndex []int // init to [leader last applied log index + 1] matchIndex []int // init to 0; for each server, index of highest log entry known to be replicated // other fields state int beenVotedCount int // channels chanHeartBeat chan int chanBecomeLeader chan int chanCommit chan int chanVoteOther chan int chanApply chan ApplyMsg } // return currentTerm and whether this server // believes it is the leader. func (rf *Raft) GetState() (int, bool) { // Your code here. //rf.mtx.Lock() //defer rf.mtx.Unlock() return rf.currentTerm, rf.state == STATE_LEADER } // // save Raft's persistent state to stable storage, // where it can later be retrieved after a crash and restart. // see paper's Figure 2 for a description of what should be persistent. // func (rf *Raft) persist() { // Your code here. // Example: // w := new(bytes.Buffer) // e := gob.NewEncoder(w) // e.Encode(rf.xxx) // e.Encode(rf.yyy) // data := w.Bytes() // rf.persister.SaveRaftState(data) w := new(bytes.Buffer) e := gob.NewEncoder(w) e.Encode(rf.currentTerm) e.Encode(rf.voteFor) e.Encode(rf.log) data := w.Bytes() rf.persister.SaveRaftState(data) } // // restore previously persisted state. // func (rf *Raft) readPersist(data []byte) { // Your code here. // Example: // r := bytes.NewBuffer(data) // d := gob.NewDecoder(r) // d.Decode(&rf.xxx) // d.Decode(&rf.yyy) r := bytes.NewBuffer(data) d := gob.NewDecoder(r) d.Decode(&rf.currentTerm) d.Decode(&rf.voteFor) d.Decode(&rf.log) } // example RequestVote RPC arguments structure. type RequestVoteArgs struct { // Your data here. Term int CandidateId int LastLogIndex int LastLogTerm int } // example RequestVote RPC reply structure. 
type RequestVoteReply struct { // Your data here. Term int VoteGranted bool } type RequestAppendEntriesArgs struct { Term int LeaderId int PrevLogIndex int PrevLogTerm int Entries []logEntries LeaderCommitIndex int /// prevLogIndex, CommitIndex. } type RequestAppendEntriesReply struct { Term int Success bool NextIndex int // used to tell leader that follower's next empty log, or leader can decrease one each time. } // // example RequestVote RPC handler. // // rpc request/response should check term to convert self to follower; // should check peer's log info to vote peer. func (rf *Raft) RequestVote(args RequestVoteArgs, reply *RequestVoteReply) { //fmt.Printf("[::RequestVote]\n") // Your code here. rf.mtx.Lock() defer rf.mtx.Unlock() defer rf.persist() reply.VoteGranted = false // case 1: check term if args.Term < rf.currentTerm { reply.Term = rf.currentTerm return } if args.Term > rf.currentTerm { // set term to max. and then maybe become leader. rf.currentTerm = args.Term rf.state = STATE_FOLLOWER rf.voteFor = -1 } reply.Term = rf.currentTerm // case 2: check log isNewer := false if args.LastLogTerm == rf.log[len(rf.log)-1].Term { isNewer = args.LastLogIndex >= rf.log[len(rf.log)-1].LogIndex } else { isNewer = args.LastLogTerm > rf.log[len(rf.log)-1].Term }
if (rf.voteFor == -1 || rf.voteFor == args.CandidateId) && isNewer { rf.chanVoteOther <- 1 rf.state = STATE_FOLLOWER reply.VoteGranted = true rf.voteFor = args.CandidateId } } func (rf *Raft) RequestAppendEntries(args RequestAppendEntriesArgs, reply *RequestAppendEntriesReply) { // Your code here. // Q: should candidate append entries? //fmt.Println("[::RequestAppendEntries]", args) rf.mtx.Lock() defer rf.mtx.Unlock() defer rf.persist() // case 1: check term reply.Success = false if args.Term < rf.currentTerm { reply.Term = rf.currentTerm reply.NextIndex = rf.log[len(rf.log)-1].LogIndex + 1 return } rf.chanHeartBeat <- 1 if args.Term > rf.currentTerm { rf.currentTerm = args.Term rf.state = STATE_FOLLOWER rf.voteFor = -1 } reply.Term = args.Term // case 2: check log number if args.PrevLogIndex > rf.log[len(rf.log)-1].LogIndex { reply.NextIndex = rf.log[len(rf.log)-1].LogIndex + 1 return } // case 3: check log term. decrease one each time... if args.PrevLogIndex > 0 { term := rf.log[args.PrevLogIndex].Term if args.PrevLogTerm != term { for i := args.PrevLogIndex - 1; i >= 0; i-- { if rf.log[i].Term != term { reply.NextIndex = i + 1 break } } return } } // step4: success: copy the log. if args.PrevLogIndex < 0 { } else { rf.log = rf.log[:args.PrevLogIndex+1] rf.log = append(rf.log, args.Entries...) reply.Success = true reply.NextIndex = rf.log[len(rf.log)-1].LogIndex + 1 } if args.LeaderCommitIndex > rf.commitIndex { last := rf.log[len(rf.log)-1].LogIndex if args.LeaderCommitIndex > last { rf.commitIndex = last } else { rf.commitIndex = args.LeaderCommitIndex } rf.chanCommit <- 1 } return } // // example code to send a RequestVote RPC to a server. // server is the index of the target server in rf.peers[]. // expects RPC arguments in args. // fills in *reply with RPC reply, so caller should // pass &reply. // the types of the args and reply passed to Call() must be // the same as the types of the arguments declared in the // handler function (including whether they are pointers). // // returns true if labrpc says the RPC was delivered. // // if you're having trouble getting RPC to work, check that you've // capitalized all field names in structs passed over RPC, and // that the caller passes the address of the reply struct with &, not // the struct itself. // func (rf *Raft) sendRequestVote(server int, args RequestVoteArgs, reply *RequestVoteReply) bool { ok := rf.peers[server].Call("Raft.RequestVote", args, reply) rf.mtx.Lock() defer rf.mtx.Unlock() if ok { if rf.state != STATE_CANDIDATE { return ok } if args.Term != rf.currentTerm { // consider the current term's reply return ok } if reply.Term > rf.currentTerm { rf.currentTerm = reply.Term rf.state = STATE_FOLLOWER rf.voteFor = -1 rf.persist() } if reply.VoteGranted { rf.beenVotedCount++ if rf.state == STATE_CANDIDATE && rf.beenVotedCount > len(rf.peers)/2 { rf.state = STATE_FOLLOWER // ... 
rf.chanBecomeLeader <- 1 } } } return ok } func (rf *Raft) sendAppendEntries(server int, args RequestAppendEntriesArgs, reply *RequestAppendEntriesReply) bool { //fmt.Printf("[sendAppendEntries][who=%v][term=%v]\n", rf.me, args.Term) ok := rf.peers[server].Call("Raft.RequestAppendEntries", args, reply) rf.mtx.Lock() defer rf.mtx.Unlock() if ok { if rf.state != STATE_LEADER { return ok } if args.Term != rf.currentTerm { return ok } if reply.Term > rf.currentTerm { rf.currentTerm = reply.Term rf.state = STATE_FOLLOWER rf.voteFor = -1 rf.persist() return ok } if reply.Success { if len(args.Entries) > 0 { rf.nextIndex[server] = args.Entries[len(args.Entries)-1].LogIndex + 1 rf.matchIndex[server] = rf.nextIndex[server] - 1 } } else { rf.nextIndex[server] = reply.NextIndex } } return ok } func (rf *Raft) broadcastRequestVote() { var args RequestVoteArgs rf.mtx.Lock() args.Term = rf.currentTerm args.CandidateId = rf.me args.LastLogTerm = rf.log[len(rf.log)-1].Term args.LastLogIndex = rf.log[len(rf.log)-1].LogIndex rf.mtx.Unlock() //fmt.Printf("[broadcastRequestVote][Candidate = %v]\n", rf.me) for i := range rf.peers { if i != rf.me && rf.state == STATE_CANDIDATE { go func(i int) { var reply RequestVoteReply rf.sendRequestVote(i, args, &reply) }(i) } } } func (rf *Raft) broadcastAppendEntries() { //fmt.Printf("[::broadcastAppendEntries][Candidate = %v]", rf.me) rf.mtx.Lock() defer rf.mtx.Unlock() N := rf.commitIndex last := rf.log[len(rf.log)-1].LogIndex // step1: iterate all commitLog for i := rf.commitIndex + 1; i <= last; i++ { num := 1 for j := range rf.peers { if j != rf.me && rf.matchIndex[j] >= i && rf.log[i].Term == rf.currentTerm { num++ } } // replicated in majority of node. if 2*num > len(rf.peers) { N = i } } // step2: we can apply these logs. if N != rf.commitIndex { rf.commitIndex = N rf.chanCommit <- 1 /// majority of nodes have commited, then we can move applyIndex // } for i := range rf.peers { if i != rf.me && rf.state == STATE_LEADER { if rf.nextIndex[i] > 0 { // step3: nextIndex[node i] until the end. var args RequestAppendEntriesArgs args.Term = rf.currentTerm args.LeaderId = rf.me args.PrevLogIndex = rf.nextIndex[i] - 1 args.PrevLogTerm = rf.log[args.PrevLogIndex].Term args.Entries = make([]logEntries, len(rf.log[args.PrevLogIndex+1:])) copy(args.Entries, rf.log[args.PrevLogIndex+1:]) args.LeaderCommitIndex = rf.commitIndex go func(i int, args RequestAppendEntriesArgs) { var reply RequestAppendEntriesReply rf.sendAppendEntries(i, args, &reply) }(i, args) } } } } // // the service using Raft (e.g. a k/v server) wants to start // agreement on the next command to be appended to Raft's log. if this // server isn't the leader, returns false. otherwise start the // agreement and return immediately. there is no guarantee that this // command will ever be committed to the Raft log, since the leader // may fail or lose an election. // // the first return value is the index that the command will appear at // if it's ever committed. the second return value is the current // term. the third return value is true if this server believes it is // the leader. 
// func (rf *Raft) Start(command interface{}) (int, int, bool) { rf.mtx.Lock() defer rf.mtx.Unlock() index := -1 term := rf.currentTerm isLeader := rf.state == STATE_LEADER if isLeader { index = rf.log[len(rf.log)-1].LogIndex + 1 rf.log = append(rf.log, logEntries{Term: term, Log: command, LogIndex: index}) // append new entry from client rf.persist() } return index, term, isLeader } // // the tester calls Kill() when a Raft instance won't // be needed again. you are not required to do anything // in Kill(), but it might be convenient to (for example) // turn off debug output from this instance. // func (rf *Raft) Kill() { // Your code here, if desired. } func (rf *Raft) working() { for { switch rf.state { case STATE_LEADER: rf.broadcastAppendEntries() time.Sleep(SEND_ENTRY_INTERVAL) case STATE_FOLLOWER: select { case <-rf.chanHeartBeat: // RequestAppendEntries case <-rf.chanVoteOther: // RequestVote case <-time.After(time.Duration(rand.Int63()%333+500) * time.Millisecond): rf.state = STATE_CANDIDATE } case STATE_CANDIDATE: rf.mtx.Lock() rf.currentTerm++ rf.voteFor = rf.me // voteFor changed with currentTerm. rf.beenVotedCount = 1 rf.persist() rf.mtx.Unlock() go rf.broadcastRequestVote() select { case <-time.After(time.Duration(rand.Int63()%333+500) * time.Millisecond): case <-rf.chanHeartBeat: rf.state = STATE_FOLLOWER case <-rf.chanBecomeLeader: rf.mtx.Lock() rf.state = STATE_LEADER rf.nextIndex = make([]int, len(rf.peers)) rf.matchIndex = make([]int, len(rf.peers)) for i := range rf.peers { rf.nextIndex[i] = rf.log[len(rf.log)-1].LogIndex + 1 // initialize. rf.matchIndex[i] = 0 } rf.mtx.Unlock() } } } } // // the service or tester wants to create a Raft server. the ports // of all the Raft servers (including this one) are in peers[]. this // server's port is peers[me]. all the servers' peers[] arrays // have the same order. persister is a place for this server to // save its persistent state, and also initially holds the most // recent saved state, if any. applyCh is a channel on which the // tester or service expects Raft to send ApplyMsg messages. // Make() must return quickly, so it should start goroutines // for any long-running work. // func Make(peers []*labrpc.ClientEnd, me int, persister *Persister, applyCh chan ApplyMsg) *Raft { rf := &Raft{} rf.peers = peers rf.persister = persister rf.me = me // Your initialization code here. rf.state = STATE_FOLLOWER rf.currentTerm = 0 rf.voteFor = -1 rf.log = append(rf.log, logEntries{Term: 0}) rf.commitIndex = 0 rf.lastApplied = 0 rf.chanBecomeLeader = make(chan int, 100) rf.chanCommit = make(chan int, 100) rf.chanHeartBeat = make(chan int, 100) rf.chanVoteOther = make(chan int, 100) rf.chanApply = applyCh // initialize from state persisted before a crash rf.readPersist(persister.ReadRaftState()) go rf.working() go func() { for { select { case <-rf.chanCommit: rf.mtx.Lock() commitIndex := rf.commitIndex for i := rf.lastApplied + 1; i <= commitIndex; i++ { msg := ApplyMsg{CommandIndex: i, Command: rf.log[i].Log, CommandValid: true} applyCh <- msg rf.lastApplied = i } rf.mtx.Unlock() } } }() return rf }
random_line_split
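The RequestVote handler in the rows above grants a vote only when the candidate's log is at least as up-to-date as the voter's: last log terms are compared first, and last log indices break a tie. A minimal sketch of that comparison, with logUpToDate as an illustrative name rather than a function from the record:

package main

import "fmt"

// logUpToDate reports whether a candidate whose last entry is (candTerm, candIndex)
// is at least as up-to-date as a voter whose last entry is (myTerm, myIndex):
// the higher last term wins; equal terms fall back to the longer log.
func logUpToDate(candTerm, candIndex, myTerm, myIndex int) bool {
    if candTerm != myTerm {
        return candTerm > myTerm
    }
    return candIndex >= myIndex
}

func main() {
    fmt.Println(logUpToDate(3, 5, 2, 9)) // true: higher last term beats a longer log
    fmt.Println(logUpToDate(2, 4, 2, 6)) // false: same term, shorter log
    fmt.Println(logUpToDate(2, 6, 2, 6)) // true: identical logs count as up-to-date
}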
raft.go
package raft // // this is an outline of the API that raft must expose to // the service (or tester). see comments below for // each of these functions for more details. // // rf = Make(...) // create a new Raft server. // rf.Start(command interface{}) (index, Term, isleader) // start agreement on a new log entry // rf.GetState() (Term, isLeader) // ask a Raft for its current Term, and whether it thinks it is leader // ApplyMsg // each time a new entry is committed to the log, each Raft peer // should send an ApplyMsg to the service (or tester) // in the same server. // import ( "bytes" "encoding/gob" "math/rand" "sync" "time" ) import "labrpc" // import "bytes" // import "encoding/gob" const ( STATE_FOLLOWER = 0 STATE_LEADER = 1 STATE_CANDIDATE = 2 SEND_ENTRY_INTERVAL = 50 * time.Millisecond ) ///////tell leader that the commited log can be applied. // as each Raft peer becomes aware that successive log Entries are // committed, the peer should send an ApplyMsg to the service (or // tester) on the same server, via the applyCh passed to Make(). // type ApplyMsg struct { CommandIndex int // index of log array. Command interface{} UseSnapshot bool // ignore for lab2; only used in lab3 Snapshot []byte // ignore for lab2; only used in lab3 CommandValid bool } type logEntries struct { LogIndex int Term int Log interface{} } // // A Go object implementing a single Raft peer. // type Raft struct { mtx sync.Mutex peers []*labrpc.ClientEnd persister *Persister me int // index into peers[] // Your data here. // Look at the paper's Figure 2 for a description of what // state a Raft server must maintain. // persistent state for all state currentTerm int voteFor int log []logEntries // volatile state: first commit -> then apply commitIndex int // init to 0: the index that have been commited. lastApplied int // init to 0: the index that have been applied. // only for leaders nextIndex []int // init to [leader last applied log index + 1] matchIndex []int // init to 0; for each server, index of highest log entry known to be replicated // other fields state int beenVotedCount int // channels chanHeartBeat chan int chanBecomeLeader chan int chanCommit chan int chanVoteOther chan int chanApply chan ApplyMsg } // return currentTerm and whether this server // believes it is the leader. func (rf *Raft) GetState() (int, bool) { // Your code here. //rf.mtx.Lock() //defer rf.mtx.Unlock() return rf.currentTerm, rf.state == STATE_LEADER } // // save Raft's persistent state to stable storage, // where it can later be retrieved after a crash and restart. // see paper's Figure 2 for a description of what should be persistent. // func (rf *Raft) persist() { // Your code here. // Example: // w := new(bytes.Buffer) // e := gob.NewEncoder(w) // e.Encode(rf.xxx) // e.Encode(rf.yyy) // data := w.Bytes() // rf.persister.SaveRaftState(data) w := new(bytes.Buffer) e := gob.NewEncoder(w) e.Encode(rf.currentTerm) e.Encode(rf.voteFor) e.Encode(rf.log) data := w.Bytes() rf.persister.SaveRaftState(data) } // // restore previously persisted state. // func (rf *Raft) readPersist(data []byte) { // Your code here. // Example: // r := bytes.NewBuffer(data) // d := gob.NewDecoder(r) // d.Decode(&rf.xxx) // d.Decode(&rf.yyy) r := bytes.NewBuffer(data) d := gob.NewDecoder(r) d.Decode(&rf.currentTerm) d.Decode(&rf.voteFor) d.Decode(&rf.log) } // example RequestVote RPC arguments structure. type RequestVoteArgs struct { // Your data here. Term int CandidateId int LastLogIndex int LastLogTerm int } // example RequestVote RPC reply structure. 
type RequestVoteReply struct { // Your data here. Term int VoteGranted bool } type RequestAppendEntriesArgs struct { Term int LeaderId int PrevLogIndex int PrevLogTerm int Entries []logEntries LeaderCommitIndex int /// prevLogIndex, CommitIndex. } type RequestAppendEntriesReply struct { Term int Success bool NextIndex int // used to tell leader that follower's next empty log, or leader can decrease one each time. } // // example RequestVote RPC handler. // // rpc request/response should check term to convert self to follower; // should check peer's log info to vote peer. func (rf *Raft) RequestVote(args RequestVoteArgs, reply *RequestVoteReply) { //fmt.Printf("[::RequestVote]\n") // Your code here. rf.mtx.Lock() defer rf.mtx.Unlock() defer rf.persist() reply.VoteGranted = false // case 1: check term if args.Term < rf.currentTerm { reply.Term = rf.currentTerm return } if args.Term > rf.currentTerm { // set term to max. and then maybe become leader. rf.currentTerm = args.Term rf.state = STATE_FOLLOWER rf.voteFor = -1 } reply.Term = rf.currentTerm // case 2: check log isNewer := false if args.LastLogTerm == rf.log[len(rf.log)-1].Term { isNewer = args.LastLogIndex >= rf.log[len(rf.log)-1].LogIndex } else { isNewer = args.LastLogTerm > rf.log[len(rf.log)-1].Term } if (rf.voteFor == -1 || rf.voteFor == args.CandidateId) && isNewer { rf.chanVoteOther <- 1 rf.state = STATE_FOLLOWER reply.VoteGranted = true rf.voteFor = args.CandidateId } } func (rf *Raft) RequestAppendEntries(args RequestAppendEntriesArgs, reply *RequestAppendEntriesReply) { // Your code here. // Q: should candidate append entries? //fmt.Println("[::RequestAppendEntries]", args) rf.mtx.Lock() defer rf.mtx.Unlock() defer rf.persist() // case 1: check term reply.Success = false if args.Term < rf.currentTerm { reply.Term = rf.currentTerm reply.NextIndex = rf.log[len(rf.log)-1].LogIndex + 1 return } rf.chanHeartBeat <- 1 if args.Term > rf.currentTerm { rf.currentTerm = args.Term rf.state = STATE_FOLLOWER rf.voteFor = -1 } reply.Term = args.Term // case 2: check log number if args.PrevLogIndex > rf.log[len(rf.log)-1].LogIndex { reply.NextIndex = rf.log[len(rf.log)-1].LogIndex + 1 return } // case 3: check log term. decrease one each time... if args.PrevLogIndex > 0 { term := rf.log[args.PrevLogIndex].Term if args.PrevLogTerm != term { for i := args.PrevLogIndex - 1; i >= 0; i-- { if rf.log[i].Term != term { reply.NextIndex = i + 1 break } } return } } // step4: success: copy the log. if args.PrevLogIndex < 0 { } else { rf.log = rf.log[:args.PrevLogIndex+1] rf.log = append(rf.log, args.Entries...) reply.Success = true reply.NextIndex = rf.log[len(rf.log)-1].LogIndex + 1 } if args.LeaderCommitIndex > rf.commitIndex { last := rf.log[len(rf.log)-1].LogIndex if args.LeaderCommitIndex > last { rf.commitIndex = last } else { rf.commitIndex = args.LeaderCommitIndex } rf.chanCommit <- 1 } return } // // example code to send a RequestVote RPC to a server. // server is the index of the target server in rf.peers[]. // expects RPC arguments in args. // fills in *reply with RPC reply, so caller should // pass &reply. // the types of the args and reply passed to Call() must be // the same as the types of the arguments declared in the // handler function (including whether they are pointers). // // returns true if labrpc says the RPC was delivered. 
// // if you're having trouble getting RPC to work, check that you've // capitalized all field names in structs passed over RPC, and // that the caller passes the address of the reply struct with &, not // the struct itself. // func (rf *Raft) sendRequestVote(server int, args RequestVoteArgs, reply *RequestVoteReply) bool { ok := rf.peers[server].Call("Raft.RequestVote", args, reply) rf.mtx.Lock() defer rf.mtx.Unlock() if ok { if rf.state != STATE_CANDIDATE { return ok } if args.Term != rf.currentTerm { // consider the current term's reply return ok } if reply.Term > rf.currentTerm { rf.currentTerm = reply.Term rf.state = STATE_FOLLOWER rf.voteFor = -1 rf.persist() } if reply.VoteGranted { rf.beenVotedCount++ if rf.state == STATE_CANDIDATE && rf.beenVotedCount > len(rf.peers)/2 { rf.state = STATE_FOLLOWER // ... rf.chanBecomeLeader <- 1 } } } return ok } func (rf *Raft) sendAppendEntries(server int, args RequestAppendEntriesArgs, reply *RequestAppendEntriesReply) bool { //fmt.Printf("[sendAppendEntries][who=%v][term=%v]\n", rf.me, args.Term) ok := rf.peers[server].Call("Raft.RequestAppendEntries", args, reply) rf.mtx.Lock() defer rf.mtx.Unlock() if ok { if rf.state != STATE_LEADER { return ok } if args.Term != rf.currentTerm { return ok } if reply.Term > rf.currentTerm { rf.currentTerm = reply.Term rf.state = STATE_FOLLOWER rf.voteFor = -1 rf.persist() return ok } if reply.Success { if len(args.Entries) > 0 { rf.nextIndex[server] = args.Entries[len(args.Entries)-1].LogIndex + 1 rf.matchIndex[server] = rf.nextIndex[server] - 1 } } else { rf.nextIndex[server] = reply.NextIndex } } return ok } func (rf *Raft) broadcastRequestVote() { var args RequestVoteArgs rf.mtx.Lock() args.Term = rf.currentTerm args.CandidateId = rf.me args.LastLogTerm = rf.log[len(rf.log)-1].Term args.LastLogIndex = rf.log[len(rf.log)-1].LogIndex rf.mtx.Unlock() //fmt.Printf("[broadcastRequestVote][Candidate = %v]\n", rf.me) for i := range rf.peers { if i != rf.me && rf.state == STATE_CANDIDATE { go func(i int) { var reply RequestVoteReply rf.sendRequestVote(i, args, &reply) }(i) } } } func (rf *Raft)
() { //fmt.Printf("[::broadcastAppendEntries][Candidate = %v]", rf.me) rf.mtx.Lock() defer rf.mtx.Unlock() N := rf.commitIndex last := rf.log[len(rf.log)-1].LogIndex // step1: iterate all commitLog for i := rf.commitIndex + 1; i <= last; i++ { num := 1 for j := range rf.peers { if j != rf.me && rf.matchIndex[j] >= i && rf.log[i].Term == rf.currentTerm { num++ } } // replicated in majority of node. if 2*num > len(rf.peers) { N = i } } // step2: we can apply these logs. if N != rf.commitIndex { rf.commitIndex = N rf.chanCommit <- 1 /// majority of nodes have commited, then we can move applyIndex // } for i := range rf.peers { if i != rf.me && rf.state == STATE_LEADER { if rf.nextIndex[i] > 0 { // step3: nextIndex[node i] until the end. var args RequestAppendEntriesArgs args.Term = rf.currentTerm args.LeaderId = rf.me args.PrevLogIndex = rf.nextIndex[i] - 1 args.PrevLogTerm = rf.log[args.PrevLogIndex].Term args.Entries = make([]logEntries, len(rf.log[args.PrevLogIndex+1:])) copy(args.Entries, rf.log[args.PrevLogIndex+1:]) args.LeaderCommitIndex = rf.commitIndex go func(i int, args RequestAppendEntriesArgs) { var reply RequestAppendEntriesReply rf.sendAppendEntries(i, args, &reply) }(i, args) } } } } // // the service using Raft (e.g. a k/v server) wants to start // agreement on the next command to be appended to Raft's log. if this // server isn't the leader, returns false. otherwise start the // agreement and return immediately. there is no guarantee that this // command will ever be committed to the Raft log, since the leader // may fail or lose an election. // // the first return value is the index that the command will appear at // if it's ever committed. the second return value is the current // term. the third return value is true if this server believes it is // the leader. // func (rf *Raft) Start(command interface{}) (int, int, bool) { rf.mtx.Lock() defer rf.mtx.Unlock() index := -1 term := rf.currentTerm isLeader := rf.state == STATE_LEADER if isLeader { index = rf.log[len(rf.log)-1].LogIndex + 1 rf.log = append(rf.log, logEntries{Term: term, Log: command, LogIndex: index}) // append new entry from client rf.persist() } return index, term, isLeader } // // the tester calls Kill() when a Raft instance won't // be needed again. you are not required to do anything // in Kill(), but it might be convenient to (for example) // turn off debug output from this instance. // func (rf *Raft) Kill() { // Your code here, if desired. } func (rf *Raft) working() { for { switch rf.state { case STATE_LEADER: rf.broadcastAppendEntries() time.Sleep(SEND_ENTRY_INTERVAL) case STATE_FOLLOWER: select { case <-rf.chanHeartBeat: // RequestAppendEntries case <-rf.chanVoteOther: // RequestVote case <-time.After(time.Duration(rand.Int63()%333+500) * time.Millisecond): rf.state = STATE_CANDIDATE } case STATE_CANDIDATE: rf.mtx.Lock() rf.currentTerm++ rf.voteFor = rf.me // voteFor changed with currentTerm. rf.beenVotedCount = 1 rf.persist() rf.mtx.Unlock() go rf.broadcastRequestVote() select { case <-time.After(time.Duration(rand.Int63()%333+500) * time.Millisecond): case <-rf.chanHeartBeat: rf.state = STATE_FOLLOWER case <-rf.chanBecomeLeader: rf.mtx.Lock() rf.state = STATE_LEADER rf.nextIndex = make([]int, len(rf.peers)) rf.matchIndex = make([]int, len(rf.peers)) for i := range rf.peers { rf.nextIndex[i] = rf.log[len(rf.log)-1].LogIndex + 1 // initialize. rf.matchIndex[i] = 0 } rf.mtx.Unlock() } } } } // // the service or tester wants to create a Raft server. 
the ports // of all the Raft servers (including this one) are in peers[]. this // server's port is peers[me]. all the servers' peers[] arrays // have the same order. persister is a place for this server to // save its persistent state, and also initially holds the most // recent saved state, if any. applyCh is a channel on which the // tester or service expects Raft to send ApplyMsg messages. // Make() must return quickly, so it should start goroutines // for any long-running work. // func Make(peers []*labrpc.ClientEnd, me int, persister *Persister, applyCh chan ApplyMsg) *Raft { rf := &Raft{} rf.peers = peers rf.persister = persister rf.me = me // Your initialization code here. rf.state = STATE_FOLLOWER rf.currentTerm = 0 rf.voteFor = -1 rf.log = append(rf.log, logEntries{Term: 0}) rf.commitIndex = 0 rf.lastApplied = 0 rf.chanBecomeLeader = make(chan int, 100) rf.chanCommit = make(chan int, 100) rf.chanHeartBeat = make(chan int, 100) rf.chanVoteOther = make(chan int, 100) rf.chanApply = applyCh // initialize from state persisted before a crash rf.readPersist(persister.ReadRaftState()) go rf.working() go func() { for { select { case <-rf.chanCommit: rf.mtx.Lock() commitIndex := rf.commitIndex for i := rf.lastApplied + 1; i <= commitIndex; i++ { msg := ApplyMsg{CommandIndex: i, Command: rf.log[i].Log, CommandValid: true} applyCh <- msg rf.lastApplied = i } rf.mtx.Unlock() } } }() return rf }
broadcastAppendEntries
identifier_name
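When AppendEntries fails the consistency check, the follower in the rows above does not force the leader to step back one entry per RPC; it reports a NextIndex pointing at the first entry of the conflicting term so the leader can skip the whole term at once. The sketch below reproduces that backoff in isolation; entry and conflictHint are illustrative names, and the function assumes prevLogIndex lies within the follower's log:

package main

import "fmt"

// entry mirrors the Term field of logEntries in the raft.go rows.
type entry struct{ Term int }

// conflictHint returns (hint, ok). When the follower's term at prevLogIndex
// matches prevLogTerm, ok is true and hint is simply the next free slot.
// On a mismatch, ok is false and hint is the first index of the conflicting
// term, so the leader can retry from there instead of decrementing by one.
func conflictHint(log []entry, prevLogIndex, prevLogTerm int) (int, bool) {
    term := log[prevLogIndex].Term
    if term == prevLogTerm {
        return prevLogIndex + 1, true
    }
    next := 0
    for i := prevLogIndex - 1; i >= 0; i-- {
        if log[i].Term != term {
            next = i + 1
            break
        }
    }
    return next, false
}

func main() {
    // Follower log terms: index 0 is the dummy entry, then terms 1, 1, 3, 3, 3.
    log := []entry{{0}, {1}, {1}, {3}, {3}, {3}}
    fmt.Println(conflictHint(log, 5, 2)) // 3 false: term 3 conflicts, back off to its first index
    fmt.Println(conflictHint(log, 2, 1)) // 3 true: terms agree, append after index 2
}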
mod.rs
// High-level Internal Representation of GObject artifacts // // Here we provide a view of the world in terms of what GObject knows: // classes, interfaces, signals, etc. // // We construct this view of the world from the raw Abstract Syntax // Tree (AST) from the previous stage. use std::collections::HashMap; use proc_macro::TokenStream; use proc_macro2::{Delimiter, Span, TokenNode, TokenTree}; use quote::{Tokens, ToTokens}; use syn::{self, Ident, Path, Block, ReturnType}; use syn::punctuated::Punctuated; use syn::synom::Synom; use syn::buffer::TokenBuffer; use super::ast; use super::checking::*; use super::errors::*; use super::glib_utils::*; pub struct Program<'ast> { pub classes: Classes<'ast>, } pub struct Classes<'ast> { items: HashMap<Ident, Class<'ast>>, } pub struct Class<'ast> { pub name: Ident, // Foo pub gobject_parent: bool, pub parent: Tokens, // Parent pub parent_ffi: Tokens, // ffi::Parent pub parent_class_ffi: Tokens, // ffi::ParentClass pub implements: Vec<Path>, // names of GTypeInterfaces pub instance_private: Option<&'ast Path>, // pub class_private: Option<&'ast ast::PrivateStruct> // The order of these is important; it's the order of the slots in FooClass pub slots: Vec<Slot<'ast>>, // pub n_reserved_slots: usize, // pub properties: Vec<Property>, pub overrides: HashMap<Ident, Vec<Method<'ast>>> } pub enum Slot<'ast> { Method(Method<'ast>), VirtualMethod(VirtualMethod<'ast>), Signal(Signal) } pub struct Method<'ast> { pub public: bool, pub sig: FnSig<'ast>, pub body: &'ast Block, } pub struct VirtualMethod<'ast> { pub sig: FnSig<'ast>, pub body: Option<&'ast Block>, } pub struct FnSig<'ast> { pub name: Ident, pub inputs: Vec<FnArg<'ast>>, pub output: Ty<'ast>, } pub enum FnArg<'ast> { SelfRef(Token!(&), Token!(self)), Arg { mutbl: Option<Token![mut]>, name: Ident, ty: Ty<'ast>, } } pub struct Signal { // FIXME: signal flags } pub enum Ty<'ast> { Unit, Char(Ident), Bool(Ident), Borrowed(Box<Ty<'ast>>), Integer(Ident), Owned(&'ast syn::Path), } impl<'ast> Program<'ast> { pub fn from_ast_program(ast: &'ast ast::Program) -> Result<Program<'ast>> { check_program(ast)?; let mut classes = Classes::new(); for class in ast.classes() { classes.add(class)?; } for impl_ in ast.impls() { classes.add_impl(impl_)?; } Ok(Program { classes: classes, }) } } impl<'ast> Classes<'ast> { fn new() -> Classes<'ast> { Classes { items: HashMap::new(), } } pub fn len(&self) -> usize { self.items.len() } pub fn get(&self, name: &str) -> &Class { self.items.iter().find(|c| c.1.name == name).unwrap().1 } fn add(&mut self, ast_class: &'ast ast::Class) -> Result<()> { let prev = self.items.insert(ast_class.name, Class { name: ast_class.name, gobject_parent: ast_class.extends.is_none(), parent: tokens_ParentInstance(ast_class), parent_ffi: tokens_ParentInstanceFfi(ast_class), parent_class_ffi: tokens_ParentClassFfi(ast_class), implements: Vec::new(), instance_private: ast_class.items.iter().filter_map(|i| { match *i { ast::ClassItem::InstancePrivate(ref ip) => Some(&ip.path), } }).next(), slots: Vec::new(), overrides: HashMap::new(), }); if prev.is_some() { bail!("redefinition of class `{}`", ast_class.name); } Ok(()) } fn add_impl(&mut self, impl_: &'ast ast::Impl) -> Result<()> { let class = match self.items.get_mut(&impl_.self_path) { Some(class) => class, None => bail!("impl for class that doesn't exist: {}", impl_.self_path), }; match impl_.trait_ { Some(parent_class) => { for item in impl_.items.iter() { let item = match item.node { ast::ImplItemKind::Method(ref m) => m, 
ast::ImplItemKind::ReserveSlots(_) => { bail!("can't reserve slots in a parent class impl"); } }; if item.signal { bail!("can't implement signals for parent classes") } if !item.virtual_ { bail!("can only implement virtual functions for parent classes") } if item.public { bail!("overrides are always public, no `pub` needed") } let method = match class.translate_method(item)? { Slot::VirtualMethod(VirtualMethod { sig, body: Some(body) }) => { Method { public: false, sig, body } } Slot::VirtualMethod(VirtualMethod { .. }) => { bail!("overrides must provide a body for virtual \ methods"); } _ => unreachable!(), }; class.overrides .entry(parent_class) .or_insert(Vec::new()) .push(method); } } None => { for item in impl_.items.iter() { let slot = class.translate_slot(item)?; class.slots.push(slot); } } } Ok(()) } pub fn iter<'a>(&'a self) -> impl Iterator<Item = &'a Class> + 'a { self.items.values() } } impl<'ast> Class<'ast> { fn translate_slot(&mut self, item: &'ast ast::ImplItem) -> Result<Slot<'ast>> { assert_eq!(item.attrs.len(), 0); // attributes unimplemented match item.node { ast::ImplItemKind::Method(ref method) => self.translate_method(method), ast::ImplItemKind::ReserveSlots(ref _slots) => { panic!("reserve slots not implemented"); } } } fn translate_method(&mut self, method: &'ast ast::ImplItemMethod) -> Result<Slot<'ast>> { if method.signal { panic!("signals not implemented"); } if method.virtual_ { if method.public { bail!("function `{}` is virtual so it doesn't need to be public", method.name) } let sig = self.extract_sig(method)?; Ok(Slot::VirtualMethod(VirtualMethod { sig, body: method.body.as_ref(), })) } else { let sig = self.extract_sig(method)?; Ok(Slot::Method(Method { sig, public: method.public, body: method.body.as_ref().ok_or_else(|| { format!("function `{}` requires a body", method.name) })?, })) } } fn extract_sig(&mut self, method: &'ast ast::ImplItemMethod) -> Result<FnSig<'ast>> { Ok(FnSig { output: self.extract_output(&method.output)?, inputs: self.extract_inputs(&method.inputs)?, name: method.name, }) } fn extract_output(&mut self, output: &'ast ReturnType) -> Result<Ty<'ast>>
fn extract_inputs(&mut self, punc: &'ast Punctuated<syn::FnArg, Token!(,)>) -> Result<Vec<FnArg<'ast>>> { punc.iter().map(|arg| { match *arg { syn::FnArg::Captured(syn::ArgCaptured { ref pat, ref ty, .. }) => { let (name, mutbl) = match *pat { syn::Pat::Ident(syn::PatIdent { by_ref: None, mutability: m, ident, subpat: None, }) => { (ident, m) } _ => bail!("only bare identifiers are allowed as \ argument patterns"), }; Ok(FnArg::Arg { mutbl, name, ty: self.extract_ty(ty)?, }) } syn::FnArg::SelfRef(syn::ArgSelfRef { and_token, lifetime: None, mutability: None, self_token, }) => { Ok(FnArg::SelfRef(and_token, self_token)) } syn::FnArg::SelfRef(syn::ArgSelfRef { mutability: Some(..), .. }) => { bail!("&mut self not implemented yet") } syn::FnArg::SelfRef(syn::ArgSelfRef { lifetime: Some(..), .. }) => { bail!("lifetime arguments on self not implemented yet") } syn::FnArg::SelfValue(_) => bail!("by-value self not implemented"), syn::FnArg::Inferred(_) => bail!("cannot have inferred function arguments"), syn::FnArg::Ignored(_) => bail!("cannot have ignored function arguments"), } }).collect() } fn extract_ty(&mut self, t: &'ast syn::Type) -> Result<Ty<'ast>> { match *t { syn::Type::Slice(_) => bail!("slice types not implemented yet"), syn::Type::Array(_) => bail!("array types not implemented yet"), syn::Type::Ptr(_) => bail!("ptr types not implemented yet"), syn::Type::Reference(syn::TypeReference { lifetime: Some(_), .. }) => { bail!("borrowed types with lifetimes not implemented yet") } syn::Type::Reference(syn::TypeReference { lifetime: None, ref elem, ref mutability, .. }) => { if let Some(_) = *mutability { bail!("mutable borrowed pointers not implemented"); } let path = match **elem { syn::Type::Path(syn::TypePath { qself: None, ref path }) => path, _ => bail!("only borrowed pointers to paths supported"), }; let ty = self.extract_ty_path(path)?; Ok(Ty::Borrowed(Box::new(ty))) } syn::Type::BareFn(_) => bail!("function pointer types not implemented yet"), syn::Type::Never(_) => bail!("never not implemented yet"), syn::Type::Tuple(syn::TypeTuple { ref elems, .. }) => { if elems.len() == 0 { Ok(Ty::Unit) } else { bail!("tuple types not implemented yet") } } syn::Type::Path(syn::TypePath { qself: Some(_), .. }) => { bail!("path types with qualified self (`as` syntax) not allowed") } syn::Type::Path(syn::TypePath { qself: None, ref path }) => { self.extract_ty_path(path) } syn::Type::TraitObject(_) => bail!("trait objects not implemented yet"), syn::Type::ImplTrait(_) => bail!("trait objects not implemented yet"), syn::Type::Paren(syn::TypeParen { ref elem, .. }) => self.extract_ty(elem), syn::Type::Group(syn::TypeGroup { ref elem, .. 
}) => self.extract_ty(elem), syn::Type::Infer(_) => bail!("underscore types not allowed"), syn::Type::Macro(_) => bail!("type macros not allowed"), syn::Type::Verbatim(_) => bail!("type macros not allowed"), } } fn extract_ty_path(&mut self, t: &'ast syn::Path) -> Result<Ty<'ast>> { if t.segments.iter().any(|segment| { match segment.arguments { syn::PathArguments::None => false, _ => true, } }) { bail!("type or lifetime parameters not allowed") } if t.leading_colon.is_some() || t.segments.len() > 1 { return Ok(Ty::Owned(t)) } // let ident = t.segments.get(0).item().ident; let ident = t.segments.first().unwrap().value().ident; match ident.as_ref() { "char" => Ok(Ty::Char(ident)), "bool" => Ok(Ty::Bool(ident)), "i8" | "i16" | "i32" | "i64" | "isize" | "u8" | "u16" | "u32" | "u64" | "usize" => { Ok(Ty::Integer(ident)) } _other => Ok(Ty::Owned(t)), } } } fn make_path_glib_object() -> Path { let tokens = quote_cs! { glib::Object }; let token_stream = TokenStream::from(tokens); let buffer = TokenBuffer::new(token_stream); let cursor = buffer.begin(); Path::parse(cursor).unwrap().0 } impl<'a> ToTokens for FnArg<'a> { fn to_tokens(&self, tokens: &mut Tokens) { match *self { FnArg::SelfRef(and, self_) => { and.to_tokens(tokens); self_.to_tokens(tokens); } FnArg::Arg { name, ref ty, mutbl } => { mutbl.to_tokens(tokens); name.to_tokens(tokens); Token!(:)([Span::def_site()]).to_tokens(tokens); ty.to_tokens(tokens); } } } } impl<'a> ToTokens for Ty<'a> { fn to_tokens(&self, tokens: &mut Tokens) { match *self { Ty::Unit => tokens.append(TokenTree { span: Span::call_site(), kind: TokenNode::Group(Delimiter::Parenthesis, quote!{ () }.into()), }), Ty::Char(tok) => tok.to_tokens(tokens), Ty::Bool(tok) => tok.to_tokens(tokens), Ty::Integer(t) => t.to_tokens(tokens), Ty::Borrowed(ref t) => { Token!(&)([Span::def_site()]).to_tokens(tokens); t.to_tokens(tokens) } Ty::Owned(t) => t.to_tokens(tokens), } } } pub mod tests { use super::*; pub fn run() { creates_trivial_class(); creates_class_with_superclass(); } fn test_class_and_superclass (raw: &str, class_name: &str, superclass_name: &str) { let token_stream = raw.parse::<TokenStream>().unwrap(); let buffer = TokenBuffer::new(token_stream); let cursor = buffer.begin(); let ast_program = ast::Program::parse(cursor).unwrap().0; let program = Program::from_ast_program(&ast_program).unwrap(); assert!(program.classes.len() == 1); let class = program.classes.get(class_name); assert_eq!(class.name.as_ref(), class_name); assert_eq!(class.parent.to_string(), superclass_name); } fn creates_trivial_class() { let raw = "class Foo {}"; test_class_and_superclass(raw, "Foo", "glib :: Object"); } fn creates_class_with_superclass() { let raw = "class Foo: Bar {}"; test_class_and_superclass(raw, "Foo", "Bar"); } }
{ match *output { ReturnType::Type(_, ref boxt) => self.extract_ty(boxt), ReturnType::Default => Ok(Ty::Unit), } }
identifier_body
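Back in the raft.go rows, the follower branch of working() waits on a heartbeat channel with a randomized timeout of roughly 500 to 832 ms (rand.Int63()%333+500) so that peers rarely time out simultaneously and split the vote. A small self-contained sketch of that wait-or-timeout pattern follows; electionTimeout and waitForLeader are illustrative names, not functions from the record:

package main

import (
    "fmt"
    "math/rand"
    "time"
)

// electionTimeout mirrors the randomized wait used by the follower loop:
// a fresh duration in the 500-832 ms range each time it is consulted.
func electionTimeout() time.Duration {
    return time.Duration(rand.Int63()%333+500) * time.Millisecond
}

// waitForLeader returns true if a heartbeat arrived before the timeout fired;
// false means the follower would transition to candidate and start an election.
func waitForLeader(heartbeat <-chan int) bool {
    select {
    case <-heartbeat:
        return true
    case <-time.After(electionTimeout()):
        return false
    }
}

func main() {
    heartbeat := make(chan int, 1)

    // Simulate a live leader: a heartbeat lands well inside the timeout window.
    go func() {
        time.Sleep(50 * time.Millisecond)
        heartbeat <- 1
    }()
    fmt.Println("leader alive:", waitForLeader(heartbeat)) // true

    // No further heartbeats: the timer fires and an election would begin.
    fmt.Println("leader alive:", waitForLeader(heartbeat)) // false, after ~0.5-0.8s
}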
mod.rs
// High-level Internal Representation of GObject artifacts // // Here we provide a view of the world in terms of what GObject knows: // classes, interfaces, signals, etc. // // We construct this view of the world from the raw Abstract Syntax // Tree (AST) from the previous stage. use std::collections::HashMap; use proc_macro::TokenStream; use proc_macro2::{Delimiter, Span, TokenNode, TokenTree}; use quote::{Tokens, ToTokens}; use syn::{self, Ident, Path, Block, ReturnType}; use syn::punctuated::Punctuated; use syn::synom::Synom; use syn::buffer::TokenBuffer; use super::ast; use super::checking::*; use super::errors::*; use super::glib_utils::*; pub struct Program<'ast> { pub classes: Classes<'ast>, } pub struct Classes<'ast> { items: HashMap<Ident, Class<'ast>>, } pub struct Class<'ast> { pub name: Ident, // Foo pub gobject_parent: bool, pub parent: Tokens, // Parent pub parent_ffi: Tokens, // ffi::Parent pub parent_class_ffi: Tokens, // ffi::ParentClass pub implements: Vec<Path>, // names of GTypeInterfaces pub instance_private: Option<&'ast Path>, // pub class_private: Option<&'ast ast::PrivateStruct> // The order of these is important; it's the order of the slots in FooClass pub slots: Vec<Slot<'ast>>, // pub n_reserved_slots: usize, // pub properties: Vec<Property>, pub overrides: HashMap<Ident, Vec<Method<'ast>>> } pub enum Slot<'ast> { Method(Method<'ast>), VirtualMethod(VirtualMethod<'ast>), Signal(Signal) } pub struct Method<'ast> { pub public: bool, pub sig: FnSig<'ast>, pub body: &'ast Block, } pub struct VirtualMethod<'ast> { pub sig: FnSig<'ast>, pub body: Option<&'ast Block>, } pub struct FnSig<'ast> { pub name: Ident, pub inputs: Vec<FnArg<'ast>>, pub output: Ty<'ast>, } pub enum FnArg<'ast> { SelfRef(Token!(&), Token!(self)), Arg { mutbl: Option<Token![mut]>, name: Ident, ty: Ty<'ast>, } } pub struct Signal { // FIXME: signal flags } pub enum Ty<'ast> { Unit, Char(Ident), Bool(Ident), Borrowed(Box<Ty<'ast>>), Integer(Ident), Owned(&'ast syn::Path), } impl<'ast> Program<'ast> { pub fn from_ast_program(ast: &'ast ast::Program) -> Result<Program<'ast>> { check_program(ast)?; let mut classes = Classes::new(); for class in ast.classes() { classes.add(class)?; } for impl_ in ast.impls() { classes.add_impl(impl_)?; } Ok(Program { classes: classes, }) } } impl<'ast> Classes<'ast> { fn new() -> Classes<'ast> { Classes { items: HashMap::new(), } } pub fn len(&self) -> usize { self.items.len() } pub fn get(&self, name: &str) -> &Class { self.items.iter().find(|c| c.1.name == name).unwrap().1 } fn add(&mut self, ast_class: &'ast ast::Class) -> Result<()> { let prev = self.items.insert(ast_class.name, Class { name: ast_class.name, gobject_parent: ast_class.extends.is_none(), parent: tokens_ParentInstance(ast_class), parent_ffi: tokens_ParentInstanceFfi(ast_class), parent_class_ffi: tokens_ParentClassFfi(ast_class), implements: Vec::new(), instance_private: ast_class.items.iter().filter_map(|i| { match *i { ast::ClassItem::InstancePrivate(ref ip) => Some(&ip.path), } }).next(), slots: Vec::new(), overrides: HashMap::new(), }); if prev.is_some() { bail!("redefinition of class `{}`", ast_class.name); } Ok(()) } fn add_impl(&mut self, impl_: &'ast ast::Impl) -> Result<()> { let class = match self.items.get_mut(&impl_.self_path) { Some(class) => class, None => bail!("impl for class that doesn't exist: {}", impl_.self_path), }; match impl_.trait_ { Some(parent_class) => { for item in impl_.items.iter() { let item = match item.node { ast::ImplItemKind::Method(ref m) => m, 
ast::ImplItemKind::ReserveSlots(_) => { bail!("can't reserve slots in a parent class impl"); } }; if item.signal { bail!("can't implement signals for parent classes") } if !item.virtual_ { bail!("can only implement virtual functions for parent classes") } if item.public { bail!("overrides are always public, no `pub` needed") } let method = match class.translate_method(item)? { Slot::VirtualMethod(VirtualMethod { sig, body: Some(body) }) => { Method { public: false, sig, body } } Slot::VirtualMethod(VirtualMethod { .. }) => { bail!("overrides must provide a body for virtual \ methods"); } _ => unreachable!(), }; class.overrides .entry(parent_class) .or_insert(Vec::new()) .push(method); } } None => { for item in impl_.items.iter() { let slot = class.translate_slot(item)?; class.slots.push(slot);
} } } Ok(()) } pub fn iter<'a>(&'a self) -> impl Iterator<Item = &'a Class> + 'a { self.items.values() } } impl<'ast> Class<'ast> { fn translate_slot(&mut self, item: &'ast ast::ImplItem) -> Result<Slot<'ast>> { assert_eq!(item.attrs.len(), 0); // attributes unimplemented match item.node { ast::ImplItemKind::Method(ref method) => self.translate_method(method), ast::ImplItemKind::ReserveSlots(ref _slots) => { panic!("reserve slots not implemented"); } } } fn translate_method(&mut self, method: &'ast ast::ImplItemMethod) -> Result<Slot<'ast>> { if method.signal { panic!("signals not implemented"); } if method.virtual_ { if method.public { bail!("function `{}` is virtual so it doesn't need to be public", method.name) } let sig = self.extract_sig(method)?; Ok(Slot::VirtualMethod(VirtualMethod { sig, body: method.body.as_ref(), })) } else { let sig = self.extract_sig(method)?; Ok(Slot::Method(Method { sig, public: method.public, body: method.body.as_ref().ok_or_else(|| { format!("function `{}` requires a body", method.name) })?, })) } } fn extract_sig(&mut self, method: &'ast ast::ImplItemMethod) -> Result<FnSig<'ast>> { Ok(FnSig { output: self.extract_output(&method.output)?, inputs: self.extract_inputs(&method.inputs)?, name: method.name, }) } fn extract_output(&mut self, output: &'ast ReturnType) -> Result<Ty<'ast>> { match *output { ReturnType::Type(_, ref boxt) => self.extract_ty(boxt), ReturnType::Default => Ok(Ty::Unit), } } fn extract_inputs(&mut self, punc: &'ast Punctuated<syn::FnArg, Token!(,)>) -> Result<Vec<FnArg<'ast>>> { punc.iter().map(|arg| { match *arg { syn::FnArg::Captured(syn::ArgCaptured { ref pat, ref ty, .. }) => { let (name, mutbl) = match *pat { syn::Pat::Ident(syn::PatIdent { by_ref: None, mutability: m, ident, subpat: None, }) => { (ident, m) } _ => bail!("only bare identifiers are allowed as \ argument patterns"), }; Ok(FnArg::Arg { mutbl, name, ty: self.extract_ty(ty)?, }) } syn::FnArg::SelfRef(syn::ArgSelfRef { and_token, lifetime: None, mutability: None, self_token, }) => { Ok(FnArg::SelfRef(and_token, self_token)) } syn::FnArg::SelfRef(syn::ArgSelfRef { mutability: Some(..), .. }) => { bail!("&mut self not implemented yet") } syn::FnArg::SelfRef(syn::ArgSelfRef { lifetime: Some(..), .. }) => { bail!("lifetime arguments on self not implemented yet") } syn::FnArg::SelfValue(_) => bail!("by-value self not implemented"), syn::FnArg::Inferred(_) => bail!("cannot have inferred function arguments"), syn::FnArg::Ignored(_) => bail!("cannot have ignored function arguments"), } }).collect() } fn extract_ty(&mut self, t: &'ast syn::Type) -> Result<Ty<'ast>> { match *t { syn::Type::Slice(_) => bail!("slice types not implemented yet"), syn::Type::Array(_) => bail!("array types not implemented yet"), syn::Type::Ptr(_) => bail!("ptr types not implemented yet"), syn::Type::Reference(syn::TypeReference { lifetime: Some(_), .. }) => { bail!("borrowed types with lifetimes not implemented yet") } syn::Type::Reference(syn::TypeReference { lifetime: None, ref elem, ref mutability, .. }) => { if let Some(_) = *mutability { bail!("mutable borrowed pointers not implemented"); } let path = match **elem { syn::Type::Path(syn::TypePath { qself: None, ref path }) => path, _ => bail!("only borrowed pointers to paths supported"), }; let ty = self.extract_ty_path(path)?; Ok(Ty::Borrowed(Box::new(ty))) } syn::Type::BareFn(_) => bail!("function pointer types not implemented yet"), syn::Type::Never(_) => bail!("never not implemented yet"), syn::Type::Tuple(syn::TypeTuple { ref elems, .. 
}) => { if elems.len() == 0 { Ok(Ty::Unit) } else { bail!("tuple types not implemented yet") } } syn::Type::Path(syn::TypePath { qself: Some(_), .. }) => { bail!("path types with qualified self (`as` syntax) not allowed") } syn::Type::Path(syn::TypePath { qself: None, ref path }) => { self.extract_ty_path(path) } syn::Type::TraitObject(_) => bail!("trait objects not implemented yet"), syn::Type::ImplTrait(_) => bail!("trait objects not implemented yet"), syn::Type::Paren(syn::TypeParen { ref elem, .. }) => self.extract_ty(elem), syn::Type::Group(syn::TypeGroup { ref elem, .. }) => self.extract_ty(elem), syn::Type::Infer(_) => bail!("underscore types not allowed"), syn::Type::Macro(_) => bail!("type macros not allowed"), syn::Type::Verbatim(_) => bail!("type macros not allowed"), } } fn extract_ty_path(&mut self, t: &'ast syn::Path) -> Result<Ty<'ast>> { if t.segments.iter().any(|segment| { match segment.arguments { syn::PathArguments::None => false, _ => true, } }) { bail!("type or lifetime parameters not allowed") } if t.leading_colon.is_some() || t.segments.len() > 1 { return Ok(Ty::Owned(t)) } // let ident = t.segments.get(0).item().ident; let ident = t.segments.first().unwrap().value().ident; match ident.as_ref() { "char" => Ok(Ty::Char(ident)), "bool" => Ok(Ty::Bool(ident)), "i8" | "i16" | "i32" | "i64" | "isize" | "u8" | "u16" | "u32" | "u64" | "usize" => { Ok(Ty::Integer(ident)) } _other => Ok(Ty::Owned(t)), } } } fn make_path_glib_object() -> Path { let tokens = quote_cs! { glib::Object }; let token_stream = TokenStream::from(tokens); let buffer = TokenBuffer::new(token_stream); let cursor = buffer.begin(); Path::parse(cursor).unwrap().0 } impl<'a> ToTokens for FnArg<'a> { fn to_tokens(&self, tokens: &mut Tokens) { match *self { FnArg::SelfRef(and, self_) => { and.to_tokens(tokens); self_.to_tokens(tokens); } FnArg::Arg { name, ref ty, mutbl } => { mutbl.to_tokens(tokens); name.to_tokens(tokens); Token!(:)([Span::def_site()]).to_tokens(tokens); ty.to_tokens(tokens); } } } } impl<'a> ToTokens for Ty<'a> { fn to_tokens(&self, tokens: &mut Tokens) { match *self { Ty::Unit => tokens.append(TokenTree { span: Span::call_site(), kind: TokenNode::Group(Delimiter::Parenthesis, quote!{ () }.into()), }), Ty::Char(tok) => tok.to_tokens(tokens), Ty::Bool(tok) => tok.to_tokens(tokens), Ty::Integer(t) => t.to_tokens(tokens), Ty::Borrowed(ref t) => { Token!(&)([Span::def_site()]).to_tokens(tokens); t.to_tokens(tokens) } Ty::Owned(t) => t.to_tokens(tokens), } } } pub mod tests { use super::*; pub fn run() { creates_trivial_class(); creates_class_with_superclass(); } fn test_class_and_superclass (raw: &str, class_name: &str, superclass_name: &str) { let token_stream = raw.parse::<TokenStream>().unwrap(); let buffer = TokenBuffer::new(token_stream); let cursor = buffer.begin(); let ast_program = ast::Program::parse(cursor).unwrap().0; let program = Program::from_ast_program(&ast_program).unwrap(); assert!(program.classes.len() == 1); let class = program.classes.get(class_name); assert_eq!(class.name.as_ref(), class_name); assert_eq!(class.parent.to_string(), superclass_name); } fn creates_trivial_class() { let raw = "class Foo {}"; test_class_and_superclass(raw, "Foo", "glib :: Object"); } fn creates_class_with_superclass() { let raw = "class Foo: Bar {}"; test_class_and_superclass(raw, "Foo", "Bar"); } }
random_line_split
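The record above is a `random_line_split` example: the source file is cut at an arbitrary character position, so concatenating prefix, middle and suffix reproduces the original text exactly. Below is a minimal Python sketch of that invariant; the dict keys mirror the column names of this dump, and the demo record itself is invented for illustration.

# Sketch only: field names are assumptions taken from the columns of this dump.
def reassemble(record):
    """Rebuild the original file text from a FIM record."""
    return record["prefix"] + record["middle"] + record["suffix"]

def check_random_line_split(record):
    """For `random_line_split` records the three pieces simply concatenate;
    the cut point is arbitrary, so the middle may start or end mid-token."""
    assert record["fim_type"] == "random_line_split"
    return reassemble(record)

if __name__ == "__main__":
    demo = {
        "fim_type": "random_line_split",
        "prefix": "fn main() {\n    println!(\"he",
        "middle": "llo\");",
        "suffix": "\n}\n",
    }
    print(check_random_line_split(demo))  # prints the reassembled source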
mod.rs
// High-level Internal Representation of GObject artifacts // // Here we provide a view of the world in terms of what GObject knows: // classes, interfaces, signals, etc. // // We construct this view of the world from the raw Abstract Syntax // Tree (AST) from the previous stage. use std::collections::HashMap; use proc_macro::TokenStream; use proc_macro2::{Delimiter, Span, TokenNode, TokenTree}; use quote::{Tokens, ToTokens}; use syn::{self, Ident, Path, Block, ReturnType}; use syn::punctuated::Punctuated; use syn::synom::Synom; use syn::buffer::TokenBuffer; use super::ast; use super::checking::*; use super::errors::*; use super::glib_utils::*; pub struct Program<'ast> { pub classes: Classes<'ast>, } pub struct Classes<'ast> { items: HashMap<Ident, Class<'ast>>, } pub struct Class<'ast> { pub name: Ident, // Foo pub gobject_parent: bool, pub parent: Tokens, // Parent pub parent_ffi: Tokens, // ffi::Parent pub parent_class_ffi: Tokens, // ffi::ParentClass pub implements: Vec<Path>, // names of GTypeInterfaces pub instance_private: Option<&'ast Path>, // pub class_private: Option<&'ast ast::PrivateStruct> // The order of these is important; it's the order of the slots in FooClass pub slots: Vec<Slot<'ast>>, // pub n_reserved_slots: usize, // pub properties: Vec<Property>, pub overrides: HashMap<Ident, Vec<Method<'ast>>> } pub enum Slot<'ast> { Method(Method<'ast>), VirtualMethod(VirtualMethod<'ast>), Signal(Signal) } pub struct Method<'ast> { pub public: bool, pub sig: FnSig<'ast>, pub body: &'ast Block, } pub struct VirtualMethod<'ast> { pub sig: FnSig<'ast>, pub body: Option<&'ast Block>, } pub struct FnSig<'ast> { pub name: Ident, pub inputs: Vec<FnArg<'ast>>, pub output: Ty<'ast>, } pub enum FnArg<'ast> { SelfRef(Token!(&), Token!(self)), Arg { mutbl: Option<Token![mut]>, name: Ident, ty: Ty<'ast>, } } pub struct Signal { // FIXME: signal flags } pub enum Ty<'ast> { Unit, Char(Ident), Bool(Ident), Borrowed(Box<Ty<'ast>>), Integer(Ident), Owned(&'ast syn::Path), } impl<'ast> Program<'ast> { pub fn from_ast_program(ast: &'ast ast::Program) -> Result<Program<'ast>> { check_program(ast)?; let mut classes = Classes::new(); for class in ast.classes() { classes.add(class)?; } for impl_ in ast.impls() { classes.add_impl(impl_)?; } Ok(Program { classes: classes, }) } } impl<'ast> Classes<'ast> { fn new() -> Classes<'ast> { Classes { items: HashMap::new(), } } pub fn
(&self) -> usize { self.items.len() } pub fn get(&self, name: &str) -> &Class { self.items.iter().find(|c| c.1.name == name).unwrap().1 } fn add(&mut self, ast_class: &'ast ast::Class) -> Result<()> { let prev = self.items.insert(ast_class.name, Class { name: ast_class.name, gobject_parent: ast_class.extends.is_none(), parent: tokens_ParentInstance(ast_class), parent_ffi: tokens_ParentInstanceFfi(ast_class), parent_class_ffi: tokens_ParentClassFfi(ast_class), implements: Vec::new(), instance_private: ast_class.items.iter().filter_map(|i| { match *i { ast::ClassItem::InstancePrivate(ref ip) => Some(&ip.path), } }).next(), slots: Vec::new(), overrides: HashMap::new(), }); if prev.is_some() { bail!("redefinition of class `{}`", ast_class.name); } Ok(()) } fn add_impl(&mut self, impl_: &'ast ast::Impl) -> Result<()> { let class = match self.items.get_mut(&impl_.self_path) { Some(class) => class, None => bail!("impl for class that doesn't exist: {}", impl_.self_path), }; match impl_.trait_ { Some(parent_class) => { for item in impl_.items.iter() { let item = match item.node { ast::ImplItemKind::Method(ref m) => m, ast::ImplItemKind::ReserveSlots(_) => { bail!("can't reserve slots in a parent class impl"); } }; if item.signal { bail!("can't implement signals for parent classes") } if !item.virtual_ { bail!("can only implement virtual functions for parent classes") } if item.public { bail!("overrides are always public, no `pub` needed") } let method = match class.translate_method(item)? { Slot::VirtualMethod(VirtualMethod { sig, body: Some(body) }) => { Method { public: false, sig, body } } Slot::VirtualMethod(VirtualMethod { .. }) => { bail!("overrides must provide a body for virtual \ methods"); } _ => unreachable!(), }; class.overrides .entry(parent_class) .or_insert(Vec::new()) .push(method); } } None => { for item in impl_.items.iter() { let slot = class.translate_slot(item)?; class.slots.push(slot); } } } Ok(()) } pub fn iter<'a>(&'a self) -> impl Iterator<Item = &'a Class> + 'a { self.items.values() } } impl<'ast> Class<'ast> { fn translate_slot(&mut self, item: &'ast ast::ImplItem) -> Result<Slot<'ast>> { assert_eq!(item.attrs.len(), 0); // attributes unimplemented match item.node { ast::ImplItemKind::Method(ref method) => self.translate_method(method), ast::ImplItemKind::ReserveSlots(ref _slots) => { panic!("reserve slots not implemented"); } } } fn translate_method(&mut self, method: &'ast ast::ImplItemMethod) -> Result<Slot<'ast>> { if method.signal { panic!("signals not implemented"); } if method.virtual_ { if method.public { bail!("function `{}` is virtual so it doesn't need to be public", method.name) } let sig = self.extract_sig(method)?; Ok(Slot::VirtualMethod(VirtualMethod { sig, body: method.body.as_ref(), })) } else { let sig = self.extract_sig(method)?; Ok(Slot::Method(Method { sig, public: method.public, body: method.body.as_ref().ok_or_else(|| { format!("function `{}` requires a body", method.name) })?, })) } } fn extract_sig(&mut self, method: &'ast ast::ImplItemMethod) -> Result<FnSig<'ast>> { Ok(FnSig { output: self.extract_output(&method.output)?, inputs: self.extract_inputs(&method.inputs)?, name: method.name, }) } fn extract_output(&mut self, output: &'ast ReturnType) -> Result<Ty<'ast>> { match *output { ReturnType::Type(_, ref boxt) => self.extract_ty(boxt), ReturnType::Default => Ok(Ty::Unit), } } fn extract_inputs(&mut self, punc: &'ast Punctuated<syn::FnArg, Token!(,)>) -> Result<Vec<FnArg<'ast>>> { punc.iter().map(|arg| { match *arg { 
syn::FnArg::Captured(syn::ArgCaptured { ref pat, ref ty, .. }) => { let (name, mutbl) = match *pat { syn::Pat::Ident(syn::PatIdent { by_ref: None, mutability: m, ident, subpat: None, }) => { (ident, m) } _ => bail!("only bare identifiers are allowed as \ argument patterns"), }; Ok(FnArg::Arg { mutbl, name, ty: self.extract_ty(ty)?, }) } syn::FnArg::SelfRef(syn::ArgSelfRef { and_token, lifetime: None, mutability: None, self_token, }) => { Ok(FnArg::SelfRef(and_token, self_token)) } syn::FnArg::SelfRef(syn::ArgSelfRef { mutability: Some(..), .. }) => { bail!("&mut self not implemented yet") } syn::FnArg::SelfRef(syn::ArgSelfRef { lifetime: Some(..), .. }) => { bail!("lifetime arguments on self not implemented yet") } syn::FnArg::SelfValue(_) => bail!("by-value self not implemented"), syn::FnArg::Inferred(_) => bail!("cannot have inferred function arguments"), syn::FnArg::Ignored(_) => bail!("cannot have ignored function arguments"), } }).collect() } fn extract_ty(&mut self, t: &'ast syn::Type) -> Result<Ty<'ast>> { match *t { syn::Type::Slice(_) => bail!("slice types not implemented yet"), syn::Type::Array(_) => bail!("array types not implemented yet"), syn::Type::Ptr(_) => bail!("ptr types not implemented yet"), syn::Type::Reference(syn::TypeReference { lifetime: Some(_), .. }) => { bail!("borrowed types with lifetimes not implemented yet") } syn::Type::Reference(syn::TypeReference { lifetime: None, ref elem, ref mutability, .. }) => { if let Some(_) = *mutability { bail!("mutable borrowed pointers not implemented"); } let path = match **elem { syn::Type::Path(syn::TypePath { qself: None, ref path }) => path, _ => bail!("only borrowed pointers to paths supported"), }; let ty = self.extract_ty_path(path)?; Ok(Ty::Borrowed(Box::new(ty))) } syn::Type::BareFn(_) => bail!("function pointer types not implemented yet"), syn::Type::Never(_) => bail!("never not implemented yet"), syn::Type::Tuple(syn::TypeTuple { ref elems, .. }) => { if elems.len() == 0 { Ok(Ty::Unit) } else { bail!("tuple types not implemented yet") } } syn::Type::Path(syn::TypePath { qself: Some(_), .. }) => { bail!("path types with qualified self (`as` syntax) not allowed") } syn::Type::Path(syn::TypePath { qself: None, ref path }) => { self.extract_ty_path(path) } syn::Type::TraitObject(_) => bail!("trait objects not implemented yet"), syn::Type::ImplTrait(_) => bail!("trait objects not implemented yet"), syn::Type::Paren(syn::TypeParen { ref elem, .. }) => self.extract_ty(elem), syn::Type::Group(syn::TypeGroup { ref elem, .. }) => self.extract_ty(elem), syn::Type::Infer(_) => bail!("underscore types not allowed"), syn::Type::Macro(_) => bail!("type macros not allowed"), syn::Type::Verbatim(_) => bail!("type macros not allowed"), } } fn extract_ty_path(&mut self, t: &'ast syn::Path) -> Result<Ty<'ast>> { if t.segments.iter().any(|segment| { match segment.arguments { syn::PathArguments::None => false, _ => true, } }) { bail!("type or lifetime parameters not allowed") } if t.leading_colon.is_some() || t.segments.len() > 1 { return Ok(Ty::Owned(t)) } // let ident = t.segments.get(0).item().ident; let ident = t.segments.first().unwrap().value().ident; match ident.as_ref() { "char" => Ok(Ty::Char(ident)), "bool" => Ok(Ty::Bool(ident)), "i8" | "i16" | "i32" | "i64" | "isize" | "u8" | "u16" | "u32" | "u64" | "usize" => { Ok(Ty::Integer(ident)) } _other => Ok(Ty::Owned(t)), } } } fn make_path_glib_object() -> Path { let tokens = quote_cs! 
{ glib::Object }; let token_stream = TokenStream::from(tokens); let buffer = TokenBuffer::new(token_stream); let cursor = buffer.begin(); Path::parse(cursor).unwrap().0 } impl<'a> ToTokens for FnArg<'a> { fn to_tokens(&self, tokens: &mut Tokens) { match *self { FnArg::SelfRef(and, self_) => { and.to_tokens(tokens); self_.to_tokens(tokens); } FnArg::Arg { name, ref ty, mutbl } => { mutbl.to_tokens(tokens); name.to_tokens(tokens); Token!(:)([Span::def_site()]).to_tokens(tokens); ty.to_tokens(tokens); } } } } impl<'a> ToTokens for Ty<'a> { fn to_tokens(&self, tokens: &mut Tokens) { match *self { Ty::Unit => tokens.append(TokenTree { span: Span::call_site(), kind: TokenNode::Group(Delimiter::Parenthesis, quote!{ () }.into()), }), Ty::Char(tok) => tok.to_tokens(tokens), Ty::Bool(tok) => tok.to_tokens(tokens), Ty::Integer(t) => t.to_tokens(tokens), Ty::Borrowed(ref t) => { Token!(&)([Span::def_site()]).to_tokens(tokens); t.to_tokens(tokens) } Ty::Owned(t) => t.to_tokens(tokens), } } } pub mod tests { use super::*; pub fn run() { creates_trivial_class(); creates_class_with_superclass(); } fn test_class_and_superclass (raw: &str, class_name: &str, superclass_name: &str) { let token_stream = raw.parse::<TokenStream>().unwrap(); let buffer = TokenBuffer::new(token_stream); let cursor = buffer.begin(); let ast_program = ast::Program::parse(cursor).unwrap().0; let program = Program::from_ast_program(&ast_program).unwrap(); assert!(program.classes.len() == 1); let class = program.classes.get(class_name); assert_eq!(class.name.as_ref(), class_name); assert_eq!(class.parent.to_string(), superclass_name); } fn creates_trivial_class() { let raw = "class Foo {}"; test_class_and_superclass(raw, "Foo", "glib :: Object"); } fn creates_class_with_superclass() { let raw = "class Foo: Bar {}"; test_class_and_superclass(raw, "Foo", "Bar"); } }
len
identifier_name
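This second record is an `identifier_name` split: the middle is just the identifier at the cut point (`len`), the prefix ends immediately before it, and the suffix resumes at its parameter list. A hedged sketch of how such a record could be turned into a prefix-suffix-middle style prompt follows; the sentinel strings are placeholders, not the tokens of any particular model.

# Sketch only: <|fim_prefix|>/<|fim_suffix|>/<|fim_middle|> are placeholder sentinels.
def to_psm_prompt(record, prefix_tok="<|fim_prefix|>",
                  suffix_tok="<|fim_suffix|>", middle_tok="<|fim_middle|>"):
    """Format a FIM record so the model sees prefix and suffix and must
    produce record['middle'] as the completion target."""
    prompt = f"{prefix_tok}{record['prefix']}{suffix_tok}{record['suffix']}{middle_tok}"
    target = record["middle"]
    return prompt, target

prompt, target = to_psm_prompt({
    "prefix": "pub fn ",
    "suffix": "(&self) -> usize { self.items.len() }",
    "middle": "len",
})
print(target)  # -> "len"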
events.py
import abc import hashlib import time from functools import reduce from logging import getLevelName import attr import numpy as np import pathlib2 import six from PIL import Image from six.moves.urllib.parse import urlparse, urlunparse from ...backend_api.services import events from ...config import deferred_config from ...debugging.log import LoggerRoot from ...storage.util import quote_url from ...utilities.attrs import attrs from ...utilities.process.mp import SingletonLock @six.add_metaclass(abc.ABCMeta) class MetricsEventAdapter(object): """ Adapter providing all the base attributes required by a metrics event and defining an interface used by the metrics manager when batching and writing events. """ default_nan_value = 0. default_inf_value = 0. """ Default value used when a np.nan or np.inf value is encountered """ report_nan_warning_period = 1000 report_inf_warning_period = 1000 _report_nan_warning_iteration = float('inf') _report_inf_warning_iteration = float('inf') @attrs(cmp=False, slots=True) class FileEntry(object): """ File entry used to report on file data that needs to be uploaded prior to sending the event """ event = attr.attrib() name = attr.attrib() """ File name """ stream = attr.attrib() """ File-like object containing the file's data """ url_prop = attr.attrib() """ Property name that should be updated with the uploaded url """ key_prop = attr.attrib() upload_uri = attr.attrib() url = attr.attrib(default=None) exception = attr.attrib(default=None) retries = attr.attrib(default=None) delete_local_file = attr.attrib(default=True) """ Local file path, if exists, delete the file after upload completed """ def set_exception(self, exp): self.exception = exp self.event.upload_exception = exp @property def metric(self): return self._metric @metric.setter def metric(self, value): self._metric = value @property def variant(self): return self._variant def __init__(self, metric, variant, iter=None, timestamp=None, task=None, gen_timestamp_if_none=True, model_event=None): if not timestamp and gen_timestamp_if_none: timestamp = int(time.time() * 1000) self._metric = metric self._variant = variant self._iter = iter self._timestamp = timestamp self._task = task self._model_event = model_event # Try creating an event just to trigger validation _ = self.get_api_event() self.upload_exception = None @abc.abstractmethod def get_api_event(self): """ Get an API event instance """ pass def get_file_entry(self): """ Get information for a file that should be uploaded before this event is sent """ pass def get_iteration(self): return self._iter def update(self, task=None, iter_offset=None, **kwargs): """ Update event properties """ if task: self._task = task if iter_offset is not None and self._iter is not None: self._iter += iter_offset def _get_base_dict(self): """ Get a dict with the base attributes """ res = dict( task=self._task, timestamp=self._timestamp, metric=self._metric, variant=self._variant ) if self._iter is not None: res.update(iter=self._iter) if self._model_event is not None: res.update(model_event=self._model_event) return res @classmethod def _convert_np_nan_inf(cls, val): if np.isnan(val): cls._report_nan_warning_iteration += 1 if cls._report_nan_warning_iteration >= cls.report_nan_warning_period: LoggerRoot.get_base_logger().info( "NaN value encountered. Reporting it as '{}'. 
Use clearml.Logger.set_reporting_nan_value to assign another value".format( cls.default_nan_value ) ) cls._report_nan_warning_iteration = 0 return cls.default_nan_value if np.isinf(val): cls._report_inf_warning_iteration += 1 if cls._report_inf_warning_iteration >= cls.report_inf_warning_period:
LoggerRoot.get_base_logger().info( "inf value encountered. Reporting it as '{}'. Use clearml.Logger.set_reporting_inf_value to assign another value".format( cls.default_inf_value ) ) cls._report_inf_warning_iteration = 0 return cls.default_inf_value return val class ScalarEvent(MetricsEventAdapter): """ Scalar event adapter """ def __init__(self, metric, variant, value, iter, **kwargs): self._value = self._convert_np_nan_inf(value) super(ScalarEvent, self).__init__(metric=metric, variant=variant, iter=iter, **kwargs) def get_api_event(self): return events.MetricsScalarEvent( value=self._value, **self._get_base_dict()) class ConsoleEvent(MetricsEventAdapter): """ Console log event adapter """ def __init__(self, message, level, worker, **kwargs): self._value = str(message) self._level = getLevelName(level) if isinstance(level, int) else str(level) self._worker = worker super(ConsoleEvent, self).__init__(metric=None, variant=None, iter=0, **kwargs) def get_api_event(self): return events.TaskLogEvent( task=self._task, timestamp=self._timestamp, level=self._level, worker=self._worker, msg=self._value) class VectorEvent(MetricsEventAdapter): """ Vector event adapter """ def __init__(self, metric, variant, values, iter, **kwargs): self._values = [self._convert_np_nan_inf(v) for v in values] super(VectorEvent, self).__init__(metric=metric, variant=variant, iter=iter, **kwargs) def get_api_event(self): return events.MetricsVectorEvent( values=self._values, **self._get_base_dict()) class PlotEvent(MetricsEventAdapter): """ Plot event adapter """ def __init__(self, metric, variant, plot_str, iter=None, **kwargs): self._plot_str = plot_str super(PlotEvent, self).__init__(metric=metric, variant=variant, iter=iter, **kwargs) def get_api_event(self): return events.MetricsPlotEvent( plot_str=self._plot_str, **self._get_base_dict()) class ImageEventNoUpload(MetricsEventAdapter): def __init__(self, metric, variant, src, iter=0, **kwargs): self._url = src parts = urlparse(src) self._key = urlunparse(('', '', parts.path, parts.params, parts.query, parts.fragment)) super(ImageEventNoUpload, self).__init__(metric, variant, iter=iter, **kwargs) def get_api_event(self): return events.MetricsImageEvent( url=self._url, key=self._key, **self._get_base_dict()) class UploadEvent(MetricsEventAdapter): """ Image event adapter """ _format = deferred_config( 'metrics.images.format', 'JPEG', transform=lambda x: '.' + str(x).upper().lstrip('.') ) _quality = deferred_config('metrics.images.quality', 87, transform=int) _subsampling = deferred_config('metrics.images.subsampling', 0, transform=int) _file_history_size = deferred_config('metrics.file_history_size', 5, transform=int) _upload_retries = 3 _metric_counters = {} _metric_counters_lock = SingletonLock() @staticmethod def _replace_slash(part): # replace the three quote symbols we cannot have, # notice % will be converted to %25 when the link is quoted, so we should not use it # Replace quote safe characters: ";" | "/" | "?" 
| ":" | "@" | "&" | "=" | "+" | "$" | "," | "\n" | "\r" return reduce(lambda a, b: a.replace(b, "0x{:02x}".format(ord(b))), "#\"\';?:@&=+$,%!\r\n", part.replace('\\', '/').strip('/').replace('/', '.slash.')) def __init__(self, metric, variant, image_data, local_image_path=None, iter=0, upload_uri=None, file_history_size=None, delete_after_upload=False, **kwargs): # param override_filename: override uploaded file name (notice extension will be added from local path # param override_filename_ext: override uploaded file extension if image_data is not None and ( not hasattr(image_data, 'shape') and not isinstance(image_data, (six.StringIO, six.BytesIO))): raise ValueError('Image must have a shape attribute') self._image_data = image_data self._local_image_path = local_image_path self._url = None self._key = None self._count = None self._filename = None self.file_history_size = file_history_size or int(self._file_history_size) self._override_filename = kwargs.pop('override_filename', None) self._upload_uri = upload_uri self._delete_after_upload = delete_after_upload # get upload uri upfront, either predefined image format or local file extension # e.g.: image.png -> .png or image.raw.gz -> .raw.gz self._override_filename_ext = kwargs.pop('override_filename_ext', None) self._upload_filename = None self._override_storage_key_prefix = kwargs.pop('override_storage_key_prefix', None) self.retries = self._upload_retries super(UploadEvent, self).__init__(metric, variant, iter=iter, **kwargs) def _generate_file_name(self, force_pid_suffix=None): if force_pid_suffix is None and self._filename is not None: return self._count = self._get_metric_count(self._metric, self._variant) self._filename = self._override_filename if not self._filename: self._filename = '{}_{}'.format(self._metric, self._variant) cnt = self._count if self.file_history_size < 1 else (self._count % self.file_history_size) self._filename += '_{:05x}{:03d}'.format(force_pid_suffix, cnt) \ if force_pid_suffix else '_{:08d}'.format(cnt) # make sure we have to '/' in the filename because it might access other folders, # and we don't want that to occur self._filename = self._replace_slash(self._filename) # get upload uri upfront, either predefined image format or local file extension # e.g.: image.png -> .png or image.raw.gz -> .raw.gz filename_ext = self._override_filename_ext if filename_ext is None: filename_ext = str(self._format).lower() if self._image_data is not None else \ '.' + '.'.join(pathlib2.Path(self._local_image_path).parts[-1].split('.')[1:]) # always add file extension to the uploaded target file if filename_ext and filename_ext[0] != '.': filename_ext = '.' 
+ filename_ext self._upload_filename = pathlib2.Path(self._filename).as_posix() if self._filename.rpartition(".")[2] != filename_ext.rpartition(".")[2]: self._upload_filename += filename_ext @classmethod def _get_metric_count(cls, metric, variant, next=True): """ Returns the next count number for the given metric/variant (rotates every few calls) """ counters = cls._metric_counters key = '%s_%s' % (metric, variant) try: cls._metric_counters_lock.acquire() value = counters.get(key, -1) if next: value = counters[key] = value + 1 return value finally: cls._metric_counters_lock.release() # return No event (just the upload) def get_api_event(self): return None def update(self, url=None, key=None, **kwargs): super(UploadEvent, self).update(**kwargs) if url is not None: self._url = url if key is not None: self._key = key def get_file_entry(self): self._generate_file_name() local_file = None # Notice that in case we are running with reporter in subprocess, # when we are here, the cls._metric_counters is actually empty, # since it was updated on the main process and this function is running from the subprocess. # # In the future, if we want to support multi processes reporting images with the same title/series, # we should move the _count & _filename selection into the subprocess, not the main process. # For the time being, this will remain a limitation of the Image reporting mechanism. if isinstance(self._image_data, (six.StringIO, six.BytesIO)): output = self._image_data elif self._image_data is not None: image_data = self._image_data if not isinstance(image_data, np.ndarray): # try conversion, if it fails we'll leave it to the user. image_data = np.ndarray(image_data, dtype=np.uint8) image_data = np.atleast_3d(image_data) if image_data.dtype != np.uint8: if np.issubdtype(image_data.dtype, np.floating) and image_data.max() <= 1.0: image_data = (image_data * 255).astype(np.uint8) else: image_data = image_data.astype(np.uint8) shape = image_data.shape height, width, channel = shape[:3] if channel == 1: image_data = np.reshape(image_data, (height, width)) # serialize image image = Image.fromarray(image_data) output = six.BytesIO() image_format = Image.registered_extensions().get(str(self._format).lower(), 'JPEG') image.save(output, format=image_format, quality=int(self._quality)) output.seek(0) else: # noinspection PyBroadException try: output = pathlib2.Path(self._local_image_path) if output.is_file(): local_file = output else: output = None except Exception: output = None if output is None: LoggerRoot.get_base_logger().warning( 'Skipping upload, could not find object file \'{}\''.format(self._local_image_path)) return None return self.FileEntry( event=self, name=self._upload_filename, stream=output, url_prop='url', key_prop='key', upload_uri=self._upload_uri, delete_local_file=local_file if self._delete_after_upload else None, retries=self.retries, ) def get_target_full_upload_uri(self, storage_uri, storage_key_prefix=None, quote_uri=True): def limit_path_folder_length(folder_path): if not folder_path or len(folder_path) <= 250: return folder_path parts = folder_path.split('.') if len(parts) > 1: prefix = hashlib.md5(str('.'.join(parts[:-1])).encode('utf-8')).hexdigest() new_path = '{}.{}'.format(prefix, parts[-1]) if len(new_path) <= 250: return new_path return hashlib.md5(str(folder_path).encode('utf-8')).hexdigest() self._generate_file_name() e_storage_uri = self._upload_uri or storage_uri # if we have an entry (with or without a stream), we'll generate the URL and store it in the event 
filename = self._upload_filename if self._override_storage_key_prefix or not storage_key_prefix: storage_key_prefix = self._override_storage_key_prefix key = '/'.join(x for x in (storage_key_prefix, self._replace_slash(self.metric), self._replace_slash(self.variant), self._replace_slash(filename) ) if x) key = '/'.join(limit_path_folder_length(x) for x in key.split('/')) url = '/'.join(x.strip('/') for x in (e_storage_uri, key)) # make sure we preserve local path root if e_storage_uri.startswith('/'): url = '/' + url if quote_uri: url = quote_url(url) return key, url class ImageEvent(UploadEvent): def __init__(self, metric, variant, image_data, local_image_path=None, iter=0, upload_uri=None, file_history_size=None, delete_after_upload=False, **kwargs): super(ImageEvent, self).__init__(metric, variant, image_data=image_data, local_image_path=local_image_path, iter=iter, upload_uri=upload_uri, file_history_size=file_history_size, delete_after_upload=delete_after_upload, **kwargs) def get_api_event(self): return events.MetricsImageEvent( url=self._url, key=self._key, **self._get_base_dict() ) class MediaEvent(UploadEvent): def __init__(self, metric, variant, stream, local_image_path=None, iter=0, upload_uri=None, file_history_size=None, delete_after_upload=False, **kwargs): super(MediaEvent, self).__init__(metric, variant, image_data=stream, local_image_path=local_image_path, iter=iter, upload_uri=upload_uri, file_history_size=file_history_size, delete_after_upload=delete_after_upload, **kwargs) def get_api_event(self): return events.MetricsImageEvent( url=self._url, key=self._key, **self._get_base_dict() )
random_line_split
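The `_convert_np_nan_inf` helper in the record above substitutes a default value for NaN/inf scalars and throttles its warning so it is emitted at most once every `report_nan_warning_period` occurrences. The following is a standalone sketch of that throttling pattern; the class and attribute names are illustrative, not ClearML's API.

import math
import logging

# Illustrative re-implementation of the throttled-warning pattern above;
# names are placeholders, not ClearML's actual API.
class SafeScalar:
    default_value = 0.0
    warning_period = 1000               # warn at most once per this many bad values
    _since_last_warning = float("inf")  # "inf" forces a warning on the first bad value

    @classmethod
    def convert(cls, value):
        if math.isnan(value) or math.isinf(value):
            cls._since_last_warning += 1
            if cls._since_last_warning >= cls.warning_period:
                logging.getLogger(__name__).info(
                    "non-finite value encountered, reporting it as %s", cls.default_value
                )
                cls._since_last_warning = 0
            return cls.default_value
        return value

logging.basicConfig(level=logging.INFO)
print(SafeScalar.convert(float("nan")))  # 0.0, logs the warning once
print(SafeScalar.convert(3.5))           # 3.5, passed through untouched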
events.py
import abc import hashlib import time from functools import reduce from logging import getLevelName import attr import numpy as np import pathlib2 import six from PIL import Image from six.moves.urllib.parse import urlparse, urlunparse from ...backend_api.services import events from ...config import deferred_config from ...debugging.log import LoggerRoot from ...storage.util import quote_url from ...utilities.attrs import attrs from ...utilities.process.mp import SingletonLock @six.add_metaclass(abc.ABCMeta) class MetricsEventAdapter(object): """ Adapter providing all the base attributes required by a metrics event and defining an interface used by the metrics manager when batching and writing events. """ default_nan_value = 0. default_inf_value = 0. """ Default value used when a np.nan or np.inf value is encountered """ report_nan_warning_period = 1000 report_inf_warning_period = 1000 _report_nan_warning_iteration = float('inf') _report_inf_warning_iteration = float('inf') @attrs(cmp=False, slots=True) class FileEntry(object): """ File entry used to report on file data that needs to be uploaded prior to sending the event """ event = attr.attrib() name = attr.attrib() """ File name """ stream = attr.attrib() """ File-like object containing the file's data """ url_prop = attr.attrib() """ Property name that should be updated with the uploaded url """ key_prop = attr.attrib() upload_uri = attr.attrib() url = attr.attrib(default=None) exception = attr.attrib(default=None) retries = attr.attrib(default=None) delete_local_file = attr.attrib(default=True) """ Local file path, if exists, delete the file after upload completed """ def set_exception(self, exp): self.exception = exp self.event.upload_exception = exp @property def metric(self): return self._metric @metric.setter def metric(self, value): self._metric = value @property def variant(self): return self._variant def __init__(self, metric, variant, iter=None, timestamp=None, task=None, gen_timestamp_if_none=True, model_event=None): if not timestamp and gen_timestamp_if_none: timestamp = int(time.time() * 1000) self._metric = metric self._variant = variant self._iter = iter self._timestamp = timestamp self._task = task self._model_event = model_event # Try creating an event just to trigger validation _ = self.get_api_event() self.upload_exception = None @abc.abstractmethod def get_api_event(self): """ Get an API event instance """ pass def get_file_entry(self): """ Get information for a file that should be uploaded before this event is sent """ pass def get_iteration(self): return self._iter def update(self, task=None, iter_offset=None, **kwargs): """ Update event properties """ if task: self._task = task if iter_offset is not None and self._iter is not None: self._iter += iter_offset def _get_base_dict(self): """ Get a dict with the base attributes """ res = dict( task=self._task, timestamp=self._timestamp, metric=self._metric, variant=self._variant ) if self._iter is not None: res.update(iter=self._iter) if self._model_event is not None: res.update(model_event=self._model_event) return res @classmethod def _convert_np_nan_inf(cls, val): if np.isnan(val): cls._report_nan_warning_iteration += 1 if cls._report_nan_warning_iteration >= cls.report_nan_warning_period: LoggerRoot.get_base_logger().info( "NaN value encountered. Reporting it as '{}'. 
Use clearml.Logger.set_reporting_nan_value to assign another value".format( cls.default_nan_value ) ) cls._report_nan_warning_iteration = 0 return cls.default_nan_value if np.isinf(val): cls._report_inf_warning_iteration += 1 if cls._report_inf_warning_iteration >= cls.report_inf_warning_period: LoggerRoot.get_base_logger().info( "inf value encountered. Reporting it as '{}'. Use clearml.Logger.set_reporting_inf_value to assign another value".format( cls.default_inf_value ) ) cls._report_inf_warning_iteration = 0 return cls.default_inf_value return val class ScalarEvent(MetricsEventAdapter): """ Scalar event adapter """ def __init__(self, metric, variant, value, iter, **kwargs): self._value = self._convert_np_nan_inf(value) super(ScalarEvent, self).__init__(metric=metric, variant=variant, iter=iter, **kwargs) def get_api_event(self): return events.MetricsScalarEvent( value=self._value, **self._get_base_dict()) class ConsoleEvent(MetricsEventAdapter): """ Console log event adapter """ def __init__(self, message, level, worker, **kwargs): self._value = str(message) self._level = getLevelName(level) if isinstance(level, int) else str(level) self._worker = worker super(ConsoleEvent, self).__init__(metric=None, variant=None, iter=0, **kwargs) def get_api_event(self): return events.TaskLogEvent( task=self._task, timestamp=self._timestamp, level=self._level, worker=self._worker, msg=self._value) class VectorEvent(MetricsEventAdapter): """ Vector event adapter """ def __init__(self, metric, variant, values, iter, **kwargs): self._values = [self._convert_np_nan_inf(v) for v in values] super(VectorEvent, self).__init__(metric=metric, variant=variant, iter=iter, **kwargs) def get_api_event(self): return events.MetricsVectorEvent( values=self._values, **self._get_base_dict()) class PlotEvent(MetricsEventAdapter): """ Plot event adapter """ def __init__(self, metric, variant, plot_str, iter=None, **kwargs): self._plot_str = plot_str super(PlotEvent, self).__init__(metric=metric, variant=variant, iter=iter, **kwargs) def get_api_event(self): return events.MetricsPlotEvent( plot_str=self._plot_str, **self._get_base_dict()) class ImageEventNoUpload(MetricsEventAdapter): def __init__(self, metric, variant, src, iter=0, **kwargs): self._url = src parts = urlparse(src) self._key = urlunparse(('', '', parts.path, parts.params, parts.query, parts.fragment)) super(ImageEventNoUpload, self).__init__(metric, variant, iter=iter, **kwargs) def get_api_event(self): return events.MetricsImageEvent( url=self._url, key=self._key, **self._get_base_dict()) class UploadEvent(MetricsEventAdapter): """ Image event adapter """ _format = deferred_config( 'metrics.images.format', 'JPEG', transform=lambda x: '.' + str(x).upper().lstrip('.') ) _quality = deferred_config('metrics.images.quality', 87, transform=int) _subsampling = deferred_config('metrics.images.subsampling', 0, transform=int) _file_history_size = deferred_config('metrics.file_history_size', 5, transform=int) _upload_retries = 3 _metric_counters = {} _metric_counters_lock = SingletonLock() @staticmethod def _replace_slash(part): # replace the three quote symbols we cannot have, # notice % will be converted to %25 when the link is quoted, so we should not use it # Replace quote safe characters: ";" | "/" | "?" | ":" | "@" | "&" | "=" | "+" | "$" | "," | "\n" | "\r" return reduce(lambda a, b: a.replace(b, "0x{:02x}".format(ord(b))), "#\"\';?:@&=+$,%!\r\n", part.replace('\\', '/').strip('/').replace('/', '.slash.')) def
(self, metric, variant, image_data, local_image_path=None, iter=0, upload_uri=None, file_history_size=None, delete_after_upload=False, **kwargs): # param override_filename: override uploaded file name (notice extension will be added from local path # param override_filename_ext: override uploaded file extension if image_data is not None and ( not hasattr(image_data, 'shape') and not isinstance(image_data, (six.StringIO, six.BytesIO))): raise ValueError('Image must have a shape attribute') self._image_data = image_data self._local_image_path = local_image_path self._url = None self._key = None self._count = None self._filename = None self.file_history_size = file_history_size or int(self._file_history_size) self._override_filename = kwargs.pop('override_filename', None) self._upload_uri = upload_uri self._delete_after_upload = delete_after_upload # get upload uri upfront, either predefined image format or local file extension # e.g.: image.png -> .png or image.raw.gz -> .raw.gz self._override_filename_ext = kwargs.pop('override_filename_ext', None) self._upload_filename = None self._override_storage_key_prefix = kwargs.pop('override_storage_key_prefix', None) self.retries = self._upload_retries super(UploadEvent, self).__init__(metric, variant, iter=iter, **kwargs) def _generate_file_name(self, force_pid_suffix=None): if force_pid_suffix is None and self._filename is not None: return self._count = self._get_metric_count(self._metric, self._variant) self._filename = self._override_filename if not self._filename: self._filename = '{}_{}'.format(self._metric, self._variant) cnt = self._count if self.file_history_size < 1 else (self._count % self.file_history_size) self._filename += '_{:05x}{:03d}'.format(force_pid_suffix, cnt) \ if force_pid_suffix else '_{:08d}'.format(cnt) # make sure we have to '/' in the filename because it might access other folders, # and we don't want that to occur self._filename = self._replace_slash(self._filename) # get upload uri upfront, either predefined image format or local file extension # e.g.: image.png -> .png or image.raw.gz -> .raw.gz filename_ext = self._override_filename_ext if filename_ext is None: filename_ext = str(self._format).lower() if self._image_data is not None else \ '.' + '.'.join(pathlib2.Path(self._local_image_path).parts[-1].split('.')[1:]) # always add file extension to the uploaded target file if filename_ext and filename_ext[0] != '.': filename_ext = '.' 
+ filename_ext self._upload_filename = pathlib2.Path(self._filename).as_posix() if self._filename.rpartition(".")[2] != filename_ext.rpartition(".")[2]: self._upload_filename += filename_ext @classmethod def _get_metric_count(cls, metric, variant, next=True): """ Returns the next count number for the given metric/variant (rotates every few calls) """ counters = cls._metric_counters key = '%s_%s' % (metric, variant) try: cls._metric_counters_lock.acquire() value = counters.get(key, -1) if next: value = counters[key] = value + 1 return value finally: cls._metric_counters_lock.release() # return No event (just the upload) def get_api_event(self): return None def update(self, url=None, key=None, **kwargs): super(UploadEvent, self).update(**kwargs) if url is not None: self._url = url if key is not None: self._key = key def get_file_entry(self): self._generate_file_name() local_file = None # Notice that in case we are running with reporter in subprocess, # when we are here, the cls._metric_counters is actually empty, # since it was updated on the main process and this function is running from the subprocess. # # In the future, if we want to support multi processes reporting images with the same title/series, # we should move the _count & _filename selection into the subprocess, not the main process. # For the time being, this will remain a limitation of the Image reporting mechanism. if isinstance(self._image_data, (six.StringIO, six.BytesIO)): output = self._image_data elif self._image_data is not None: image_data = self._image_data if not isinstance(image_data, np.ndarray): # try conversion, if it fails we'll leave it to the user. image_data = np.ndarray(image_data, dtype=np.uint8) image_data = np.atleast_3d(image_data) if image_data.dtype != np.uint8: if np.issubdtype(image_data.dtype, np.floating) and image_data.max() <= 1.0: image_data = (image_data * 255).astype(np.uint8) else: image_data = image_data.astype(np.uint8) shape = image_data.shape height, width, channel = shape[:3] if channel == 1: image_data = np.reshape(image_data, (height, width)) # serialize image image = Image.fromarray(image_data) output = six.BytesIO() image_format = Image.registered_extensions().get(str(self._format).lower(), 'JPEG') image.save(output, format=image_format, quality=int(self._quality)) output.seek(0) else: # noinspection PyBroadException try: output = pathlib2.Path(self._local_image_path) if output.is_file(): local_file = output else: output = None except Exception: output = None if output is None: LoggerRoot.get_base_logger().warning( 'Skipping upload, could not find object file \'{}\''.format(self._local_image_path)) return None return self.FileEntry( event=self, name=self._upload_filename, stream=output, url_prop='url', key_prop='key', upload_uri=self._upload_uri, delete_local_file=local_file if self._delete_after_upload else None, retries=self.retries, ) def get_target_full_upload_uri(self, storage_uri, storage_key_prefix=None, quote_uri=True): def limit_path_folder_length(folder_path): if not folder_path or len(folder_path) <= 250: return folder_path parts = folder_path.split('.') if len(parts) > 1: prefix = hashlib.md5(str('.'.join(parts[:-1])).encode('utf-8')).hexdigest() new_path = '{}.{}'.format(prefix, parts[-1]) if len(new_path) <= 250: return new_path return hashlib.md5(str(folder_path).encode('utf-8')).hexdigest() self._generate_file_name() e_storage_uri = self._upload_uri or storage_uri # if we have an entry (with or without a stream), we'll generate the URL and store it in the event 
filename = self._upload_filename if self._override_storage_key_prefix or not storage_key_prefix: storage_key_prefix = self._override_storage_key_prefix key = '/'.join(x for x in (storage_key_prefix, self._replace_slash(self.metric), self._replace_slash(self.variant), self._replace_slash(filename) ) if x) key = '/'.join(limit_path_folder_length(x) for x in key.split('/')) url = '/'.join(x.strip('/') for x in (e_storage_uri, key)) # make sure we preserve local path root if e_storage_uri.startswith('/'): url = '/' + url if quote_uri: url = quote_url(url) return key, url class ImageEvent(UploadEvent): def __init__(self, metric, variant, image_data, local_image_path=None, iter=0, upload_uri=None, file_history_size=None, delete_after_upload=False, **kwargs): super(ImageEvent, self).__init__(metric, variant, image_data=image_data, local_image_path=local_image_path, iter=iter, upload_uri=upload_uri, file_history_size=file_history_size, delete_after_upload=delete_after_upload, **kwargs) def get_api_event(self): return events.MetricsImageEvent( url=self._url, key=self._key, **self._get_base_dict() ) class MediaEvent(UploadEvent): def __init__(self, metric, variant, stream, local_image_path=None, iter=0, upload_uri=None, file_history_size=None, delete_after_upload=False, **kwargs): super(MediaEvent, self).__init__(metric, variant, image_data=stream, local_image_path=local_image_path, iter=iter, upload_uri=upload_uri, file_history_size=file_history_size, delete_after_upload=delete_after_upload, **kwargs) def get_api_event(self): return events.MetricsImageEvent( url=self._url, key=self._key, **self._get_base_dict() )
__init__
identifier_name
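The `_replace_slash` helper shown above makes metric and variant names safe to use as path segments: backslashes are normalised to `/`, slashes become the literal `.slash.`, and characters that would otherwise need URL quoting are rewritten as `0xNN` hex escapes. A small sketch of the same idea follows; the escape set is copied as an assumption from the code above.

from functools import reduce

# Illustrative sanitiser mirroring the pattern above; the exact character set
# to escape is taken from the implementation in the record, as an assumption.
UNSAFE = "#\"';?:@&=+$,%!\r\n"

def sanitize_path_part(part):
    """Make a metric/variant name safe for use as a single path segment."""
    part = part.replace("\\", "/").strip("/").replace("/", ".slash.")
    return reduce(lambda acc, ch: acc.replace(ch, "0x{:02x}".format(ord(ch))), UNSAFE, part)

print(sanitize_path_part("train/loss"))  # 'train.slash.loss'
print(sanitize_path_part("acc@top-1"))   # 'acc0x40top-1'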
events.py
import abc import hashlib import time from functools import reduce from logging import getLevelName import attr import numpy as np import pathlib2 import six from PIL import Image from six.moves.urllib.parse import urlparse, urlunparse from ...backend_api.services import events from ...config import deferred_config from ...debugging.log import LoggerRoot from ...storage.util import quote_url from ...utilities.attrs import attrs from ...utilities.process.mp import SingletonLock @six.add_metaclass(abc.ABCMeta) class MetricsEventAdapter(object): """ Adapter providing all the base attributes required by a metrics event and defining an interface used by the metrics manager when batching and writing events. """ default_nan_value = 0. default_inf_value = 0. """ Default value used when a np.nan or np.inf value is encountered """ report_nan_warning_period = 1000 report_inf_warning_period = 1000 _report_nan_warning_iteration = float('inf') _report_inf_warning_iteration = float('inf') @attrs(cmp=False, slots=True) class FileEntry(object): """ File entry used to report on file data that needs to be uploaded prior to sending the event """ event = attr.attrib() name = attr.attrib() """ File name """ stream = attr.attrib() """ File-like object containing the file's data """ url_prop = attr.attrib() """ Property name that should be updated with the uploaded url """ key_prop = attr.attrib() upload_uri = attr.attrib() url = attr.attrib(default=None) exception = attr.attrib(default=None) retries = attr.attrib(default=None) delete_local_file = attr.attrib(default=True) """ Local file path, if exists, delete the file after upload completed """ def set_exception(self, exp): self.exception = exp self.event.upload_exception = exp @property def metric(self): return self._metric @metric.setter def metric(self, value): self._metric = value @property def variant(self): return self._variant def __init__(self, metric, variant, iter=None, timestamp=None, task=None, gen_timestamp_if_none=True, model_event=None): if not timestamp and gen_timestamp_if_none: timestamp = int(time.time() * 1000) self._metric = metric self._variant = variant self._iter = iter self._timestamp = timestamp self._task = task self._model_event = model_event # Try creating an event just to trigger validation _ = self.get_api_event() self.upload_exception = None @abc.abstractmethod def get_api_event(self): """ Get an API event instance """ pass def get_file_entry(self): """ Get information for a file that should be uploaded before this event is sent """ pass def get_iteration(self): return self._iter def update(self, task=None, iter_offset=None, **kwargs):
def _get_base_dict(self): """ Get a dict with the base attributes """ res = dict( task=self._task, timestamp=self._timestamp, metric=self._metric, variant=self._variant ) if self._iter is not None: res.update(iter=self._iter) if self._model_event is not None: res.update(model_event=self._model_event) return res @classmethod def _convert_np_nan_inf(cls, val): if np.isnan(val): cls._report_nan_warning_iteration += 1 if cls._report_nan_warning_iteration >= cls.report_nan_warning_period: LoggerRoot.get_base_logger().info( "NaN value encountered. Reporting it as '{}'. Use clearml.Logger.set_reporting_nan_value to assign another value".format( cls.default_nan_value ) ) cls._report_nan_warning_iteration = 0 return cls.default_nan_value if np.isinf(val): cls._report_inf_warning_iteration += 1 if cls._report_inf_warning_iteration >= cls.report_inf_warning_period: LoggerRoot.get_base_logger().info( "inf value encountered. Reporting it as '{}'. Use clearml.Logger.set_reporting_inf_value to assign another value".format( cls.default_inf_value ) ) cls._report_inf_warning_iteration = 0 return cls.default_inf_value return val class ScalarEvent(MetricsEventAdapter): """ Scalar event adapter """ def __init__(self, metric, variant, value, iter, **kwargs): self._value = self._convert_np_nan_inf(value) super(ScalarEvent, self).__init__(metric=metric, variant=variant, iter=iter, **kwargs) def get_api_event(self): return events.MetricsScalarEvent( value=self._value, **self._get_base_dict()) class ConsoleEvent(MetricsEventAdapter): """ Console log event adapter """ def __init__(self, message, level, worker, **kwargs): self._value = str(message) self._level = getLevelName(level) if isinstance(level, int) else str(level) self._worker = worker super(ConsoleEvent, self).__init__(metric=None, variant=None, iter=0, **kwargs) def get_api_event(self): return events.TaskLogEvent( task=self._task, timestamp=self._timestamp, level=self._level, worker=self._worker, msg=self._value) class VectorEvent(MetricsEventAdapter): """ Vector event adapter """ def __init__(self, metric, variant, values, iter, **kwargs): self._values = [self._convert_np_nan_inf(v) for v in values] super(VectorEvent, self).__init__(metric=metric, variant=variant, iter=iter, **kwargs) def get_api_event(self): return events.MetricsVectorEvent( values=self._values, **self._get_base_dict()) class PlotEvent(MetricsEventAdapter): """ Plot event adapter """ def __init__(self, metric, variant, plot_str, iter=None, **kwargs): self._plot_str = plot_str super(PlotEvent, self).__init__(metric=metric, variant=variant, iter=iter, **kwargs) def get_api_event(self): return events.MetricsPlotEvent( plot_str=self._plot_str, **self._get_base_dict()) class ImageEventNoUpload(MetricsEventAdapter): def __init__(self, metric, variant, src, iter=0, **kwargs): self._url = src parts = urlparse(src) self._key = urlunparse(('', '', parts.path, parts.params, parts.query, parts.fragment)) super(ImageEventNoUpload, self).__init__(metric, variant, iter=iter, **kwargs) def get_api_event(self): return events.MetricsImageEvent( url=self._url, key=self._key, **self._get_base_dict()) class UploadEvent(MetricsEventAdapter): """ Image event adapter """ _format = deferred_config( 'metrics.images.format', 'JPEG', transform=lambda x: '.' 
+ str(x).upper().lstrip('.') ) _quality = deferred_config('metrics.images.quality', 87, transform=int) _subsampling = deferred_config('metrics.images.subsampling', 0, transform=int) _file_history_size = deferred_config('metrics.file_history_size', 5, transform=int) _upload_retries = 3 _metric_counters = {} _metric_counters_lock = SingletonLock() @staticmethod def _replace_slash(part): # replace the three quote symbols we cannot have, # notice % will be converted to %25 when the link is quoted, so we should not use it # Replace quote safe characters: ";" | "/" | "?" | ":" | "@" | "&" | "=" | "+" | "$" | "," | "\n" | "\r" return reduce(lambda a, b: a.replace(b, "0x{:02x}".format(ord(b))), "#\"\';?:@&=+$,%!\r\n", part.replace('\\', '/').strip('/').replace('/', '.slash.')) def __init__(self, metric, variant, image_data, local_image_path=None, iter=0, upload_uri=None, file_history_size=None, delete_after_upload=False, **kwargs): # param override_filename: override uploaded file name (notice extension will be added from local path # param override_filename_ext: override uploaded file extension if image_data is not None and ( not hasattr(image_data, 'shape') and not isinstance(image_data, (six.StringIO, six.BytesIO))): raise ValueError('Image must have a shape attribute') self._image_data = image_data self._local_image_path = local_image_path self._url = None self._key = None self._count = None self._filename = None self.file_history_size = file_history_size or int(self._file_history_size) self._override_filename = kwargs.pop('override_filename', None) self._upload_uri = upload_uri self._delete_after_upload = delete_after_upload # get upload uri upfront, either predefined image format or local file extension # e.g.: image.png -> .png or image.raw.gz -> .raw.gz self._override_filename_ext = kwargs.pop('override_filename_ext', None) self._upload_filename = None self._override_storage_key_prefix = kwargs.pop('override_storage_key_prefix', None) self.retries = self._upload_retries super(UploadEvent, self).__init__(metric, variant, iter=iter, **kwargs) def _generate_file_name(self, force_pid_suffix=None): if force_pid_suffix is None and self._filename is not None: return self._count = self._get_metric_count(self._metric, self._variant) self._filename = self._override_filename if not self._filename: self._filename = '{}_{}'.format(self._metric, self._variant) cnt = self._count if self.file_history_size < 1 else (self._count % self.file_history_size) self._filename += '_{:05x}{:03d}'.format(force_pid_suffix, cnt) \ if force_pid_suffix else '_{:08d}'.format(cnt) # make sure we have to '/' in the filename because it might access other folders, # and we don't want that to occur self._filename = self._replace_slash(self._filename) # get upload uri upfront, either predefined image format or local file extension # e.g.: image.png -> .png or image.raw.gz -> .raw.gz filename_ext = self._override_filename_ext if filename_ext is None: filename_ext = str(self._format).lower() if self._image_data is not None else \ '.' + '.'.join(pathlib2.Path(self._local_image_path).parts[-1].split('.')[1:]) # always add file extension to the uploaded target file if filename_ext and filename_ext[0] != '.': filename_ext = '.' 
+ filename_ext self._upload_filename = pathlib2.Path(self._filename).as_posix() if self._filename.rpartition(".")[2] != filename_ext.rpartition(".")[2]: self._upload_filename += filename_ext @classmethod def _get_metric_count(cls, metric, variant, next=True): """ Returns the next count number for the given metric/variant (rotates every few calls) """ counters = cls._metric_counters key = '%s_%s' % (metric, variant) try: cls._metric_counters_lock.acquire() value = counters.get(key, -1) if next: value = counters[key] = value + 1 return value finally: cls._metric_counters_lock.release() # return No event (just the upload) def get_api_event(self): return None def update(self, url=None, key=None, **kwargs): super(UploadEvent, self).update(**kwargs) if url is not None: self._url = url if key is not None: self._key = key def get_file_entry(self): self._generate_file_name() local_file = None # Notice that in case we are running with reporter in subprocess, # when we are here, the cls._metric_counters is actually empty, # since it was updated on the main process and this function is running from the subprocess. # # In the future, if we want to support multi processes reporting images with the same title/series, # we should move the _count & _filename selection into the subprocess, not the main process. # For the time being, this will remain a limitation of the Image reporting mechanism. if isinstance(self._image_data, (six.StringIO, six.BytesIO)): output = self._image_data elif self._image_data is not None: image_data = self._image_data if not isinstance(image_data, np.ndarray): # try conversion, if it fails we'll leave it to the user. image_data = np.ndarray(image_data, dtype=np.uint8) image_data = np.atleast_3d(image_data) if image_data.dtype != np.uint8: if np.issubdtype(image_data.dtype, np.floating) and image_data.max() <= 1.0: image_data = (image_data * 255).astype(np.uint8) else: image_data = image_data.astype(np.uint8) shape = image_data.shape height, width, channel = shape[:3] if channel == 1: image_data = np.reshape(image_data, (height, width)) # serialize image image = Image.fromarray(image_data) output = six.BytesIO() image_format = Image.registered_extensions().get(str(self._format).lower(), 'JPEG') image.save(output, format=image_format, quality=int(self._quality)) output.seek(0) else: # noinspection PyBroadException try: output = pathlib2.Path(self._local_image_path) if output.is_file(): local_file = output else: output = None except Exception: output = None if output is None: LoggerRoot.get_base_logger().warning( 'Skipping upload, could not find object file \'{}\''.format(self._local_image_path)) return None return self.FileEntry( event=self, name=self._upload_filename, stream=output, url_prop='url', key_prop='key', upload_uri=self._upload_uri, delete_local_file=local_file if self._delete_after_upload else None, retries=self.retries, ) def get_target_full_upload_uri(self, storage_uri, storage_key_prefix=None, quote_uri=True): def limit_path_folder_length(folder_path): if not folder_path or len(folder_path) <= 250: return folder_path parts = folder_path.split('.') if len(parts) > 1: prefix = hashlib.md5(str('.'.join(parts[:-1])).encode('utf-8')).hexdigest() new_path = '{}.{}'.format(prefix, parts[-1]) if len(new_path) <= 250: return new_path return hashlib.md5(str(folder_path).encode('utf-8')).hexdigest() self._generate_file_name() e_storage_uri = self._upload_uri or storage_uri # if we have an entry (with or without a stream), we'll generate the URL and store it in the event 
filename = self._upload_filename if self._override_storage_key_prefix or not storage_key_prefix: storage_key_prefix = self._override_storage_key_prefix key = '/'.join(x for x in (storage_key_prefix, self._replace_slash(self.metric), self._replace_slash(self.variant), self._replace_slash(filename) ) if x) key = '/'.join(limit_path_folder_length(x) for x in key.split('/')) url = '/'.join(x.strip('/') for x in (e_storage_uri, key)) # make sure we preserve local path root if e_storage_uri.startswith('/'): url = '/' + url if quote_uri: url = quote_url(url) return key, url class ImageEvent(UploadEvent): def __init__(self, metric, variant, image_data, local_image_path=None, iter=0, upload_uri=None, file_history_size=None, delete_after_upload=False, **kwargs): super(ImageEvent, self).__init__(metric, variant, image_data=image_data, local_image_path=local_image_path, iter=iter, upload_uri=upload_uri, file_history_size=file_history_size, delete_after_upload=delete_after_upload, **kwargs) def get_api_event(self): return events.MetricsImageEvent( url=self._url, key=self._key, **self._get_base_dict() ) class MediaEvent(UploadEvent): def __init__(self, metric, variant, stream, local_image_path=None, iter=0, upload_uri=None, file_history_size=None, delete_after_upload=False, **kwargs): super(MediaEvent, self).__init__(metric, variant, image_data=stream, local_image_path=local_image_path, iter=iter, upload_uri=upload_uri, file_history_size=file_history_size, delete_after_upload=delete_after_upload, **kwargs) def get_api_event(self): return events.MetricsImageEvent( url=self._url, key=self._key, **self._get_base_dict() )
""" Update event properties """ if task: self._task = task if iter_offset is not None and self._iter is not None: self._iter += iter_offset
identifier_body
events.py
import abc import hashlib import time from functools import reduce from logging import getLevelName import attr import numpy as np import pathlib2 import six from PIL import Image from six.moves.urllib.parse import urlparse, urlunparse from ...backend_api.services import events from ...config import deferred_config from ...debugging.log import LoggerRoot from ...storage.util import quote_url from ...utilities.attrs import attrs from ...utilities.process.mp import SingletonLock @six.add_metaclass(abc.ABCMeta) class MetricsEventAdapter(object): """ Adapter providing all the base attributes required by a metrics event and defining an interface used by the metrics manager when batching and writing events. """ default_nan_value = 0. default_inf_value = 0. """ Default value used when a np.nan or np.inf value is encountered """ report_nan_warning_period = 1000 report_inf_warning_period = 1000 _report_nan_warning_iteration = float('inf') _report_inf_warning_iteration = float('inf') @attrs(cmp=False, slots=True) class FileEntry(object): """ File entry used to report on file data that needs to be uploaded prior to sending the event """ event = attr.attrib() name = attr.attrib() """ File name """ stream = attr.attrib() """ File-like object containing the file's data """ url_prop = attr.attrib() """ Property name that should be updated with the uploaded url """ key_prop = attr.attrib() upload_uri = attr.attrib() url = attr.attrib(default=None) exception = attr.attrib(default=None) retries = attr.attrib(default=None) delete_local_file = attr.attrib(default=True) """ Local file path, if exists, delete the file after upload completed """ def set_exception(self, exp): self.exception = exp self.event.upload_exception = exp @property def metric(self): return self._metric @metric.setter def metric(self, value): self._metric = value @property def variant(self): return self._variant def __init__(self, metric, variant, iter=None, timestamp=None, task=None, gen_timestamp_if_none=True, model_event=None): if not timestamp and gen_timestamp_if_none: timestamp = int(time.time() * 1000) self._metric = metric self._variant = variant self._iter = iter self._timestamp = timestamp self._task = task self._model_event = model_event # Try creating an event just to trigger validation _ = self.get_api_event() self.upload_exception = None @abc.abstractmethod def get_api_event(self): """ Get an API event instance """ pass def get_file_entry(self): """ Get information for a file that should be uploaded before this event is sent """ pass def get_iteration(self): return self._iter def update(self, task=None, iter_offset=None, **kwargs): """ Update event properties """ if task: self._task = task if iter_offset is not None and self._iter is not None: self._iter += iter_offset def _get_base_dict(self): """ Get a dict with the base attributes """ res = dict( task=self._task, timestamp=self._timestamp, metric=self._metric, variant=self._variant ) if self._iter is not None: res.update(iter=self._iter) if self._model_event is not None: res.update(model_event=self._model_event) return res @classmethod def _convert_np_nan_inf(cls, val): if np.isnan(val): cls._report_nan_warning_iteration += 1 if cls._report_nan_warning_iteration >= cls.report_nan_warning_period: LoggerRoot.get_base_logger().info( "NaN value encountered. Reporting it as '{}'. Use clearml.Logger.set_reporting_nan_value to assign another value".format( cls.default_nan_value ) ) cls._report_nan_warning_iteration = 0 return cls.default_nan_value if np.isinf(val):
return val class ScalarEvent(MetricsEventAdapter): """ Scalar event adapter """ def __init__(self, metric, variant, value, iter, **kwargs): self._value = self._convert_np_nan_inf(value) super(ScalarEvent, self).__init__(metric=metric, variant=variant, iter=iter, **kwargs) def get_api_event(self): return events.MetricsScalarEvent( value=self._value, **self._get_base_dict()) class ConsoleEvent(MetricsEventAdapter): """ Console log event adapter """ def __init__(self, message, level, worker, **kwargs): self._value = str(message) self._level = getLevelName(level) if isinstance(level, int) else str(level) self._worker = worker super(ConsoleEvent, self).__init__(metric=None, variant=None, iter=0, **kwargs) def get_api_event(self): return events.TaskLogEvent( task=self._task, timestamp=self._timestamp, level=self._level, worker=self._worker, msg=self._value) class VectorEvent(MetricsEventAdapter): """ Vector event adapter """ def __init__(self, metric, variant, values, iter, **kwargs): self._values = [self._convert_np_nan_inf(v) for v in values] super(VectorEvent, self).__init__(metric=metric, variant=variant, iter=iter, **kwargs) def get_api_event(self): return events.MetricsVectorEvent( values=self._values, **self._get_base_dict()) class PlotEvent(MetricsEventAdapter): """ Plot event adapter """ def __init__(self, metric, variant, plot_str, iter=None, **kwargs): self._plot_str = plot_str super(PlotEvent, self).__init__(metric=metric, variant=variant, iter=iter, **kwargs) def get_api_event(self): return events.MetricsPlotEvent( plot_str=self._plot_str, **self._get_base_dict()) class ImageEventNoUpload(MetricsEventAdapter): def __init__(self, metric, variant, src, iter=0, **kwargs): self._url = src parts = urlparse(src) self._key = urlunparse(('', '', parts.path, parts.params, parts.query, parts.fragment)) super(ImageEventNoUpload, self).__init__(metric, variant, iter=iter, **kwargs) def get_api_event(self): return events.MetricsImageEvent( url=self._url, key=self._key, **self._get_base_dict()) class UploadEvent(MetricsEventAdapter): """ Image event adapter """ _format = deferred_config( 'metrics.images.format', 'JPEG', transform=lambda x: '.' + str(x).upper().lstrip('.') ) _quality = deferred_config('metrics.images.quality', 87, transform=int) _subsampling = deferred_config('metrics.images.subsampling', 0, transform=int) _file_history_size = deferred_config('metrics.file_history_size', 5, transform=int) _upload_retries = 3 _metric_counters = {} _metric_counters_lock = SingletonLock() @staticmethod def _replace_slash(part): # replace the three quote symbols we cannot have, # notice % will be converted to %25 when the link is quoted, so we should not use it # Replace quote safe characters: ";" | "/" | "?" 
| ":" | "@" | "&" | "=" | "+" | "$" | "," | "\n" | "\r" return reduce(lambda a, b: a.replace(b, "0x{:02x}".format(ord(b))), "#\"\';?:@&=+$,%!\r\n", part.replace('\\', '/').strip('/').replace('/', '.slash.')) def __init__(self, metric, variant, image_data, local_image_path=None, iter=0, upload_uri=None, file_history_size=None, delete_after_upload=False, **kwargs): # param override_filename: override uploaded file name (notice extension will be added from local path # param override_filename_ext: override uploaded file extension if image_data is not None and ( not hasattr(image_data, 'shape') and not isinstance(image_data, (six.StringIO, six.BytesIO))): raise ValueError('Image must have a shape attribute') self._image_data = image_data self._local_image_path = local_image_path self._url = None self._key = None self._count = None self._filename = None self.file_history_size = file_history_size or int(self._file_history_size) self._override_filename = kwargs.pop('override_filename', None) self._upload_uri = upload_uri self._delete_after_upload = delete_after_upload # get upload uri upfront, either predefined image format or local file extension # e.g.: image.png -> .png or image.raw.gz -> .raw.gz self._override_filename_ext = kwargs.pop('override_filename_ext', None) self._upload_filename = None self._override_storage_key_prefix = kwargs.pop('override_storage_key_prefix', None) self.retries = self._upload_retries super(UploadEvent, self).__init__(metric, variant, iter=iter, **kwargs) def _generate_file_name(self, force_pid_suffix=None): if force_pid_suffix is None and self._filename is not None: return self._count = self._get_metric_count(self._metric, self._variant) self._filename = self._override_filename if not self._filename: self._filename = '{}_{}'.format(self._metric, self._variant) cnt = self._count if self.file_history_size < 1 else (self._count % self.file_history_size) self._filename += '_{:05x}{:03d}'.format(force_pid_suffix, cnt) \ if force_pid_suffix else '_{:08d}'.format(cnt) # make sure we have to '/' in the filename because it might access other folders, # and we don't want that to occur self._filename = self._replace_slash(self._filename) # get upload uri upfront, either predefined image format or local file extension # e.g.: image.png -> .png or image.raw.gz -> .raw.gz filename_ext = self._override_filename_ext if filename_ext is None: filename_ext = str(self._format).lower() if self._image_data is not None else \ '.' + '.'.join(pathlib2.Path(self._local_image_path).parts[-1].split('.')[1:]) # always add file extension to the uploaded target file if filename_ext and filename_ext[0] != '.': filename_ext = '.' 
+ filename_ext self._upload_filename = pathlib2.Path(self._filename).as_posix() if self._filename.rpartition(".")[2] != filename_ext.rpartition(".")[2]: self._upload_filename += filename_ext @classmethod def _get_metric_count(cls, metric, variant, next=True): """ Returns the next count number for the given metric/variant (rotates every few calls) """ counters = cls._metric_counters key = '%s_%s' % (metric, variant) try: cls._metric_counters_lock.acquire() value = counters.get(key, -1) if next: value = counters[key] = value + 1 return value finally: cls._metric_counters_lock.release() # return No event (just the upload) def get_api_event(self): return None def update(self, url=None, key=None, **kwargs): super(UploadEvent, self).update(**kwargs) if url is not None: self._url = url if key is not None: self._key = key def get_file_entry(self): self._generate_file_name() local_file = None # Notice that in case we are running with reporter in subprocess, # when we are here, the cls._metric_counters is actually empty, # since it was updated on the main process and this function is running from the subprocess. # # In the future, if we want to support multi processes reporting images with the same title/series, # we should move the _count & _filename selection into the subprocess, not the main process. # For the time being, this will remain a limitation of the Image reporting mechanism. if isinstance(self._image_data, (six.StringIO, six.BytesIO)): output = self._image_data elif self._image_data is not None: image_data = self._image_data if not isinstance(image_data, np.ndarray): # try conversion, if it fails we'll leave it to the user. image_data = np.ndarray(image_data, dtype=np.uint8) image_data = np.atleast_3d(image_data) if image_data.dtype != np.uint8: if np.issubdtype(image_data.dtype, np.floating) and image_data.max() <= 1.0: image_data = (image_data * 255).astype(np.uint8) else: image_data = image_data.astype(np.uint8) shape = image_data.shape height, width, channel = shape[:3] if channel == 1: image_data = np.reshape(image_data, (height, width)) # serialize image image = Image.fromarray(image_data) output = six.BytesIO() image_format = Image.registered_extensions().get(str(self._format).lower(), 'JPEG') image.save(output, format=image_format, quality=int(self._quality)) output.seek(0) else: # noinspection PyBroadException try: output = pathlib2.Path(self._local_image_path) if output.is_file(): local_file = output else: output = None except Exception: output = None if output is None: LoggerRoot.get_base_logger().warning( 'Skipping upload, could not find object file \'{}\''.format(self._local_image_path)) return None return self.FileEntry( event=self, name=self._upload_filename, stream=output, url_prop='url', key_prop='key', upload_uri=self._upload_uri, delete_local_file=local_file if self._delete_after_upload else None, retries=self.retries, ) def get_target_full_upload_uri(self, storage_uri, storage_key_prefix=None, quote_uri=True): def limit_path_folder_length(folder_path): if not folder_path or len(folder_path) <= 250: return folder_path parts = folder_path.split('.') if len(parts) > 1: prefix = hashlib.md5(str('.'.join(parts[:-1])).encode('utf-8')).hexdigest() new_path = '{}.{}'.format(prefix, parts[-1]) if len(new_path) <= 250: return new_path return hashlib.md5(str(folder_path).encode('utf-8')).hexdigest() self._generate_file_name() e_storage_uri = self._upload_uri or storage_uri # if we have an entry (with or without a stream), we'll generate the URL and store it in the event 
filename = self._upload_filename if self._override_storage_key_prefix or not storage_key_prefix: storage_key_prefix = self._override_storage_key_prefix key = '/'.join(x for x in (storage_key_prefix, self._replace_slash(self.metric), self._replace_slash(self.variant), self._replace_slash(filename) ) if x) key = '/'.join(limit_path_folder_length(x) for x in key.split('/')) url = '/'.join(x.strip('/') for x in (e_storage_uri, key)) # make sure we preserve local path root if e_storage_uri.startswith('/'): url = '/' + url if quote_uri: url = quote_url(url) return key, url class ImageEvent(UploadEvent): def __init__(self, metric, variant, image_data, local_image_path=None, iter=0, upload_uri=None, file_history_size=None, delete_after_upload=False, **kwargs): super(ImageEvent, self).__init__(metric, variant, image_data=image_data, local_image_path=local_image_path, iter=iter, upload_uri=upload_uri, file_history_size=file_history_size, delete_after_upload=delete_after_upload, **kwargs) def get_api_event(self): return events.MetricsImageEvent( url=self._url, key=self._key, **self._get_base_dict() ) class MediaEvent(UploadEvent): def __init__(self, metric, variant, stream, local_image_path=None, iter=0, upload_uri=None, file_history_size=None, delete_after_upload=False, **kwargs): super(MediaEvent, self).__init__(metric, variant, image_data=stream, local_image_path=local_image_path, iter=iter, upload_uri=upload_uri, file_history_size=file_history_size, delete_after_upload=delete_after_upload, **kwargs) def get_api_event(self): return events.MetricsImageEvent( url=self._url, key=self._key, **self._get_base_dict() )
            cls._report_inf_warning_iteration += 1
            if cls._report_inf_warning_iteration >= cls.report_inf_warning_period:
                LoggerRoot.get_base_logger().info(
                    "inf value encountered. Reporting it as '{}'. Use clearml.Logger.set_reporting_inf_value to assign another value".format(
                        cls.default_inf_value
                    )
                )
                cls._report_inf_warning_iteration = 0
            return cls.default_inf_value
conditional_block
merkle.rs
// LNP/BP client-side-validation foundation libraries implementing LNPBP // specifications & standards (LNPBP-4, 7, 8, 9, 42, 81) // // Written in 2019-2021 by // Dr. Maxim Orlovsky <[email protected]> // // To the extent possible under law, the author(s) have dedicated all // copyright and related and neighboring rights to this software to // the public domain worldwide. This software is distributed without // any warranty. // // You should have received a copy of the Apache 2.0 License along with this // software. If not, see <https://opensource.org/licenses/Apache-2.0>. //! Merklization procedures for client-side-validation according to [LNPBP-81] //! standard. //! //! [LNPBP-81]: https://github.com/LNP-BP/LNPBPs/blob/master/lnpbp-0081.md use std::io; use std::iter::FromIterator; use bitcoin_hashes::{sha256, Hash, HashEngine}; use crate::{commit_encode, CommitEncode, CommitVerify, ConsensusCommit}; /// Marker trait for types that require merklization of the underlying data /// during [`ConsensusCommit`] procedure. Allows specifying custom tag for the /// tagged hash used in the merklization (see [`merklize`]). pub trait ConsensusMerkleCommit: ConsensusCommit<Commitment = MerkleNode> { /// The tag prefix which will be used in the merklization process (see /// [`merklize`]) const MERKLE_NODE_PREFIX: &'static str; } hash_newtype!( MerkleNode, sha256::Hash, 32, doc = "A hash type for LNPBP-81 Merkle tree leaves, branches and root", false // We do not reverse displaying MerkleNodes in hexadecimal ); impl strict_encoding::Strategy for MerkleNode { type Strategy = strict_encoding::strategies::HashFixedBytes; } impl commit_encode::Strategy for MerkleNode { type Strategy = commit_encode::strategies::UsingStrict; } impl<MSG> CommitVerify<MSG> for MerkleNode where MSG: AsRef<[u8]>, { #[inline] fn commit(msg: &MSG) -> MerkleNode { MerkleNode::hash(msg.as_ref()) } } impl<A, B> ConsensusCommit for (A, B) where A: CommitEncode, B: CommitEncode, { type Commitment = MerkleNode; } impl<A, B, C> ConsensusCommit for (A, B, C) where A: CommitEncode, B: CommitEncode, C: CommitEncode, { type Commitment = MerkleNode; } /// Merklization procedure that uses tagged hashes with depth commitments /// according to [LNPBP-81] standard of client-side-validation merklization /// /// [LNPBP-81]: https://github.com/LNP-BP/LNPBPs/blob/master/lnpbp-0081.md pub fn merklize<I>(prefix: &str, data: I) -> (MerkleNode, u8) where I: IntoIterator<Item = MerkleNode>, <I as IntoIterator>::IntoIter: ExactSizeIterator<Item = MerkleNode>, { let mut tag_engine = sha256::Hash::engine(); tag_engine.input(prefix.as_bytes()); tag_engine.input(":merkle:".as_bytes()); let iter = data.into_iter(); let width = iter.len(); // Tagging merkle tree root let (root, height) = merklize_inner(&tag_engine, iter, 0, false, None); tag_engine.input("root:height=".as_bytes()); tag_engine.input(&height.to_string().into_bytes()); tag_engine.input(":width=".as_bytes()); tag_engine.input(&width.to_string().into_bytes()); let tag_hash = sha256::Hash::hash(&sha256::Hash::from_engine(tag_engine)); let mut engine = MerkleNode::engine(); engine.input(&tag_hash[..]); engine.input(&tag_hash[..]); root.commit_encode(&mut engine); let tagged_root = MerkleNode::from_engine(engine); (tagged_root, height) } // TODO: Optimize to avoid allocations // In current rust generic iterators do not work with recursion :( fn merklize_inner( engine_proto: &sha256::HashEngine, mut iter: impl ExactSizeIterator<Item = MerkleNode>, depth: u8, extend: bool, empty_node: Option<MerkleNode>, 
) -> (MerkleNode, u8) { let len = iter.len() + extend as usize; let empty_node = empty_node.unwrap_or_else(|| MerkleNode::hash(&[0xFF])); // Computing tagged hash as per BIP-340 let mut tag_engine = engine_proto.clone(); tag_engine.input("depth=".as_bytes()); tag_engine.input(depth.to_string().as_bytes()); tag_engine.input(":width=".as_bytes()); tag_engine.input(len.to_string().as_bytes()); tag_engine.input(":height=".as_bytes()); let mut engine = MerkleNode::engine(); if len <= 2 { tag_engine.input("0:".as_bytes()); let tag_hash = sha256::Hash::hash(&sha256::Hash::from_engine(tag_engine)); engine.input(&tag_hash[..]); engine.input(&tag_hash[..]); let mut leaf_tag_engine = engine_proto.clone(); leaf_tag_engine.input("leaf".as_bytes()); let leaf_tag = sha256::Hash::hash(&sha256::Hash::from_engine(leaf_tag_engine)); let mut leaf_engine = MerkleNode::engine(); leaf_engine.input(&leaf_tag[..]); leaf_engine.input(&leaf_tag[..]); let mut leaf1 = leaf_engine.clone(); leaf1.input( iter.next() .as_ref() .map(|d| d.as_ref()) .unwrap_or_else(|| empty_node.as_ref()), ); MerkleNode::from_engine(leaf1).commit_encode(&mut engine); leaf_engine.input( iter.next() .as_ref() .map(|d| d.as_ref()) .unwrap_or_else(|| empty_node.as_ref()), ); MerkleNode::from_engine(leaf_engine).commit_encode(&mut engine); (MerkleNode::from_engine(engine), 1) } else
} /// The source data for the [LNPBP-81] merklization process. /// /// [LNPBP-81]: https://github.com/LNP-BP/LNPBPs/blob/master/lnpbp-0081.md #[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Default)] pub struct MerkleSource<T>( /// Array of the data which will be merklized pub Vec<T>, ); impl<L, I> From<I> for MerkleSource<L> where I: IntoIterator<Item = L>, L: CommitEncode, { fn from(collection: I) -> Self { Self(collection.into_iter().collect()) } } impl<L> FromIterator<L> for MerkleSource<L> where L: CommitEncode, { fn from_iter<T: IntoIterator<Item = L>>(iter: T) -> Self { iter.into_iter().collect::<Vec<_>>().into() } } impl<L> CommitEncode for MerkleSource<L> where L: ConsensusMerkleCommit, { fn commit_encode<E: io::Write>(&self, e: E) -> usize { let leafs = self.0.iter().map(L::consensus_commit); merklize(L::MERKLE_NODE_PREFIX, leafs).0.commit_encode(e) } } impl<L> ConsensusCommit for MerkleSource<L> where L: ConsensusMerkleCommit + CommitEncode, { type Commitment = MerkleNode; #[inline] fn consensus_commit(&self) -> Self::Commitment { MerkleNode::from_slice(&self.commit_serialize()) .expect("MerkleSource::commit_serialize must produce MerkleNode") } #[inline] fn consensus_verify(&self, commitment: &Self::Commitment) -> bool { self.consensus_commit() == *commitment } } /// Converts given piece of client-side-validated data into a structure which /// can be used in merklization process. /// /// This dedicated structure is required since with /// `impl From<_> for MerkleSource` we would not be able to specify a concrete /// tagged hash, which we require in [LNPBP-81] merklization and which we /// provide here via [`ToMerkleSource::Leaf`]` associated type holding /// [`ConsensusMerkleCommit::MERKLE_NODE_PREFIX`] prefix value. /// /// [LNPBP-81]: https://github.com/LNP-BP/LNPBPs/blob/master/lnpbp-0081.md pub trait ToMerkleSource { /// Defining type of the commitment produced during merlization process type Leaf: ConsensusMerkleCommit; /// Performs transformation of the data type into a merkilzable data fn to_merkle_source(&self) -> MerkleSource<Self::Leaf>; } #[cfg(test)] mod test { use std::collections::BTreeMap; use amplify::{bmap, s}; use bitcoin_hashes::hex::ToHex; use bitcoin_hashes::{sha256d, Hash}; use strict_encoding::StrictEncode; use super::*; use crate::commit_encode::{strategies, Strategy}; use crate::CommitConceal; #[test] fn collections() { // First, we define a data type #[derive( Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, StrictEncode, StrictDecode )] struct Item(pub String); // Next, we say that it should be concealed using some function // (double SHA256 hash in this case) impl CommitConceal for Item { type ConcealedCommitment = sha256d::Hash; fn commit_conceal(&self) -> Self::ConcealedCommitment { sha256d::Hash::hash(self.0.as_bytes()) } } // Next, we need to specify how the concealed data should be // commit-encoded: this time we strict-serialize the hash impl Strategy for sha256d::Hash { type Strategy = strategies::UsingStrict; } // Now, we define commitment encoding for our concealable type: it // should conceal the data impl Strategy for Item { type Strategy = strategies::UsingConceal; } // Now, we need to say that consensus commit procedure should produce // a final commitment from commit-encoded data (equal to the // strict encoding of the conceal result) using `CommitVerify` type. // Here, we use another round of hashing, producing merkle node hash // from the concealed data. 
impl ConsensusCommit for Item { type Commitment = MerkleNode; } // Next, we need to provide merkle node tags for each type of the tree impl ConsensusMerkleCommit for Item { const MERKLE_NODE_PREFIX: &'static str = "item"; } impl ConsensusMerkleCommit for (usize, Item) { const MERKLE_NODE_PREFIX: &'static str = "usize->item"; } impl ToMerkleSource for BTreeMap<usize, Item> { type Leaf = (usize, Item); fn to_merkle_source(&self) -> MerkleSource<Self::Leaf> { self.iter().map(|(k, v)| (*k, v.clone())).collect() } } let large = vec![Item(s!("none")); 3]; let vec: MerkleSource<Item> = large.clone().into(); assert_eq!( vec.commit_serialize().to_hex(), "71ea45868fbd924061c4deb84f37ed82b0ac808de12aa7659afda7d9303e7a71" ); let large = vec![Item(s!("none")); 5]; let vec: MerkleSource<Item> = large.clone().into(); assert_eq!( vec.commit_serialize().to_hex(), "e255e0124efe0555fde0d932a0bc0042614129e1a02f7b8c0bf608b81af3eb94" ); let large = vec![Item(s!("none")); 9]; let vec: MerkleSource<Item> = large.clone().into(); assert_eq!( vec.commit_serialize().to_hex(), "6cd2d5345a654af4720bdcc637183ded8e432dc88f778b7d27c8d5a0e342c65f" ); let large = vec![Item(s!("none")); 13]; let vec: MerkleSource<Item> = large.clone().into(); assert_eq!( vec.commit_serialize().to_hex(), "3714c08c7c94a4ef769ad2cb7df9aaca1e1252d6599a02aff281c37e7242797d" ); let large = vec![Item(s!("none")); 17]; let vec: MerkleSource<Item> = large.clone().into(); assert_eq!( vec.commit_serialize().to_hex(), "6093dec47e5bdd706da01e4479cb65632eac426eb59c8c28c4e6c199438c8b6f" ); let item = Item(s!("Some text")); assert_eq!(&b"\x09\x00Some text"[..], item.strict_serialize().unwrap()); assert_eq!( "6680bbec0d05d3eaac9c8b658c40f28d2f0cb0f245c7b1cabf5a61c35bd03d8e", item.commit_serialize().to_hex() ); assert_eq!( "3e4b2dcf9bca33400028c8947565c1ff421f6d561e9ec48f88f0c9a24ebc8c30", item.consensus_commit().to_hex() ); assert_ne!(item.commit_serialize(), item.strict_serialize().unwrap()); assert_eq!( MerkleNode::hash(&item.commit_serialize()), item.consensus_commit() ); let original = bmap! 
{ 0usize => Item(s!("My first case")), 1usize => Item(s!("My second case with a very long string")), 3usize => Item(s!("My third case to make the Merkle tree two layered")) }; let collection = original.to_merkle_source(); assert_eq!( &b"\x03\x00\ \x00\x00\ \x0d\x00\ My first case\ \x01\x00\ \x26\x00\ My second case with a very long string\ \x03\x00\ \x31\x00\ My third case to make the Merkle tree two layered"[..], original.strict_serialize().unwrap() ); assert_eq!( "d911717b8dfbbcef68495c93c0a5e69df618f5dcc194d69e80b6fafbfcd6ed5d", collection.commit_serialize().to_hex() ); assert_eq!( "d911717b8dfbbcef68495c93c0a5e69df618f5dcc194d69e80b6fafbfcd6ed5d", collection.consensus_commit().to_hex() ); assert_ne!( collection.commit_serialize(), original.strict_serialize().unwrap() ); assert_eq!( MerkleNode::from_slice(&collection.commit_serialize()).unwrap(), collection.consensus_commit() ); let original = vec![ Item(s!("My first case")), Item(s!("My second case with a very long string")), Item(s!("My third case to make the Merkle tree two layered")), ]; let vec: MerkleSource<Item> = original.clone().into(); assert_eq!( &b"\x03\x00\ \x0d\x00\ My first case\ \x26\x00\ My second case with a very long string\ \x31\x00\ My third case to make the Merkle tree two layered"[..], original.strict_serialize().unwrap() ); assert_eq!( "fd72061e26055fb907aa512a591b4291e739f15198eb72027c4dd6506f14f469", vec.commit_serialize().to_hex() ); assert_eq!( "fd72061e26055fb907aa512a591b4291e739f15198eb72027c4dd6506f14f469", vec.consensus_commit().to_hex() ); assert_ne!( vec.commit_serialize(), original.strict_serialize().unwrap() ); assert_eq!( MerkleNode::from_slice(&vec.commit_serialize()).unwrap(), vec.consensus_commit() ); assert_ne!(vec.consensus_commit(), collection.consensus_commit()); } }
{ let div = len / 2 + len % 2; let (node1, height1) = merklize_inner( engine_proto, // Normally we should use `iter.by_ref().take(div)`, but currently // rust compilers is unable to parse recursion with generic types iter.by_ref().take(div).collect::<Vec<_>>().into_iter(), depth + 1, false, Some(empty_node), ); let iter = if extend { iter.chain(vec![empty_node]).collect::<Vec<_>>().into_iter() } else { iter.collect::<Vec<_>>().into_iter() }; let (node2, height2) = merklize_inner( engine_proto, iter, depth + 1, (div % 2 + len % 2) / 2 == 1, Some(empty_node), ); assert_eq!( height1, height2, "merklization algorithm failure: height of subtrees is not equal \ (width = {}, depth = {}, prev_extend = {}, next_extend = {})", len, depth, extend, div % 2 == 1 && len % 2 == 1 ); tag_engine.input(height1.to_string().as_bytes()); tag_engine.input(":".as_bytes()); let tag_hash = sha256::Hash::hash(&sha256::Hash::from_engine(tag_engine)); engine.input(&tag_hash[..]); engine.input(&tag_hash[..]); node1.commit_encode(&mut engine); node2.commit_encode(&mut engine); (MerkleNode::from_engine(engine), height1 + 1) }
conditional_block
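// --- Illustrative sketch added by the editor; not part of the dataset row above ---
// The `merklize` function in the `merkle.rs` sample above takes a tag prefix and
// an exact-size iterator of `MerkleNode` leaves and returns the tagged root plus
// the tree height. A minimal usage sketch, assuming `merklize`, `MerkleNode` and
// `bitcoin_hashes::Hash` are in scope; "example" is a hypothetical tag prefix
// (real callers would use their leaf type's `MERKLE_NODE_PREFIX`):
fn merklize_three_leaves() {
    // Hash three arbitrary byte strings into leaf nodes.
    let leaves: Vec<MerkleNode> = ["alpha", "bravo", "charlie"]
        .iter()
        .map(|s| MerkleNode::hash(s.as_bytes()))
        .collect();
    // The odd third leaf gets paired with the internal "empty" node at the leaf
    // level, so three leaves still produce a two-level tree.
    let (root, height) = merklize("example", leaves);
    assert_eq!(height, 2);
    let _ = root; // the tagged Merkle root commitment
}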
merkle.rs
// LNP/BP client-side-validation foundation libraries implementing LNPBP // specifications & standards (LNPBP-4, 7, 8, 9, 42, 81) // // Written in 2019-2021 by // Dr. Maxim Orlovsky <[email protected]> // // To the extent possible under law, the author(s) have dedicated all // copyright and related and neighboring rights to this software to // the public domain worldwide. This software is distributed without // any warranty. // // You should have received a copy of the Apache 2.0 License along with this // software. If not, see <https://opensource.org/licenses/Apache-2.0>. //! Merklization procedures for client-side-validation according to [LNPBP-81] //! standard. //! //! [LNPBP-81]: https://github.com/LNP-BP/LNPBPs/blob/master/lnpbp-0081.md use std::io; use std::iter::FromIterator; use bitcoin_hashes::{sha256, Hash, HashEngine}; use crate::{commit_encode, CommitEncode, CommitVerify, ConsensusCommit}; /// Marker trait for types that require merklization of the underlying data /// during [`ConsensusCommit`] procedure. Allows specifying custom tag for the /// tagged hash used in the merklization (see [`merklize`]). pub trait ConsensusMerkleCommit: ConsensusCommit<Commitment = MerkleNode> { /// The tag prefix which will be used in the merklization process (see /// [`merklize`]) const MERKLE_NODE_PREFIX: &'static str; } hash_newtype!( MerkleNode, sha256::Hash, 32, doc = "A hash type for LNPBP-81 Merkle tree leaves, branches and root", false // We do not reverse displaying MerkleNodes in hexadecimal ); impl strict_encoding::Strategy for MerkleNode { type Strategy = strict_encoding::strategies::HashFixedBytes; } impl commit_encode::Strategy for MerkleNode { type Strategy = commit_encode::strategies::UsingStrict; } impl<MSG> CommitVerify<MSG> for MerkleNode where MSG: AsRef<[u8]>, { #[inline] fn commit(msg: &MSG) -> MerkleNode { MerkleNode::hash(msg.as_ref()) } } impl<A, B> ConsensusCommit for (A, B) where A: CommitEncode, B: CommitEncode, { type Commitment = MerkleNode; } impl<A, B, C> ConsensusCommit for (A, B, C) where A: CommitEncode, B: CommitEncode, C: CommitEncode, { type Commitment = MerkleNode; } /// Merklization procedure that uses tagged hashes with depth commitments /// according to [LNPBP-81] standard of client-side-validation merklization /// /// [LNPBP-81]: https://github.com/LNP-BP/LNPBPs/blob/master/lnpbp-0081.md pub fn merklize<I>(prefix: &str, data: I) -> (MerkleNode, u8) where I: IntoIterator<Item = MerkleNode>, <I as IntoIterator>::IntoIter: ExactSizeIterator<Item = MerkleNode>, { let mut tag_engine = sha256::Hash::engine(); tag_engine.input(prefix.as_bytes()); tag_engine.input(":merkle:".as_bytes()); let iter = data.into_iter(); let width = iter.len(); // Tagging merkle tree root let (root, height) = merklize_inner(&tag_engine, iter, 0, false, None); tag_engine.input("root:height=".as_bytes()); tag_engine.input(&height.to_string().into_bytes()); tag_engine.input(":width=".as_bytes()); tag_engine.input(&width.to_string().into_bytes()); let tag_hash = sha256::Hash::hash(&sha256::Hash::from_engine(tag_engine)); let mut engine = MerkleNode::engine(); engine.input(&tag_hash[..]); engine.input(&tag_hash[..]); root.commit_encode(&mut engine); let tagged_root = MerkleNode::from_engine(engine); (tagged_root, height) } // TODO: Optimize to avoid allocations // In current rust generic iterators do not work with recursion :( fn merklize_inner( engine_proto: &sha256::HashEngine, mut iter: impl ExactSizeIterator<Item = MerkleNode>, depth: u8, extend: bool, empty_node: Option<MerkleNode>, 
) -> (MerkleNode, u8) { let len = iter.len() + extend as usize; let empty_node = empty_node.unwrap_or_else(|| MerkleNode::hash(&[0xFF])); // Computing tagged hash as per BIP-340 let mut tag_engine = engine_proto.clone(); tag_engine.input("depth=".as_bytes()); tag_engine.input(depth.to_string().as_bytes()); tag_engine.input(":width=".as_bytes()); tag_engine.input(len.to_string().as_bytes()); tag_engine.input(":height=".as_bytes()); let mut engine = MerkleNode::engine(); if len <= 2 { tag_engine.input("0:".as_bytes()); let tag_hash = sha256::Hash::hash(&sha256::Hash::from_engine(tag_engine)); engine.input(&tag_hash[..]); engine.input(&tag_hash[..]); let mut leaf_tag_engine = engine_proto.clone(); leaf_tag_engine.input("leaf".as_bytes()); let leaf_tag = sha256::Hash::hash(&sha256::Hash::from_engine(leaf_tag_engine)); let mut leaf_engine = MerkleNode::engine(); leaf_engine.input(&leaf_tag[..]); leaf_engine.input(&leaf_tag[..]); let mut leaf1 = leaf_engine.clone(); leaf1.input( iter.next() .as_ref() .map(|d| d.as_ref()) .unwrap_or_else(|| empty_node.as_ref()), ); MerkleNode::from_engine(leaf1).commit_encode(&mut engine); leaf_engine.input( iter.next() .as_ref() .map(|d| d.as_ref()) .unwrap_or_else(|| empty_node.as_ref()), ); MerkleNode::from_engine(leaf_engine).commit_encode(&mut engine); (MerkleNode::from_engine(engine), 1) } else { let div = len / 2 + len % 2; let (node1, height1) = merklize_inner( engine_proto, // Normally we should use `iter.by_ref().take(div)`, but currently // rust compilers is unable to parse recursion with generic types iter.by_ref().take(div).collect::<Vec<_>>().into_iter(), depth + 1, false, Some(empty_node), ); let iter = if extend { iter.chain(vec![empty_node]).collect::<Vec<_>>().into_iter() } else { iter.collect::<Vec<_>>().into_iter() }; let (node2, height2) = merklize_inner( engine_proto, iter, depth + 1, (div % 2 + len % 2) / 2 == 1, Some(empty_node), ); assert_eq!( height1, height2, "merklization algorithm failure: height of subtrees is not equal \ (width = {}, depth = {}, prev_extend = {}, next_extend = {})", len, depth, extend, div % 2 == 1 && len % 2 == 1 ); tag_engine.input(height1.to_string().as_bytes()); tag_engine.input(":".as_bytes()); let tag_hash = sha256::Hash::hash(&sha256::Hash::from_engine(tag_engine)); engine.input(&tag_hash[..]); engine.input(&tag_hash[..]); node1.commit_encode(&mut engine); node2.commit_encode(&mut engine); (MerkleNode::from_engine(engine), height1 + 1) } } /// The source data for the [LNPBP-81] merklization process. /// /// [LNPBP-81]: https://github.com/LNP-BP/LNPBPs/blob/master/lnpbp-0081.md #[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Default)] pub struct MerkleSource<T>( /// Array of the data which will be merklized pub Vec<T>, ); impl<L, I> From<I> for MerkleSource<L> where I: IntoIterator<Item = L>, L: CommitEncode, { fn from(collection: I) -> Self
} impl<L> FromIterator<L> for MerkleSource<L> where L: CommitEncode, { fn from_iter<T: IntoIterator<Item = L>>(iter: T) -> Self { iter.into_iter().collect::<Vec<_>>().into() } } impl<L> CommitEncode for MerkleSource<L> where L: ConsensusMerkleCommit, { fn commit_encode<E: io::Write>(&self, e: E) -> usize { let leafs = self.0.iter().map(L::consensus_commit); merklize(L::MERKLE_NODE_PREFIX, leafs).0.commit_encode(e) } } impl<L> ConsensusCommit for MerkleSource<L> where L: ConsensusMerkleCommit + CommitEncode, { type Commitment = MerkleNode; #[inline] fn consensus_commit(&self) -> Self::Commitment { MerkleNode::from_slice(&self.commit_serialize()) .expect("MerkleSource::commit_serialize must produce MerkleNode") } #[inline] fn consensus_verify(&self, commitment: &Self::Commitment) -> bool { self.consensus_commit() == *commitment } } /// Converts given piece of client-side-validated data into a structure which /// can be used in merklization process. /// /// This dedicated structure is required since with /// `impl From<_> for MerkleSource` we would not be able to specify a concrete /// tagged hash, which we require in [LNPBP-81] merklization and which we /// provide here via [`ToMerkleSource::Leaf`]` associated type holding /// [`ConsensusMerkleCommit::MERKLE_NODE_PREFIX`] prefix value. /// /// [LNPBP-81]: https://github.com/LNP-BP/LNPBPs/blob/master/lnpbp-0081.md pub trait ToMerkleSource { /// Defining type of the commitment produced during merlization process type Leaf: ConsensusMerkleCommit; /// Performs transformation of the data type into a merkilzable data fn to_merkle_source(&self) -> MerkleSource<Self::Leaf>; } #[cfg(test)] mod test { use std::collections::BTreeMap; use amplify::{bmap, s}; use bitcoin_hashes::hex::ToHex; use bitcoin_hashes::{sha256d, Hash}; use strict_encoding::StrictEncode; use super::*; use crate::commit_encode::{strategies, Strategy}; use crate::CommitConceal; #[test] fn collections() { // First, we define a data type #[derive( Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, StrictEncode, StrictDecode )] struct Item(pub String); // Next, we say that it should be concealed using some function // (double SHA256 hash in this case) impl CommitConceal for Item { type ConcealedCommitment = sha256d::Hash; fn commit_conceal(&self) -> Self::ConcealedCommitment { sha256d::Hash::hash(self.0.as_bytes()) } } // Next, we need to specify how the concealed data should be // commit-encoded: this time we strict-serialize the hash impl Strategy for sha256d::Hash { type Strategy = strategies::UsingStrict; } // Now, we define commitment encoding for our concealable type: it // should conceal the data impl Strategy for Item { type Strategy = strategies::UsingConceal; } // Now, we need to say that consensus commit procedure should produce // a final commitment from commit-encoded data (equal to the // strict encoding of the conceal result) using `CommitVerify` type. // Here, we use another round of hashing, producing merkle node hash // from the concealed data. 
impl ConsensusCommit for Item { type Commitment = MerkleNode; } // Next, we need to provide merkle node tags for each type of the tree impl ConsensusMerkleCommit for Item { const MERKLE_NODE_PREFIX: &'static str = "item"; } impl ConsensusMerkleCommit for (usize, Item) { const MERKLE_NODE_PREFIX: &'static str = "usize->item"; } impl ToMerkleSource for BTreeMap<usize, Item> { type Leaf = (usize, Item); fn to_merkle_source(&self) -> MerkleSource<Self::Leaf> { self.iter().map(|(k, v)| (*k, v.clone())).collect() } } let large = vec![Item(s!("none")); 3]; let vec: MerkleSource<Item> = large.clone().into(); assert_eq!( vec.commit_serialize().to_hex(), "71ea45868fbd924061c4deb84f37ed82b0ac808de12aa7659afda7d9303e7a71" ); let large = vec![Item(s!("none")); 5]; let vec: MerkleSource<Item> = large.clone().into(); assert_eq!( vec.commit_serialize().to_hex(), "e255e0124efe0555fde0d932a0bc0042614129e1a02f7b8c0bf608b81af3eb94" ); let large = vec![Item(s!("none")); 9]; let vec: MerkleSource<Item> = large.clone().into(); assert_eq!( vec.commit_serialize().to_hex(), "6cd2d5345a654af4720bdcc637183ded8e432dc88f778b7d27c8d5a0e342c65f" ); let large = vec![Item(s!("none")); 13]; let vec: MerkleSource<Item> = large.clone().into(); assert_eq!( vec.commit_serialize().to_hex(), "3714c08c7c94a4ef769ad2cb7df9aaca1e1252d6599a02aff281c37e7242797d" ); let large = vec![Item(s!("none")); 17]; let vec: MerkleSource<Item> = large.clone().into(); assert_eq!( vec.commit_serialize().to_hex(), "6093dec47e5bdd706da01e4479cb65632eac426eb59c8c28c4e6c199438c8b6f" ); let item = Item(s!("Some text")); assert_eq!(&b"\x09\x00Some text"[..], item.strict_serialize().unwrap()); assert_eq!( "6680bbec0d05d3eaac9c8b658c40f28d2f0cb0f245c7b1cabf5a61c35bd03d8e", item.commit_serialize().to_hex() ); assert_eq!( "3e4b2dcf9bca33400028c8947565c1ff421f6d561e9ec48f88f0c9a24ebc8c30", item.consensus_commit().to_hex() ); assert_ne!(item.commit_serialize(), item.strict_serialize().unwrap()); assert_eq!( MerkleNode::hash(&item.commit_serialize()), item.consensus_commit() ); let original = bmap! 
{ 0usize => Item(s!("My first case")), 1usize => Item(s!("My second case with a very long string")), 3usize => Item(s!("My third case to make the Merkle tree two layered")) }; let collection = original.to_merkle_source(); assert_eq!( &b"\x03\x00\ \x00\x00\ \x0d\x00\ My first case\ \x01\x00\ \x26\x00\ My second case with a very long string\ \x03\x00\ \x31\x00\ My third case to make the Merkle tree two layered"[..], original.strict_serialize().unwrap() ); assert_eq!( "d911717b8dfbbcef68495c93c0a5e69df618f5dcc194d69e80b6fafbfcd6ed5d", collection.commit_serialize().to_hex() ); assert_eq!( "d911717b8dfbbcef68495c93c0a5e69df618f5dcc194d69e80b6fafbfcd6ed5d", collection.consensus_commit().to_hex() ); assert_ne!( collection.commit_serialize(), original.strict_serialize().unwrap() ); assert_eq!( MerkleNode::from_slice(&collection.commit_serialize()).unwrap(), collection.consensus_commit() ); let original = vec![ Item(s!("My first case")), Item(s!("My second case with a very long string")), Item(s!("My third case to make the Merkle tree two layered")), ]; let vec: MerkleSource<Item> = original.clone().into(); assert_eq!( &b"\x03\x00\ \x0d\x00\ My first case\ \x26\x00\ My second case with a very long string\ \x31\x00\ My third case to make the Merkle tree two layered"[..], original.strict_serialize().unwrap() ); assert_eq!( "fd72061e26055fb907aa512a591b4291e739f15198eb72027c4dd6506f14f469", vec.commit_serialize().to_hex() ); assert_eq!( "fd72061e26055fb907aa512a591b4291e739f15198eb72027c4dd6506f14f469", vec.consensus_commit().to_hex() ); assert_ne!( vec.commit_serialize(), original.strict_serialize().unwrap() ); assert_eq!( MerkleNode::from_slice(&vec.commit_serialize()).unwrap(), vec.consensus_commit() ); assert_ne!(vec.consensus_commit(), collection.consensus_commit()); } }
{
        Self(collection.into_iter().collect())
    }
identifier_body
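// --- Illustrative sketch added by the editor; not part of the dataset row above ---
// `merklize_inner` in the sample above derives every node with a BIP-340-style
// tagged hash: the hash of the tag material is fed into the engine twice, then
// the payload. A simplified stand-alone sketch of that pattern (the crate itself
// builds the tag from a structured "<prefix>:merkle:depth=…:width=…:height=…:"
// string and hashes the engine output once more; this helper is an illustration
// only, not an item exported by the library):
use bitcoin_hashes::{sha256, Hash, HashEngine};

fn tagged_hash(tag: &str, payload: &[u8]) -> sha256::Hash {
    let tag_hash = sha256::Hash::hash(tag.as_bytes());
    let mut engine = sha256::Hash::engine();
    engine.input(&tag_hash[..]); // first copy of the tag hash
    engine.input(&tag_hash[..]); // second copy, as in BIP-340
    engine.input(payload);       // then the actual leaf or node data
    sha256::Hash::from_engine(engine)
}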
merkle.rs
// LNP/BP client-side-validation foundation libraries implementing LNPBP // specifications & standards (LNPBP-4, 7, 8, 9, 42, 81) // // Written in 2019-2021 by // Dr. Maxim Orlovsky <[email protected]> // // To the extent possible under law, the author(s) have dedicated all // copyright and related and neighboring rights to this software to // the public domain worldwide. This software is distributed without // any warranty. // // You should have received a copy of the Apache 2.0 License along with this // software. If not, see <https://opensource.org/licenses/Apache-2.0>. //! Merklization procedures for client-side-validation according to [LNPBP-81] //! standard. //! //! [LNPBP-81]: https://github.com/LNP-BP/LNPBPs/blob/master/lnpbp-0081.md use std::io; use std::iter::FromIterator; use bitcoin_hashes::{sha256, Hash, HashEngine}; use crate::{commit_encode, CommitEncode, CommitVerify, ConsensusCommit}; /// Marker trait for types that require merklization of the underlying data /// during [`ConsensusCommit`] procedure. Allows specifying custom tag for the /// tagged hash used in the merklization (see [`merklize`]). pub trait ConsensusMerkleCommit: ConsensusCommit<Commitment = MerkleNode> { /// The tag prefix which will be used in the merklization process (see /// [`merklize`]) const MERKLE_NODE_PREFIX: &'static str; } hash_newtype!( MerkleNode, sha256::Hash, 32, doc = "A hash type for LNPBP-81 Merkle tree leaves, branches and root", false // We do not reverse displaying MerkleNodes in hexadecimal ); impl strict_encoding::Strategy for MerkleNode { type Strategy = strict_encoding::strategies::HashFixedBytes; } impl commit_encode::Strategy for MerkleNode { type Strategy = commit_encode::strategies::UsingStrict; } impl<MSG> CommitVerify<MSG> for MerkleNode where MSG: AsRef<[u8]>, { #[inline] fn commit(msg: &MSG) -> MerkleNode { MerkleNode::hash(msg.as_ref()) } } impl<A, B> ConsensusCommit for (A, B) where A: CommitEncode, B: CommitEncode, { type Commitment = MerkleNode; } impl<A, B, C> ConsensusCommit for (A, B, C) where A: CommitEncode, B: CommitEncode, C: CommitEncode, { type Commitment = MerkleNode; } /// Merklization procedure that uses tagged hashes with depth commitments /// according to [LNPBP-81] standard of client-side-validation merklization /// /// [LNPBP-81]: https://github.com/LNP-BP/LNPBPs/blob/master/lnpbp-0081.md pub fn merklize<I>(prefix: &str, data: I) -> (MerkleNode, u8) where I: IntoIterator<Item = MerkleNode>, <I as IntoIterator>::IntoIter: ExactSizeIterator<Item = MerkleNode>, { let mut tag_engine = sha256::Hash::engine(); tag_engine.input(prefix.as_bytes()); tag_engine.input(":merkle:".as_bytes()); let iter = data.into_iter(); let width = iter.len(); // Tagging merkle tree root let (root, height) = merklize_inner(&tag_engine, iter, 0, false, None); tag_engine.input("root:height=".as_bytes()); tag_engine.input(&height.to_string().into_bytes()); tag_engine.input(":width=".as_bytes()); tag_engine.input(&width.to_string().into_bytes()); let tag_hash = sha256::Hash::hash(&sha256::Hash::from_engine(tag_engine)); let mut engine = MerkleNode::engine(); engine.input(&tag_hash[..]); engine.input(&tag_hash[..]); root.commit_encode(&mut engine); let tagged_root = MerkleNode::from_engine(engine); (tagged_root, height) } // TODO: Optimize to avoid allocations // In current rust generic iterators do not work with recursion :( fn merklize_inner( engine_proto: &sha256::HashEngine, mut iter: impl ExactSizeIterator<Item = MerkleNode>, depth: u8, extend: bool, empty_node: Option<MerkleNode>, 
) -> (MerkleNode, u8) { let len = iter.len() + extend as usize; let empty_node = empty_node.unwrap_or_else(|| MerkleNode::hash(&[0xFF])); // Computing tagged hash as per BIP-340 let mut tag_engine = engine_proto.clone(); tag_engine.input("depth=".as_bytes()); tag_engine.input(depth.to_string().as_bytes()); tag_engine.input(":width=".as_bytes()); tag_engine.input(len.to_string().as_bytes()); tag_engine.input(":height=".as_bytes()); let mut engine = MerkleNode::engine(); if len <= 2 { tag_engine.input("0:".as_bytes()); let tag_hash = sha256::Hash::hash(&sha256::Hash::from_engine(tag_engine)); engine.input(&tag_hash[..]); engine.input(&tag_hash[..]); let mut leaf_tag_engine = engine_proto.clone(); leaf_tag_engine.input("leaf".as_bytes()); let leaf_tag = sha256::Hash::hash(&sha256::Hash::from_engine(leaf_tag_engine)); let mut leaf_engine = MerkleNode::engine(); leaf_engine.input(&leaf_tag[..]); leaf_engine.input(&leaf_tag[..]); let mut leaf1 = leaf_engine.clone(); leaf1.input( iter.next() .as_ref() .map(|d| d.as_ref()) .unwrap_or_else(|| empty_node.as_ref()), ); MerkleNode::from_engine(leaf1).commit_encode(&mut engine); leaf_engine.input( iter.next() .as_ref() .map(|d| d.as_ref()) .unwrap_or_else(|| empty_node.as_ref()), ); MerkleNode::from_engine(leaf_engine).commit_encode(&mut engine); (MerkleNode::from_engine(engine), 1) } else { let div = len / 2 + len % 2; let (node1, height1) = merklize_inner( engine_proto, // Normally we should use `iter.by_ref().take(div)`, but currently // rust compilers is unable to parse recursion with generic types iter.by_ref().take(div).collect::<Vec<_>>().into_iter(),
let iter = if extend { iter.chain(vec![empty_node]).collect::<Vec<_>>().into_iter() } else { iter.collect::<Vec<_>>().into_iter() }; let (node2, height2) = merklize_inner( engine_proto, iter, depth + 1, (div % 2 + len % 2) / 2 == 1, Some(empty_node), ); assert_eq!( height1, height2, "merklization algorithm failure: height of subtrees is not equal \ (width = {}, depth = {}, prev_extend = {}, next_extend = {})", len, depth, extend, div % 2 == 1 && len % 2 == 1 ); tag_engine.input(height1.to_string().as_bytes()); tag_engine.input(":".as_bytes()); let tag_hash = sha256::Hash::hash(&sha256::Hash::from_engine(tag_engine)); engine.input(&tag_hash[..]); engine.input(&tag_hash[..]); node1.commit_encode(&mut engine); node2.commit_encode(&mut engine); (MerkleNode::from_engine(engine), height1 + 1) } } /// The source data for the [LNPBP-81] merklization process. /// /// [LNPBP-81]: https://github.com/LNP-BP/LNPBPs/blob/master/lnpbp-0081.md #[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Default)] pub struct MerkleSource<T>( /// Array of the data which will be merklized pub Vec<T>, ); impl<L, I> From<I> for MerkleSource<L> where I: IntoIterator<Item = L>, L: CommitEncode, { fn from(collection: I) -> Self { Self(collection.into_iter().collect()) } } impl<L> FromIterator<L> for MerkleSource<L> where L: CommitEncode, { fn from_iter<T: IntoIterator<Item = L>>(iter: T) -> Self { iter.into_iter().collect::<Vec<_>>().into() } } impl<L> CommitEncode for MerkleSource<L> where L: ConsensusMerkleCommit, { fn commit_encode<E: io::Write>(&self, e: E) -> usize { let leafs = self.0.iter().map(L::consensus_commit); merklize(L::MERKLE_NODE_PREFIX, leafs).0.commit_encode(e) } } impl<L> ConsensusCommit for MerkleSource<L> where L: ConsensusMerkleCommit + CommitEncode, { type Commitment = MerkleNode; #[inline] fn consensus_commit(&self) -> Self::Commitment { MerkleNode::from_slice(&self.commit_serialize()) .expect("MerkleSource::commit_serialize must produce MerkleNode") } #[inline] fn consensus_verify(&self, commitment: &Self::Commitment) -> bool { self.consensus_commit() == *commitment } } /// Converts given piece of client-side-validated data into a structure which /// can be used in merklization process. /// /// This dedicated structure is required since with /// `impl From<_> for MerkleSource` we would not be able to specify a concrete /// tagged hash, which we require in [LNPBP-81] merklization and which we /// provide here via [`ToMerkleSource::Leaf`]` associated type holding /// [`ConsensusMerkleCommit::MERKLE_NODE_PREFIX`] prefix value. 
/// /// [LNPBP-81]: https://github.com/LNP-BP/LNPBPs/blob/master/lnpbp-0081.md pub trait ToMerkleSource { /// Defining type of the commitment produced during merlization process type Leaf: ConsensusMerkleCommit; /// Performs transformation of the data type into a merkilzable data fn to_merkle_source(&self) -> MerkleSource<Self::Leaf>; } #[cfg(test)] mod test { use std::collections::BTreeMap; use amplify::{bmap, s}; use bitcoin_hashes::hex::ToHex; use bitcoin_hashes::{sha256d, Hash}; use strict_encoding::StrictEncode; use super::*; use crate::commit_encode::{strategies, Strategy}; use crate::CommitConceal; #[test] fn collections() { // First, we define a data type #[derive( Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, StrictEncode, StrictDecode )] struct Item(pub String); // Next, we say that it should be concealed using some function // (double SHA256 hash in this case) impl CommitConceal for Item { type ConcealedCommitment = sha256d::Hash; fn commit_conceal(&self) -> Self::ConcealedCommitment { sha256d::Hash::hash(self.0.as_bytes()) } } // Next, we need to specify how the concealed data should be // commit-encoded: this time we strict-serialize the hash impl Strategy for sha256d::Hash { type Strategy = strategies::UsingStrict; } // Now, we define commitment encoding for our concealable type: it // should conceal the data impl Strategy for Item { type Strategy = strategies::UsingConceal; } // Now, we need to say that consensus commit procedure should produce // a final commitment from commit-encoded data (equal to the // strict encoding of the conceal result) using `CommitVerify` type. // Here, we use another round of hashing, producing merkle node hash // from the concealed data. impl ConsensusCommit for Item { type Commitment = MerkleNode; } // Next, we need to provide merkle node tags for each type of the tree impl ConsensusMerkleCommit for Item { const MERKLE_NODE_PREFIX: &'static str = "item"; } impl ConsensusMerkleCommit for (usize, Item) { const MERKLE_NODE_PREFIX: &'static str = "usize->item"; } impl ToMerkleSource for BTreeMap<usize, Item> { type Leaf = (usize, Item); fn to_merkle_source(&self) -> MerkleSource<Self::Leaf> { self.iter().map(|(k, v)| (*k, v.clone())).collect() } } let large = vec![Item(s!("none")); 3]; let vec: MerkleSource<Item> = large.clone().into(); assert_eq!( vec.commit_serialize().to_hex(), "71ea45868fbd924061c4deb84f37ed82b0ac808de12aa7659afda7d9303e7a71" ); let large = vec![Item(s!("none")); 5]; let vec: MerkleSource<Item> = large.clone().into(); assert_eq!( vec.commit_serialize().to_hex(), "e255e0124efe0555fde0d932a0bc0042614129e1a02f7b8c0bf608b81af3eb94" ); let large = vec![Item(s!("none")); 9]; let vec: MerkleSource<Item> = large.clone().into(); assert_eq!( vec.commit_serialize().to_hex(), "6cd2d5345a654af4720bdcc637183ded8e432dc88f778b7d27c8d5a0e342c65f" ); let large = vec![Item(s!("none")); 13]; let vec: MerkleSource<Item> = large.clone().into(); assert_eq!( vec.commit_serialize().to_hex(), "3714c08c7c94a4ef769ad2cb7df9aaca1e1252d6599a02aff281c37e7242797d" ); let large = vec![Item(s!("none")); 17]; let vec: MerkleSource<Item> = large.clone().into(); assert_eq!( vec.commit_serialize().to_hex(), "6093dec47e5bdd706da01e4479cb65632eac426eb59c8c28c4e6c199438c8b6f" ); let item = Item(s!("Some text")); assert_eq!(&b"\x09\x00Some text"[..], item.strict_serialize().unwrap()); assert_eq!( "6680bbec0d05d3eaac9c8b658c40f28d2f0cb0f245c7b1cabf5a61c35bd03d8e", item.commit_serialize().to_hex() ); assert_eq!( 
"3e4b2dcf9bca33400028c8947565c1ff421f6d561e9ec48f88f0c9a24ebc8c30", item.consensus_commit().to_hex() ); assert_ne!(item.commit_serialize(), item.strict_serialize().unwrap()); assert_eq!( MerkleNode::hash(&item.commit_serialize()), item.consensus_commit() ); let original = bmap! { 0usize => Item(s!("My first case")), 1usize => Item(s!("My second case with a very long string")), 3usize => Item(s!("My third case to make the Merkle tree two layered")) }; let collection = original.to_merkle_source(); assert_eq!( &b"\x03\x00\ \x00\x00\ \x0d\x00\ My first case\ \x01\x00\ \x26\x00\ My second case with a very long string\ \x03\x00\ \x31\x00\ My third case to make the Merkle tree two layered"[..], original.strict_serialize().unwrap() ); assert_eq!( "d911717b8dfbbcef68495c93c0a5e69df618f5dcc194d69e80b6fafbfcd6ed5d", collection.commit_serialize().to_hex() ); assert_eq!( "d911717b8dfbbcef68495c93c0a5e69df618f5dcc194d69e80b6fafbfcd6ed5d", collection.consensus_commit().to_hex() ); assert_ne!( collection.commit_serialize(), original.strict_serialize().unwrap() ); assert_eq!( MerkleNode::from_slice(&collection.commit_serialize()).unwrap(), collection.consensus_commit() ); let original = vec![ Item(s!("My first case")), Item(s!("My second case with a very long string")), Item(s!("My third case to make the Merkle tree two layered")), ]; let vec: MerkleSource<Item> = original.clone().into(); assert_eq!( &b"\x03\x00\ \x0d\x00\ My first case\ \x26\x00\ My second case with a very long string\ \x31\x00\ My third case to make the Merkle tree two layered"[..], original.strict_serialize().unwrap() ); assert_eq!( "fd72061e26055fb907aa512a591b4291e739f15198eb72027c4dd6506f14f469", vec.commit_serialize().to_hex() ); assert_eq!( "fd72061e26055fb907aa512a591b4291e739f15198eb72027c4dd6506f14f469", vec.consensus_commit().to_hex() ); assert_ne!( vec.commit_serialize(), original.strict_serialize().unwrap() ); assert_eq!( MerkleNode::from_slice(&vec.commit_serialize()).unwrap(), vec.consensus_commit() ); assert_ne!(vec.consensus_commit(), collection.consensus_commit()); } }
            depth + 1,
            false,
            Some(empty_node),
        );
random_line_split
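// --- Illustrative sketch added by the editor; not part of the dataset row above ---
// The `u8` returned by `merklize` is the height computed by the recursion in
// `merklize_inner`: two or fewer leaves give height 1, otherwise the width is
// halved (rounding up, odd layers being padded with the empty node) and one is
// added. A hedged helper mirroring that recursion, handy for predicting the
// height for a given leaf count (not an item exported by the library):
fn expected_merkle_height(width: usize) -> u8 {
    if width <= 2 {
        1
    } else {
        // Each level halves the width, rounding up, exactly as `div` does above.
        expected_merkle_height(width / 2 + width % 2) + 1
    }
}
// Under this recursion the 3-, 5-, 9-, 13- and 17-item vectors exercised in the
// tests should come out with heights 2, 3, 4, 4 and 5 respectively.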
merkle.rs
// LNP/BP client-side-validation foundation libraries implementing LNPBP // specifications & standards (LNPBP-4, 7, 8, 9, 42, 81) // // Written in 2019-2021 by // Dr. Maxim Orlovsky <[email protected]> // // To the extent possible under law, the author(s) have dedicated all // copyright and related and neighboring rights to this software to // the public domain worldwide. This software is distributed without // any warranty. // // You should have received a copy of the Apache 2.0 License along with this // software. If not, see <https://opensource.org/licenses/Apache-2.0>. //! Merklization procedures for client-side-validation according to [LNPBP-81] //! standard. //! //! [LNPBP-81]: https://github.com/LNP-BP/LNPBPs/blob/master/lnpbp-0081.md use std::io; use std::iter::FromIterator; use bitcoin_hashes::{sha256, Hash, HashEngine}; use crate::{commit_encode, CommitEncode, CommitVerify, ConsensusCommit}; /// Marker trait for types that require merklization of the underlying data /// during [`ConsensusCommit`] procedure. Allows specifying custom tag for the /// tagged hash used in the merklization (see [`merklize`]). pub trait ConsensusMerkleCommit: ConsensusCommit<Commitment = MerkleNode> { /// The tag prefix which will be used in the merklization process (see /// [`merklize`]) const MERKLE_NODE_PREFIX: &'static str; } hash_newtype!( MerkleNode, sha256::Hash, 32, doc = "A hash type for LNPBP-81 Merkle tree leaves, branches and root", false // We do not reverse displaying MerkleNodes in hexadecimal ); impl strict_encoding::Strategy for MerkleNode { type Strategy = strict_encoding::strategies::HashFixedBytes; } impl commit_encode::Strategy for MerkleNode { type Strategy = commit_encode::strategies::UsingStrict; } impl<MSG> CommitVerify<MSG> for MerkleNode where MSG: AsRef<[u8]>, { #[inline] fn commit(msg: &MSG) -> MerkleNode { MerkleNode::hash(msg.as_ref()) } } impl<A, B> ConsensusCommit for (A, B) where A: CommitEncode, B: CommitEncode, { type Commitment = MerkleNode; } impl<A, B, C> ConsensusCommit for (A, B, C) where A: CommitEncode, B: CommitEncode, C: CommitEncode, { type Commitment = MerkleNode; } /// Merklization procedure that uses tagged hashes with depth commitments /// according to [LNPBP-81] standard of client-side-validation merklization /// /// [LNPBP-81]: https://github.com/LNP-BP/LNPBPs/blob/master/lnpbp-0081.md pub fn merklize<I>(prefix: &str, data: I) -> (MerkleNode, u8) where I: IntoIterator<Item = MerkleNode>, <I as IntoIterator>::IntoIter: ExactSizeIterator<Item = MerkleNode>, { let mut tag_engine = sha256::Hash::engine(); tag_engine.input(prefix.as_bytes()); tag_engine.input(":merkle:".as_bytes()); let iter = data.into_iter(); let width = iter.len(); // Tagging merkle tree root let (root, height) = merklize_inner(&tag_engine, iter, 0, false, None); tag_engine.input("root:height=".as_bytes()); tag_engine.input(&height.to_string().into_bytes()); tag_engine.input(":width=".as_bytes()); tag_engine.input(&width.to_string().into_bytes()); let tag_hash = sha256::Hash::hash(&sha256::Hash::from_engine(tag_engine)); let mut engine = MerkleNode::engine(); engine.input(&tag_hash[..]); engine.input(&tag_hash[..]); root.commit_encode(&mut engine); let tagged_root = MerkleNode::from_engine(engine); (tagged_root, height) } // TODO: Optimize to avoid allocations // In current rust generic iterators do not work with recursion :( fn merklize_inner( engine_proto: &sha256::HashEngine, mut iter: impl ExactSizeIterator<Item = MerkleNode>, depth: u8, extend: bool, empty_node: Option<MerkleNode>, 
) -> (MerkleNode, u8) { let len = iter.len() + extend as usize; let empty_node = empty_node.unwrap_or_else(|| MerkleNode::hash(&[0xFF])); // Computing tagged hash as per BIP-340 let mut tag_engine = engine_proto.clone(); tag_engine.input("depth=".as_bytes()); tag_engine.input(depth.to_string().as_bytes()); tag_engine.input(":width=".as_bytes()); tag_engine.input(len.to_string().as_bytes()); tag_engine.input(":height=".as_bytes()); let mut engine = MerkleNode::engine(); if len <= 2 { tag_engine.input("0:".as_bytes()); let tag_hash = sha256::Hash::hash(&sha256::Hash::from_engine(tag_engine)); engine.input(&tag_hash[..]); engine.input(&tag_hash[..]); let mut leaf_tag_engine = engine_proto.clone(); leaf_tag_engine.input("leaf".as_bytes()); let leaf_tag = sha256::Hash::hash(&sha256::Hash::from_engine(leaf_tag_engine)); let mut leaf_engine = MerkleNode::engine(); leaf_engine.input(&leaf_tag[..]); leaf_engine.input(&leaf_tag[..]); let mut leaf1 = leaf_engine.clone(); leaf1.input( iter.next() .as_ref() .map(|d| d.as_ref()) .unwrap_or_else(|| empty_node.as_ref()), ); MerkleNode::from_engine(leaf1).commit_encode(&mut engine); leaf_engine.input( iter.next() .as_ref() .map(|d| d.as_ref()) .unwrap_or_else(|| empty_node.as_ref()), ); MerkleNode::from_engine(leaf_engine).commit_encode(&mut engine); (MerkleNode::from_engine(engine), 1) } else { let div = len / 2 + len % 2; let (node1, height1) = merklize_inner( engine_proto, // Normally we should use `iter.by_ref().take(div)`, but currently // rust compilers is unable to parse recursion with generic types iter.by_ref().take(div).collect::<Vec<_>>().into_iter(), depth + 1, false, Some(empty_node), ); let iter = if extend { iter.chain(vec![empty_node]).collect::<Vec<_>>().into_iter() } else { iter.collect::<Vec<_>>().into_iter() }; let (node2, height2) = merklize_inner( engine_proto, iter, depth + 1, (div % 2 + len % 2) / 2 == 1, Some(empty_node), ); assert_eq!( height1, height2, "merklization algorithm failure: height of subtrees is not equal \ (width = {}, depth = {}, prev_extend = {}, next_extend = {})", len, depth, extend, div % 2 == 1 && len % 2 == 1 ); tag_engine.input(height1.to_string().as_bytes()); tag_engine.input(":".as_bytes()); let tag_hash = sha256::Hash::hash(&sha256::Hash::from_engine(tag_engine)); engine.input(&tag_hash[..]); engine.input(&tag_hash[..]); node1.commit_encode(&mut engine); node2.commit_encode(&mut engine); (MerkleNode::from_engine(engine), height1 + 1) } } /// The source data for the [LNPBP-81] merklization process. /// /// [LNPBP-81]: https://github.com/LNP-BP/LNPBPs/blob/master/lnpbp-0081.md #[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Default)] pub struct MerkleSource<T>( /// Array of the data which will be merklized pub Vec<T>, ); impl<L, I> From<I> for MerkleSource<L> where I: IntoIterator<Item = L>, L: CommitEncode, { fn from(collection: I) -> Self { Self(collection.into_iter().collect()) } } impl<L> FromIterator<L> for MerkleSource<L> where L: CommitEncode, { fn from_iter<T: IntoIterator<Item = L>>(iter: T) -> Self { iter.into_iter().collect::<Vec<_>>().into() } } impl<L> CommitEncode for MerkleSource<L> where L: ConsensusMerkleCommit, { fn
<E: io::Write>(&self, e: E) -> usize { let leafs = self.0.iter().map(L::consensus_commit); merklize(L::MERKLE_NODE_PREFIX, leafs).0.commit_encode(e) } } impl<L> ConsensusCommit for MerkleSource<L> where L: ConsensusMerkleCommit + CommitEncode, { type Commitment = MerkleNode; #[inline] fn consensus_commit(&self) -> Self::Commitment { MerkleNode::from_slice(&self.commit_serialize()) .expect("MerkleSource::commit_serialize must produce MerkleNode") } #[inline] fn consensus_verify(&self, commitment: &Self::Commitment) -> bool { self.consensus_commit() == *commitment } } /// Converts given piece of client-side-validated data into a structure which /// can be used in merklization process. /// /// This dedicated structure is required since with /// `impl From<_> for MerkleSource` we would not be able to specify a concrete /// tagged hash, which we require in [LNPBP-81] merklization and which we /// provide here via [`ToMerkleSource::Leaf`]` associated type holding /// [`ConsensusMerkleCommit::MERKLE_NODE_PREFIX`] prefix value. /// /// [LNPBP-81]: https://github.com/LNP-BP/LNPBPs/blob/master/lnpbp-0081.md pub trait ToMerkleSource { /// Defining type of the commitment produced during merlization process type Leaf: ConsensusMerkleCommit; /// Performs transformation of the data type into a merkilzable data fn to_merkle_source(&self) -> MerkleSource<Self::Leaf>; } #[cfg(test)] mod test { use std::collections::BTreeMap; use amplify::{bmap, s}; use bitcoin_hashes::hex::ToHex; use bitcoin_hashes::{sha256d, Hash}; use strict_encoding::StrictEncode; use super::*; use crate::commit_encode::{strategies, Strategy}; use crate::CommitConceal; #[test] fn collections() { // First, we define a data type #[derive( Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, StrictEncode, StrictDecode )] struct Item(pub String); // Next, we say that it should be concealed using some function // (double SHA256 hash in this case) impl CommitConceal for Item { type ConcealedCommitment = sha256d::Hash; fn commit_conceal(&self) -> Self::ConcealedCommitment { sha256d::Hash::hash(self.0.as_bytes()) } } // Next, we need to specify how the concealed data should be // commit-encoded: this time we strict-serialize the hash impl Strategy for sha256d::Hash { type Strategy = strategies::UsingStrict; } // Now, we define commitment encoding for our concealable type: it // should conceal the data impl Strategy for Item { type Strategy = strategies::UsingConceal; } // Now, we need to say that consensus commit procedure should produce // a final commitment from commit-encoded data (equal to the // strict encoding of the conceal result) using `CommitVerify` type. // Here, we use another round of hashing, producing merkle node hash // from the concealed data. 
impl ConsensusCommit for Item { type Commitment = MerkleNode; } // Next, we need to provide merkle node tags for each type of the tree impl ConsensusMerkleCommit for Item { const MERKLE_NODE_PREFIX: &'static str = "item"; } impl ConsensusMerkleCommit for (usize, Item) { const MERKLE_NODE_PREFIX: &'static str = "usize->item"; } impl ToMerkleSource for BTreeMap<usize, Item> { type Leaf = (usize, Item); fn to_merkle_source(&self) -> MerkleSource<Self::Leaf> { self.iter().map(|(k, v)| (*k, v.clone())).collect() } } let large = vec![Item(s!("none")); 3]; let vec: MerkleSource<Item> = large.clone().into(); assert_eq!( vec.commit_serialize().to_hex(), "71ea45868fbd924061c4deb84f37ed82b0ac808de12aa7659afda7d9303e7a71" ); let large = vec![Item(s!("none")); 5]; let vec: MerkleSource<Item> = large.clone().into(); assert_eq!( vec.commit_serialize().to_hex(), "e255e0124efe0555fde0d932a0bc0042614129e1a02f7b8c0bf608b81af3eb94" ); let large = vec![Item(s!("none")); 9]; let vec: MerkleSource<Item> = large.clone().into(); assert_eq!( vec.commit_serialize().to_hex(), "6cd2d5345a654af4720bdcc637183ded8e432dc88f778b7d27c8d5a0e342c65f" ); let large = vec![Item(s!("none")); 13]; let vec: MerkleSource<Item> = large.clone().into(); assert_eq!( vec.commit_serialize().to_hex(), "3714c08c7c94a4ef769ad2cb7df9aaca1e1252d6599a02aff281c37e7242797d" ); let large = vec![Item(s!("none")); 17]; let vec: MerkleSource<Item> = large.clone().into(); assert_eq!( vec.commit_serialize().to_hex(), "6093dec47e5bdd706da01e4479cb65632eac426eb59c8c28c4e6c199438c8b6f" ); let item = Item(s!("Some text")); assert_eq!(&b"\x09\x00Some text"[..], item.strict_serialize().unwrap()); assert_eq!( "6680bbec0d05d3eaac9c8b658c40f28d2f0cb0f245c7b1cabf5a61c35bd03d8e", item.commit_serialize().to_hex() ); assert_eq!( "3e4b2dcf9bca33400028c8947565c1ff421f6d561e9ec48f88f0c9a24ebc8c30", item.consensus_commit().to_hex() ); assert_ne!(item.commit_serialize(), item.strict_serialize().unwrap()); assert_eq!( MerkleNode::hash(&item.commit_serialize()), item.consensus_commit() ); let original = bmap! 
{ 0usize => Item(s!("My first case")), 1usize => Item(s!("My second case with a very long string")), 3usize => Item(s!("My third case to make the Merkle tree two layered")) }; let collection = original.to_merkle_source(); assert_eq!( &b"\x03\x00\ \x00\x00\ \x0d\x00\ My first case\ \x01\x00\ \x26\x00\ My second case with a very long string\ \x03\x00\ \x31\x00\ My third case to make the Merkle tree two layered"[..], original.strict_serialize().unwrap() ); assert_eq!( "d911717b8dfbbcef68495c93c0a5e69df618f5dcc194d69e80b6fafbfcd6ed5d", collection.commit_serialize().to_hex() ); assert_eq!( "d911717b8dfbbcef68495c93c0a5e69df618f5dcc194d69e80b6fafbfcd6ed5d", collection.consensus_commit().to_hex() ); assert_ne!( collection.commit_serialize(), original.strict_serialize().unwrap() ); assert_eq!( MerkleNode::from_slice(&collection.commit_serialize()).unwrap(), collection.consensus_commit() ); let original = vec![ Item(s!("My first case")), Item(s!("My second case with a very long string")), Item(s!("My third case to make the Merkle tree two layered")), ]; let vec: MerkleSource<Item> = original.clone().into(); assert_eq!( &b"\x03\x00\ \x0d\x00\ My first case\ \x26\x00\ My second case with a very long string\ \x31\x00\ My third case to make the Merkle tree two layered"[..], original.strict_serialize().unwrap() ); assert_eq!( "fd72061e26055fb907aa512a591b4291e739f15198eb72027c4dd6506f14f469", vec.commit_serialize().to_hex() ); assert_eq!( "fd72061e26055fb907aa512a591b4291e739f15198eb72027c4dd6506f14f469", vec.consensus_commit().to_hex() ); assert_ne!( vec.commit_serialize(), original.strict_serialize().unwrap() ); assert_eq!( MerkleNode::from_slice(&vec.commit_serialize()).unwrap(), vec.consensus_commit() ); assert_ne!(vec.consensus_commit(), collection.consensus_commit()); } }
commit_encode
identifier_name
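The merklize/merklize_inner code above derives every tree node from a BIP-340 style tagged hash whose tag commits to the node's depth and the layer's width, with a dedicated empty-node hash used to pad odd layers. As a rough illustration of that pairing idea only (not a byte-compatible reimplementation of LNPBP-81: the width/height commitments, the separate leaf tag and the empty-node padding are deliberately omitted, and all names below are hypothetical), a minimal Go sketch looks like this:

package main

import (
	"crypto/sha256"
	"fmt"
)

// taggedHash is the BIP-340 style construction used in the Rust code above:
// SHA256(SHA256(tag) || SHA256(tag) || msg).
func taggedHash(tag string, msg []byte) [32]byte {
	t := sha256.Sum256([]byte(tag))
	h := sha256.New()
	h.Write(t[:])
	h.Write(t[:])
	h.Write(msg)
	var out [32]byte
	copy(out[:], h.Sum(nil))
	return out
}

// merkleRoot folds leaves pairwise, committing to the depth in the tag.
// Illustration only: real LNPBP-81 digests will not match this sketch.
func merkleRoot(prefix string, leaves [][32]byte) [32]byte {
	if len(leaves) == 0 {
		return taggedHash(prefix+":merkle:empty", nil)
	}
	layer := leaves
	for depth := 0; len(layer) > 1; depth++ {
		var next [][32]byte
		for i := 0; i < len(layer); i += 2 {
			right := layer[i] // duplicate the last node on odd layers (sketch only)
			if i+1 < len(layer) {
				right = layer[i+1]
			}
			msg := append(append([]byte{}, layer[i][:]...), right[:]...)
			tag := fmt.Sprintf("%s:merkle:depth=%d", prefix, depth)
			next = append(next, taggedHash(tag, msg))
		}
		layer = next
	}
	return layer[0]
}

The point of tagging each level with its depth (and, in the real procedure, the width and height as well) is that two different tree shapes over the same leaves can never produce the same root.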
mysql_table_scanner.go
package mysqlbatch import ( "context" "database/sql" "fmt" "reflect" "strconv" "sync" "time" "github.com/juju/errors" "github.com/pingcap/parser" log "github.com/sirupsen/logrus" "github.com/moiot/gravity/pkg/core" "github.com/moiot/gravity/pkg/metrics" "github.com/moiot/gravity/pkg/mysql" "github.com/moiot/gravity/pkg/position_store" "github.com/moiot/gravity/pkg/schema_store" "github.com/moiot/gravity/pkg/utils" ) var ErrTableEmpty = errors.New("table_scanner: this table is empty") type TableScanner struct { pipelineName string tableWorkC chan *TableWork cfg *PluginConfig positionStore position_store.MySQLTablePositionStore db *sql.DB emitter core.Emitter throttle *time.Ticker ctx context.Context schemaStore schema_store.SchemaStore wg sync.WaitGroup parser *parser.Parser } func (tableScanner *TableScanner) Start() error { tableScanner.wg.Add(1) go func() { defer tableScanner.wg.Done() for { select { case work, ok := <-tableScanner.tableWorkC: if !ok { log.Infof("[TableScanner] queue closed, exit") return } err := tableScanner.initTableDDL(work.TableDef) if err != nil { log.Fatalf("[TableScanner] initTableDDL for %s.%s, err: %s", work.TableDef.Schema, work.TableDef.Name, err) } err = tableScanner.InitTablePosition(work.TableDef, work.TableConfig) if err == ErrTableEmpty { log.Infof("[TableScanner] Target table is empty. schema: %v, table: %v", work.TableDef.Schema, work.TableDef.Name) continue } else if err != nil { log.Fatalf("[TableScanner] InitTablePosition failed: %v", errors.ErrorStack(err)) } max, min, ok := tableScanner.positionStore.GetMaxMin(utils.TableIdentity(work.TableDef.Schema, work.TableDef.Name)) log.Infof("positionStore.GetMaxMin: max value type: %v, max %v; min value type: %v, min %v", reflect.TypeOf(max.Value), max, reflect.TypeOf(min.Value), min) scanColumn := max.Column if !ok { log.Fatalf("[table_scanner] failed to find max min") } // If the scan column is *, then we do a full dump of the table if scanColumn == "*" { tableScanner.FindAll(tableScanner.db, work.TableDef, work.TableConfig) } else { tableScanner.LoopInBatch( tableScanner.db, work.TableDef, work.TableConfig, scanColumn, max, min, tableScanner.cfg.TableScanBatch) if tableScanner.ctx.Err() == nil { log.Infof("[table_worker] LoopInBatch done with table %s", work.TableDef.Name) } else if tableScanner.ctx.Err() == context.Canceled { log.Infof("[TableScanner] LoopInBatch canceled") return } else { log.Fatalf("[TableScanner] LoopInBatch unknow case,err: %v", tableScanner.ctx.Err()) } } case <-tableScanner.ctx.Done(): log.Infof("[TableScanner] canceled by context") return } } }() return nil } func (tableScanner *TableScanner) InitTablePosition(tableDef *schema_store.Table, tableConfig *TableConfig) error { _, _, ok := tableScanner.positionStore.GetMaxMin(utils.TableIdentity(tableDef.Schema, tableDef.Name)) if !ok { log.Infof("[InitTablePosition] init table position") // detect scan column first var scanColumn string var scanType string column, err := DetectScanColumn(tableScanner.db, tableDef.Schema, tableDef.Name, tableScanner.cfg.MaxFullDumpCount) if err != nil { return errors.Trace(err) } scanColumn = column if scanColumn == "*" { maxPos := position_store.MySQLTablePosition{Column: scanColumn} minPos := position_store.MySQLTablePosition{Column: scanColumn} tableScanner.positionStore.PutMaxMin(utils.TableIdentity(tableDef.Schema, tableDef.Name), maxPos, minPos) } else { max, min := FindMaxMinValueFromDB(tableScanner.db, tableDef.Schema, tableDef.Name, scanColumn) maxPos := 
position_store.MySQLTablePosition{Value: max, Type: scanType, Column: scanColumn} empty, err := tableScanner.validateTableEmpty(maxPos) if err != nil { return errors.Trace(err) } if empty { return ErrTableEmpty } minPos := position_store.MySQLTablePosition{Value: min, Type: scanType, Column: scanColumn} tableScanner.positionStore.PutMaxMin(utils.TableIdentity(tableDef.Schema, tableDef.Name), maxPos, minPos) log.Infof("[InitTablePosition] PutMaxMin: max value type: %v, max: %v; min value type: %v, min: %v", reflect.TypeOf(maxPos.Value), maxPos, reflect.TypeOf(minPos.Value), minPos) } log.Infof("[InitTablePosition] schema: %v, table: %v, scanColumn: %v", tableDef.Schema, tableDef.Name, scanColumn) } return nil } func (tableScanner *TableScanner) validateTableEmpty(pos position_store.MySQLTablePosition) (bool, error) { mapStr, err := pos.MapString() if err != nil { return false, errors.Trace(err) } return mapStr["value"] == "", nil } func (tableScanner *TableScanner) Wait() { tableScanner.wg.Wait() } // DetectScanColumn find a column that we used to scan the table // SHOW INDEX FROM .. // Pick primary key, if there is only one primary key // If pk not found try using unique index // fail func DetectScanColumn(sourceDB *sql.DB, dbName string, tableName string, maxFullDumpRowsCount int) (string, error) { pks, err := utils.GetPrimaryKeys(sourceDB, dbName, tableName) if err != nil { return "", errors.Trace(err) } if len(pks) == 1 { return pks[0], nil } uniqueIndexes, err := utils.GetUniqueIndexesWithoutPks(sourceDB, dbName, tableName) if err != nil { return "", errors.Trace(err) } if len(uniqueIndexes) > 0 { return uniqueIndexes[0], nil } rowsCount, err := utils.EstimateRowsCount(sourceDB, dbName, tableName) if err != nil { return "", errors.Trace(err) } if rowsCount < maxFullDumpRowsCount { return "*", nil } return "", errors.Errorf("no scan column can be found automatically for %s.%s", dbName, tableName) } func FindMaxMinValueFromDB(db *sql.DB, dbName string, tableName string, scanColumn string) (interface{}, interface{}) { var max interface{} var min interface{} maxStatement := fmt.Sprintf("SELECT MAX(`%s`) FROM `%s`.`%s`", scanColumn, dbName, tableName) log.Infof("[FindMaxMinValueFromDB] statement: %s", maxStatement) maxRowPtrs, err := utils.QueryGeneralRowsDataWithSQL(db, maxStatement) if err != nil { log.Fatalf("[FindMaxMinValueFromDB] failed to QueryGeneralRowsDataWithSQL, err: %v", errors.ErrorStack(err)) } max = reflect.ValueOf(maxRowPtrs[0][0]).Elem().Interface() minStatement := fmt.Sprintf("SELECT MIN(`%s`) FROM `%s`.`%s`", scanColumn, dbName, tableName) log.Infof("[FindMaxMinValueFromDB] statement: %s", minStatement) minRowPtrs, err := utils.QueryGeneralRowsDataWithSQL(db, minStatement) if err != nil { log.Fatalf("[FindMaxMinValueFromDB] failed to QueryGeneralRowsDataWithSQL, err: %v", errors.ErrorStack(err)) } min = reflect.ValueOf(minRowPtrs[0][0]).Elem().Interface() return max, min } // LoopInBatch will iterate the table by sql like this: // SELECT * FROM a WHERE some_key > some_value LIMIT 10000 // It will get the min, max value of the column and iterate batch by batch func (tableScanner *TableScanner) LoopInBatch(db *sql.DB, tableDef *schema_store.Table, tableConfig *TableConfig, scanColumn string, max position_store.MySQLTablePosition, min position_store.MySQLTablePosition, batch int) { pipelineName := tableScanner.pipelineName if batch <= 0 { log.Fatalf("[LoopInBatch] batch size is 0") } maxMapString, err := max.MapString() if err != nil { log.Fatalf("[LoopInBatch] failed to 
get maxString, max: %v, err: %v", max, errors.ErrorStack(err)) } batchIdx := 0 firstLoop := true maxReached := false var statement string currentMinPos, ok := tableScanner.positionStore.GetCurrent(utils.TableIdentity(tableDef.Schema, tableDef.Name)) if !ok { tableScanner.positionStore.PutCurrent(utils.TableIdentity(tableDef.Schema, tableDef.Name), min) currentMinPos = min } log.Infof("[LoopInBatch] prepare current: %v", currentMinPos) currentMinValue := currentMinPos.Value resultCount := 0 columnTypes, err := GetTableColumnTypes(db, tableDef.Schema, tableDef.Name) if err != nil { log.Fatalf("[LoopInBatch] failed to get columnType, err: %v", errors.ErrorStack(err)) } scanIdx, err := GetScanIdx(columnTypes, scanColumn) if err != nil { log.Fatalf("[LoopInBatch] failed to get scanIdx, err: %v", errors.ErrorStack(err)) } rowsBatchDataPtrs := newBatchDataPtrs(columnTypes, batch) for { if firstLoop { statement = fmt.Sprintf("SELECT * FROM `%s`.`%s` WHERE %s >= ? ORDER BY %s LIMIT ?", tableDef.Schema, tableDef.Name, scanColumn, scanColumn) firstLoop = false } else { statement = fmt.Sprintf("SELECT * FROM `%s`.`%s` WHERE %s > ? ORDER BY %s LIMIT ?", tableDef.Schema, tableDef.Name, scanColumn, scanColumn) } <-tableScanner.throttle.C queryStartTime := time.Now() rows, err := db.Query(statement, currentMinValue, batch) if err != nil { log.Fatalf("[LoopInBatch] table %s.%s, err: %v", tableDef.Schema, tableDef.Name, err) } rowIdx := 0 for rows.Next() { metrics.ScannerJobFetchedCount.WithLabelValues(pipelineName).Add(1) resultCount++ rowsBatchDataPtrs[rowIdx], err = utils.ScanGeneralRowsWithDataPtrs(rows, columnTypes, rowsBatchDataPtrs[rowIdx]) if err != nil { log.Fatalf("[LoopInBatch] table %s.%s, scan error: %v", tableDef.Schema, tableDef.Name, errors.ErrorStack(err)) } currentMinValue = reflect.ValueOf(rowsBatchDataPtrs[rowIdx][scanIdx]).Elem().Interface() rowIdx++ if mysql.MySQLDataEquals(max.Value, currentMinValue) { maxReached = true break } } err = rows.Err() if err != nil { log.Fatalf("[LoopInBatch] table %s.%s, rows err: %v", tableDef.Schema, tableDef.Name, err) } rows.Close() // no result found for this query if rowIdx == 0 { log.Infof("[TableScanner] query result is 0, return") return } metrics.ScannerBatchQueryDuration.WithLabelValues(pipelineName).Observe(time.Now().Sub(queryStartTime).Seconds()) batchIdx++ var lastMsg *core.Msg // process this batch's data for i := 0; i < rowIdx; i++ { rowPtrs := rowsBatchDataPtrs[i] posV := mysql.NormalizeSQLType(reflect.ValueOf(rowPtrs[scanIdx]).Elem().Interface()) position := position_store.MySQLTablePosition{Value: posV, Column: scanColumn} msg := NewMsg(rowPtrs, columnTypes, tableDef, tableScanner.AfterMsgCommit, position) if err := tableScanner.emitter.Emit(msg); err != nil { log.Fatalf("[LoopInBatch] failed to emit job: %v", errors.ErrorStack(err)) } lastMsg = msg } log.Infof("[LoopInBatch] sourceDB: %s, table: %s, currentMinPos: %v, maxMapString.column: %v, maxMapString.value: %v, maxMapString.type: %v, resultCount: %v", tableDef.Schema, tableDef.Name, currentMinValue, maxMapString["column"], maxMapString["value"], maxMapString["type"], resultCount) // we break the loop here in case the currentMinPos comes larger than the max we have in the beginning. 
if maxReached { log.Infof("[LoopInBatch] max reached") if lastMsg != nil { <-lastMsg.Done // close the stream msg := NewCloseInputStreamMsg(tableDef) if err := tableScanner.emitter.Emit(msg); err != nil { log.Fatalf("[LoopInBatch] failed to emit close stream msg: %v", errors.ErrorStack(err)) } log.Infof("[LoopInBatch] sent close input stream msg") } return } select { case <-tableScanner.ctx.Done(): log.Infof("[table_worker] canceled by context") return default: continue } } } func (tableScanner *TableScanner) FindAll(db *sql.DB, tableDef *schema_store.Table, tableConfig *TableConfig) { columnTypes, err := GetTableColumnTypes(db, tableDef.Schema, tableDef.Name) if err != nil { log.Fatalf("[FindAll] failed to get columnType: %v", errors.ErrorStack(err)) } statement := fmt.Sprintf("SELECT * FROM `%s`.`%s`", tableDef.Schema, tableDef.Name) allData, err := utils.QueryGeneralRowsDataWithSQL(db, statement) if err != nil { log.Fatalf("[FindAll] failed to find all, err: %v", errors.ErrorStack(err)) } for i := range allData { rowPtrs := allData[i] msg := NewMsg(rowPtrs, columnTypes, tableDef, nil, position_store.MySQLTablePosition{}) if err := tableScanner.emitter.Emit(msg); err != nil { log.Fatalf("[tableScanner] failed to emit: %v", errors.ErrorStack(err)) } } } func (tableScanner *TableScanner) AfterMsgCommit(msg *core.Msg) error { p, ok := msg.InputContext.(position_store.MySQLTablePosition) if !ok { return errors.Errorf("type invalid") } tableScanner.positionStore.PutCurrent(*msg.InputStreamKey, p) return nil } func (tableScanner *TableScanner)
(table *schema_store.Table) error { row := tableScanner.db.QueryRow(fmt.Sprintf("SHOW CREATE TABLE `%s`.`%s`", table.Schema, table.Name)) var t, create string err := row.Scan(&t, &create) if err != nil { return errors.Trace(err) } msg := NewCreateTableMsg(tableScanner.parser, table, create) if err := tableScanner.emitter.Emit(msg); err != nil { return errors.Trace(err) } <-msg.Done return nil } func GetTableColumnTypes(db *sql.DB, schema string, table string) ([]*sql.ColumnType, error) { statement := fmt.Sprintf("SELECT * FROM `%s`.`%s` LIMIT 1", schema, table) rows, err := db.Query(statement) if err != nil { return nil, errors.Trace(err) } defer rows.Close() return rows.ColumnTypes() } func GetScanIdx(columnTypes []*sql.ColumnType, scanColumn string) (int, error) { for i := range columnTypes { if columnTypes[i].Name() == scanColumn { return i, nil } } return 0, errors.Errorf("cannot find scan index") } func newBatchDataPtrs(columnTypes []*sql.ColumnType, batch int) [][]interface{} { ret := make([][]interface{}, batch) for batchIdx := 0; batchIdx < batch; batchIdx++ { vPtrs := make([]interface{}, len(columnTypes)) for columnIdx, _ := range columnTypes { scanType := utils.GetScanType(columnTypes[columnIdx]) vptr := reflect.New(scanType) vPtrs[columnIdx] = vptr.Interface() } ret[batchIdx] = vPtrs } return ret } func NewTableScanner( pipelineName string, tableWorkC chan *TableWork, db *sql.DB, positionStore position_store.MySQLTablePositionStore, emitter core.Emitter, throttle *time.Ticker, schemaStore schema_store.SchemaStore, cfg *PluginConfig, ctx context.Context) *TableScanner { tableScanner := TableScanner{ pipelineName: pipelineName, tableWorkC: tableWorkC, db: db, positionStore: positionStore, emitter: emitter, throttle: throttle, schemaStore: schemaStore, cfg: cfg, ctx: ctx, parser: parser.New(), } return &tableScanner } func String2Val(s string, scanType string) interface{} { var currentMin interface{} var err error if scanType == "string" { currentMin = s } else if scanType == "int" { currentMin, err = strconv.Atoi(s) if err != nil { log.Fatalf("[LoopInBatch] failed to convert string to int: %v", err) } } else { log.Infof("[LoopInBatch] scanColumn not supported") } return currentMin }
initTableDDL
identifier_name
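LoopInBatch's comment above describes keyset pagination: select rows WHERE scan_column > last_seen_value ORDER BY scan_column LIMIT batch, using >= only on the first pass, and stop once the previously recorded maximum is reached. A minimal sketch of that loop with plain database/sql follows; scanInBatches and its integer scan column are assumptions made for illustration, not gravity's API:

package main

import (
	"database/sql"
	"fmt"
)

// scanInBatches sketches the keyset pagination used by LoopInBatch: start from
// the stored minimum, repeatedly query "> last seen value" ordered by the scan
// column, and stop when the query is empty or the stored maximum is reached.
func scanInBatches(db *sql.DB, schema, table, scanColumn string, min, max int64, batch int) error {
	current := min
	first := true
	for {
		op := ">"
		if first {
			op = ">=" // include the very first row on the initial pass
			first = false
		}
		query := fmt.Sprintf("SELECT `%s` FROM `%s`.`%s` WHERE `%s` %s ? ORDER BY `%s` LIMIT ?",
			scanColumn, schema, table, scanColumn, op, scanColumn)
		rows, err := db.Query(query, current, batch)
		if err != nil {
			return err
		}
		n := 0
		for rows.Next() {
			if err := rows.Scan(&current); err != nil {
				rows.Close()
				return err
			}
			n++
		}
		if err := rows.Err(); err != nil {
			rows.Close()
			return err
		}
		rows.Close()
		if n == 0 || current >= max {
			return nil // nothing left, or the recorded maximum has been reached
		}
	}
}

Bounding the scan by a pre-recorded maximum is what lets the scanner hand off cleanly to the binlog stream: rows inserted after the snapshot started are picked up by replication rather than by ever-growing batch queries.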
mysql_table_scanner.go
package mysqlbatch import ( "context" "database/sql" "fmt" "reflect" "strconv" "sync" "time" "github.com/juju/errors" "github.com/pingcap/parser" log "github.com/sirupsen/logrus" "github.com/moiot/gravity/pkg/core" "github.com/moiot/gravity/pkg/metrics" "github.com/moiot/gravity/pkg/mysql" "github.com/moiot/gravity/pkg/position_store" "github.com/moiot/gravity/pkg/schema_store" "github.com/moiot/gravity/pkg/utils" ) var ErrTableEmpty = errors.New("table_scanner: this table is empty") type TableScanner struct { pipelineName string tableWorkC chan *TableWork cfg *PluginConfig positionStore position_store.MySQLTablePositionStore db *sql.DB emitter core.Emitter throttle *time.Ticker ctx context.Context schemaStore schema_store.SchemaStore wg sync.WaitGroup parser *parser.Parser } func (tableScanner *TableScanner) Start() error { tableScanner.wg.Add(1) go func() { defer tableScanner.wg.Done() for { select { case work, ok := <-tableScanner.tableWorkC: if !ok { log.Infof("[TableScanner] queue closed, exit") return } err := tableScanner.initTableDDL(work.TableDef) if err != nil { log.Fatalf("[TableScanner] initTableDDL for %s.%s, err: %s", work.TableDef.Schema, work.TableDef.Name, err) } err = tableScanner.InitTablePosition(work.TableDef, work.TableConfig) if err == ErrTableEmpty { log.Infof("[TableScanner] Target table is empty. schema: %v, table: %v", work.TableDef.Schema, work.TableDef.Name) continue } else if err != nil { log.Fatalf("[TableScanner] InitTablePosition failed: %v", errors.ErrorStack(err)) } max, min, ok := tableScanner.positionStore.GetMaxMin(utils.TableIdentity(work.TableDef.Schema, work.TableDef.Name)) log.Infof("positionStore.GetMaxMin: max value type: %v, max %v; min value type: %v, min %v", reflect.TypeOf(max.Value), max, reflect.TypeOf(min.Value), min) scanColumn := max.Column if !ok { log.Fatalf("[table_scanner] failed to find max min") } // If the scan column is *, then we do a full dump of the table if scanColumn == "*" { tableScanner.FindAll(tableScanner.db, work.TableDef, work.TableConfig) } else { tableScanner.LoopInBatch( tableScanner.db, work.TableDef, work.TableConfig, scanColumn, max, min, tableScanner.cfg.TableScanBatch) if tableScanner.ctx.Err() == nil { log.Infof("[table_worker] LoopInBatch done with table %s", work.TableDef.Name) } else if tableScanner.ctx.Err() == context.Canceled { log.Infof("[TableScanner] LoopInBatch canceled") return } else { log.Fatalf("[TableScanner] LoopInBatch unknow case,err: %v", tableScanner.ctx.Err()) } } case <-tableScanner.ctx.Done(): log.Infof("[TableScanner] canceled by context") return } } }() return nil } func (tableScanner *TableScanner) InitTablePosition(tableDef *schema_store.Table, tableConfig *TableConfig) error { _, _, ok := tableScanner.positionStore.GetMaxMin(utils.TableIdentity(tableDef.Schema, tableDef.Name)) if !ok { log.Infof("[InitTablePosition] init table position") // detect scan column first var scanColumn string var scanType string column, err := DetectScanColumn(tableScanner.db, tableDef.Schema, tableDef.Name, tableScanner.cfg.MaxFullDumpCount) if err != nil { return errors.Trace(err) } scanColumn = column if scanColumn == "*" { maxPos := position_store.MySQLTablePosition{Column: scanColumn} minPos := position_store.MySQLTablePosition{Column: scanColumn} tableScanner.positionStore.PutMaxMin(utils.TableIdentity(tableDef.Schema, tableDef.Name), maxPos, minPos) } else { max, min := FindMaxMinValueFromDB(tableScanner.db, tableDef.Schema, tableDef.Name, scanColumn) maxPos := 
position_store.MySQLTablePosition{Value: max, Type: scanType, Column: scanColumn} empty, err := tableScanner.validateTableEmpty(maxPos) if err != nil { return errors.Trace(err) } if empty { return ErrTableEmpty } minPos := position_store.MySQLTablePosition{Value: min, Type: scanType, Column: scanColumn} tableScanner.positionStore.PutMaxMin(utils.TableIdentity(tableDef.Schema, tableDef.Name), maxPos, minPos) log.Infof("[InitTablePosition] PutMaxMin: max value type: %v, max: %v; min value type: %v, min: %v", reflect.TypeOf(maxPos.Value), maxPos, reflect.TypeOf(minPos.Value), minPos) } log.Infof("[InitTablePosition] schema: %v, table: %v, scanColumn: %v", tableDef.Schema, tableDef.Name, scanColumn) } return nil } func (tableScanner *TableScanner) validateTableEmpty(pos position_store.MySQLTablePosition) (bool, error) { mapStr, err := pos.MapString() if err != nil { return false, errors.Trace(err) } return mapStr["value"] == "", nil } func (tableScanner *TableScanner) Wait() { tableScanner.wg.Wait() } // DetectScanColumn find a column that we used to scan the table // SHOW INDEX FROM .. // Pick primary key, if there is only one primary key // If pk not found try using unique index // fail func DetectScanColumn(sourceDB *sql.DB, dbName string, tableName string, maxFullDumpRowsCount int) (string, error) { pks, err := utils.GetPrimaryKeys(sourceDB, dbName, tableName) if err != nil { return "", errors.Trace(err) } if len(pks) == 1 { return pks[0], nil } uniqueIndexes, err := utils.GetUniqueIndexesWithoutPks(sourceDB, dbName, tableName) if err != nil { return "", errors.Trace(err) } if len(uniqueIndexes) > 0 { return uniqueIndexes[0], nil } rowsCount, err := utils.EstimateRowsCount(sourceDB, dbName, tableName) if err != nil { return "", errors.Trace(err) } if rowsCount < maxFullDumpRowsCount { return "*", nil } return "", errors.Errorf("no scan column can be found automatically for %s.%s", dbName, tableName) } func FindMaxMinValueFromDB(db *sql.DB, dbName string, tableName string, scanColumn string) (interface{}, interface{}) { var max interface{} var min interface{} maxStatement := fmt.Sprintf("SELECT MAX(`%s`) FROM `%s`.`%s`", scanColumn, dbName, tableName) log.Infof("[FindMaxMinValueFromDB] statement: %s", maxStatement) maxRowPtrs, err := utils.QueryGeneralRowsDataWithSQL(db, maxStatement) if err != nil { log.Fatalf("[FindMaxMinValueFromDB] failed to QueryGeneralRowsDataWithSQL, err: %v", errors.ErrorStack(err)) } max = reflect.ValueOf(maxRowPtrs[0][0]).Elem().Interface() minStatement := fmt.Sprintf("SELECT MIN(`%s`) FROM `%s`.`%s`", scanColumn, dbName, tableName) log.Infof("[FindMaxMinValueFromDB] statement: %s", minStatement) minRowPtrs, err := utils.QueryGeneralRowsDataWithSQL(db, minStatement) if err != nil { log.Fatalf("[FindMaxMinValueFromDB] failed to QueryGeneralRowsDataWithSQL, err: %v", errors.ErrorStack(err)) } min = reflect.ValueOf(minRowPtrs[0][0]).Elem().Interface() return max, min } // LoopInBatch will iterate the table by sql like this: // SELECT * FROM a WHERE some_key > some_value LIMIT 10000 // It will get the min, max value of the column and iterate batch by batch func (tableScanner *TableScanner) LoopInBatch(db *sql.DB, tableDef *schema_store.Table, tableConfig *TableConfig, scanColumn string, max position_store.MySQLTablePosition, min position_store.MySQLTablePosition, batch int) { pipelineName := tableScanner.pipelineName if batch <= 0 { log.Fatalf("[LoopInBatch] batch size is 0") } maxMapString, err := max.MapString() if err != nil { log.Fatalf("[LoopInBatch] failed to 
get maxString, max: %v, err: %v", max, errors.ErrorStack(err)) } batchIdx := 0 firstLoop := true maxReached := false var statement string currentMinPos, ok := tableScanner.positionStore.GetCurrent(utils.TableIdentity(tableDef.Schema, tableDef.Name)) if !ok { tableScanner.positionStore.PutCurrent(utils.TableIdentity(tableDef.Schema, tableDef.Name), min) currentMinPos = min } log.Infof("[LoopInBatch] prepare current: %v", currentMinPos) currentMinValue := currentMinPos.Value resultCount := 0 columnTypes, err := GetTableColumnTypes(db, tableDef.Schema, tableDef.Name) if err != nil { log.Fatalf("[LoopInBatch] failed to get columnType, err: %v", errors.ErrorStack(err)) } scanIdx, err := GetScanIdx(columnTypes, scanColumn) if err != nil { log.Fatalf("[LoopInBatch] failed to get scanIdx, err: %v", errors.ErrorStack(err)) } rowsBatchDataPtrs := newBatchDataPtrs(columnTypes, batch) for { if firstLoop { statement = fmt.Sprintf("SELECT * FROM `%s`.`%s` WHERE %s >= ? ORDER BY %s LIMIT ?", tableDef.Schema, tableDef.Name, scanColumn, scanColumn) firstLoop = false } else { statement = fmt.Sprintf("SELECT * FROM `%s`.`%s` WHERE %s > ? ORDER BY %s LIMIT ?", tableDef.Schema, tableDef.Name, scanColumn, scanColumn) } <-tableScanner.throttle.C queryStartTime := time.Now() rows, err := db.Query(statement, currentMinValue, batch) if err != nil { log.Fatalf("[LoopInBatch] table %s.%s, err: %v", tableDef.Schema, tableDef.Name, err) } rowIdx := 0 for rows.Next() { metrics.ScannerJobFetchedCount.WithLabelValues(pipelineName).Add(1) resultCount++ rowsBatchDataPtrs[rowIdx], err = utils.ScanGeneralRowsWithDataPtrs(rows, columnTypes, rowsBatchDataPtrs[rowIdx]) if err != nil { log.Fatalf("[LoopInBatch] table %s.%s, scan error: %v", tableDef.Schema, tableDef.Name, errors.ErrorStack(err)) } currentMinValue = reflect.ValueOf(rowsBatchDataPtrs[rowIdx][scanIdx]).Elem().Interface() rowIdx++ if mysql.MySQLDataEquals(max.Value, currentMinValue) { maxReached = true break } } err = rows.Err() if err != nil { log.Fatalf("[LoopInBatch] table %s.%s, rows err: %v", tableDef.Schema, tableDef.Name, err) } rows.Close() // no result found for this query if rowIdx == 0 { log.Infof("[TableScanner] query result is 0, return") return } metrics.ScannerBatchQueryDuration.WithLabelValues(pipelineName).Observe(time.Now().Sub(queryStartTime).Seconds()) batchIdx++ var lastMsg *core.Msg // process this batch's data for i := 0; i < rowIdx; i++ { rowPtrs := rowsBatchDataPtrs[i] posV := mysql.NormalizeSQLType(reflect.ValueOf(rowPtrs[scanIdx]).Elem().Interface()) position := position_store.MySQLTablePosition{Value: posV, Column: scanColumn} msg := NewMsg(rowPtrs, columnTypes, tableDef, tableScanner.AfterMsgCommit, position) if err := tableScanner.emitter.Emit(msg); err != nil { log.Fatalf("[LoopInBatch] failed to emit job: %v", errors.ErrorStack(err)) } lastMsg = msg } log.Infof("[LoopInBatch] sourceDB: %s, table: %s, currentMinPos: %v, maxMapString.column: %v, maxMapString.value: %v, maxMapString.type: %v, resultCount: %v", tableDef.Schema, tableDef.Name, currentMinValue, maxMapString["column"], maxMapString["value"], maxMapString["type"], resultCount) // we break the loop here in case the currentMinPos comes larger than the max we have in the beginning. 
if maxReached { log.Infof("[LoopInBatch] max reached") if lastMsg != nil { <-lastMsg.Done // close the stream msg := NewCloseInputStreamMsg(tableDef) if err := tableScanner.emitter.Emit(msg); err != nil { log.Fatalf("[LoopInBatch] failed to emit close stream msg: %v", errors.ErrorStack(err)) } log.Infof("[LoopInBatch] sent close input stream msg") } return } select { case <-tableScanner.ctx.Done(): log.Infof("[table_worker] canceled by context") return default: continue } } } func (tableScanner *TableScanner) FindAll(db *sql.DB, tableDef *schema_store.Table, tableConfig *TableConfig) { columnTypes, err := GetTableColumnTypes(db, tableDef.Schema, tableDef.Name) if err != nil { log.Fatalf("[FindAll] failed to get columnType: %v", errors.ErrorStack(err)) } statement := fmt.Sprintf("SELECT * FROM `%s`.`%s`", tableDef.Schema, tableDef.Name) allData, err := utils.QueryGeneralRowsDataWithSQL(db, statement) if err != nil { log.Fatalf("[FindAll] failed to find all, err: %v", errors.ErrorStack(err)) } for i := range allData { rowPtrs := allData[i] msg := NewMsg(rowPtrs, columnTypes, tableDef, nil, position_store.MySQLTablePosition{}) if err := tableScanner.emitter.Emit(msg); err != nil { log.Fatalf("[tableScanner] failed to emit: %v", errors.ErrorStack(err)) } } } func (tableScanner *TableScanner) AfterMsgCommit(msg *core.Msg) error { p, ok := msg.InputContext.(position_store.MySQLTablePosition) if !ok { return errors.Errorf("type invalid") } tableScanner.positionStore.PutCurrent(*msg.InputStreamKey, p) return nil } func (tableScanner *TableScanner) initTableDDL(table *schema_store.Table) error { row := tableScanner.db.QueryRow(fmt.Sprintf("SHOW CREATE TABLE `%s`.`%s`", table.Schema, table.Name)) var t, create string err := row.Scan(&t, &create) if err != nil { return errors.Trace(err) } msg := NewCreateTableMsg(tableScanner.parser, table, create) if err := tableScanner.emitter.Emit(msg); err != nil { return errors.Trace(err) } <-msg.Done return nil } func GetTableColumnTypes(db *sql.DB, schema string, table string) ([]*sql.ColumnType, error)
func GetScanIdx(columnTypes []*sql.ColumnType, scanColumn string) (int, error) { for i := range columnTypes { if columnTypes[i].Name() == scanColumn { return i, nil } } return 0, errors.Errorf("cannot find scan index") } func newBatchDataPtrs(columnTypes []*sql.ColumnType, batch int) [][]interface{} { ret := make([][]interface{}, batch) for batchIdx := 0; batchIdx < batch; batchIdx++ { vPtrs := make([]interface{}, len(columnTypes)) for columnIdx := range columnTypes { scanType := utils.GetScanType(columnTypes[columnIdx]) vptr := reflect.New(scanType) vPtrs[columnIdx] = vptr.Interface() } ret[batchIdx] = vPtrs } return ret } func NewTableScanner( pipelineName string, tableWorkC chan *TableWork, db *sql.DB, positionStore position_store.MySQLTablePositionStore, emitter core.Emitter, throttle *time.Ticker, schemaStore schema_store.SchemaStore, cfg *PluginConfig, ctx context.Context) *TableScanner { tableScanner := TableScanner{ pipelineName: pipelineName, tableWorkC: tableWorkC, db: db, positionStore: positionStore, emitter: emitter, throttle: throttle, schemaStore: schemaStore, cfg: cfg, ctx: ctx, parser: parser.New(), } return &tableScanner } func String2Val(s string, scanType string) interface{} { var currentMin interface{} var err error if scanType == "string" { currentMin = s } else if scanType == "int" { currentMin, err = strconv.Atoi(s) if err != nil { log.Fatalf("[LoopInBatch] failed to convert string to int: %v", err) } } else { log.Infof("[LoopInBatch] scanColumn not supported") } return currentMin }

{ statement := fmt.Sprintf("SELECT * FROM `%s`.`%s` LIMIT 1", schema, table) rows, err := db.Query(statement) if err != nil { return nil, errors.Trace(err) } defer rows.Close() return rows.ColumnTypes() }
identifier_body
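GetTableColumnTypes (whose body is the masked span above) fetches the driver's column metadata so that newBatchDataPtrs can pre-allocate one typed pointer per column for rows.Scan. A minimal sketch of that pattern, using the standard ColumnType.ScanType() in place of gravity's utils.GetScanType, is shown below; scanRowGeneric is a hypothetical name used only for illustration:

package main

import (
	"database/sql"
	"reflect"
)

// scanRowGeneric allocates one pointer per column based on the driver's
// reported scan type, then passes those pointers to rows.Scan, mirroring the
// GetTableColumnTypes + newBatchDataPtrs pattern above.
func scanRowGeneric(rows *sql.Rows, columnTypes []*sql.ColumnType) ([]interface{}, error) {
	dest := make([]interface{}, len(columnTypes))
	for i, ct := range columnTypes {
		scanType := ct.ScanType()
		if scanType == nil {
			scanType = reflect.TypeOf([]byte(nil)) // fall back to raw bytes when the driver reports no type
		}
		dest[i] = reflect.New(scanType).Interface()
	}
	if err := rows.Scan(dest...); err != nil {
		return nil, err
	}
	return dest, nil
}

Reusing the pre-allocated pointer slices batch after batch, as newBatchDataPtrs does, avoids allocating fresh scan destinations for every row.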
mysql_table_scanner.go
package mysqlbatch import ( "context" "database/sql" "fmt" "reflect" "strconv" "sync" "time"
log "github.com/sirupsen/logrus" "github.com/moiot/gravity/pkg/core" "github.com/moiot/gravity/pkg/metrics" "github.com/moiot/gravity/pkg/mysql" "github.com/moiot/gravity/pkg/position_store" "github.com/moiot/gravity/pkg/schema_store" "github.com/moiot/gravity/pkg/utils" ) var ErrTableEmpty = errors.New("table_scanner: this table is empty") type TableScanner struct { pipelineName string tableWorkC chan *TableWork cfg *PluginConfig positionStore position_store.MySQLTablePositionStore db *sql.DB emitter core.Emitter throttle *time.Ticker ctx context.Context schemaStore schema_store.SchemaStore wg sync.WaitGroup parser *parser.Parser } func (tableScanner *TableScanner) Start() error { tableScanner.wg.Add(1) go func() { defer tableScanner.wg.Done() for { select { case work, ok := <-tableScanner.tableWorkC: if !ok { log.Infof("[TableScanner] queue closed, exit") return } err := tableScanner.initTableDDL(work.TableDef) if err != nil { log.Fatalf("[TableScanner] initTableDDL for %s.%s, err: %s", work.TableDef.Schema, work.TableDef.Name, err) } err = tableScanner.InitTablePosition(work.TableDef, work.TableConfig) if err == ErrTableEmpty { log.Infof("[TableScanner] Target table is empty. schema: %v, table: %v", work.TableDef.Schema, work.TableDef.Name) continue } else if err != nil { log.Fatalf("[TableScanner] InitTablePosition failed: %v", errors.ErrorStack(err)) } max, min, ok := tableScanner.positionStore.GetMaxMin(utils.TableIdentity(work.TableDef.Schema, work.TableDef.Name)) log.Infof("positionStore.GetMaxMin: max value type: %v, max %v; min value type: %v, min %v", reflect.TypeOf(max.Value), max, reflect.TypeOf(min.Value), min) scanColumn := max.Column if !ok { log.Fatalf("[table_scanner] failed to find max min") } // If the scan column is *, then we do a full dump of the table if scanColumn == "*" { tableScanner.FindAll(tableScanner.db, work.TableDef, work.TableConfig) } else { tableScanner.LoopInBatch( tableScanner.db, work.TableDef, work.TableConfig, scanColumn, max, min, tableScanner.cfg.TableScanBatch) if tableScanner.ctx.Err() == nil { log.Infof("[table_worker] LoopInBatch done with table %s", work.TableDef.Name) } else if tableScanner.ctx.Err() == context.Canceled { log.Infof("[TableScanner] LoopInBatch canceled") return } else { log.Fatalf("[TableScanner] LoopInBatch unknow case,err: %v", tableScanner.ctx.Err()) } } case <-tableScanner.ctx.Done(): log.Infof("[TableScanner] canceled by context") return } } }() return nil } func (tableScanner *TableScanner) InitTablePosition(tableDef *schema_store.Table, tableConfig *TableConfig) error { _, _, ok := tableScanner.positionStore.GetMaxMin(utils.TableIdentity(tableDef.Schema, tableDef.Name)) if !ok { log.Infof("[InitTablePosition] init table position") // detect scan column first var scanColumn string var scanType string column, err := DetectScanColumn(tableScanner.db, tableDef.Schema, tableDef.Name, tableScanner.cfg.MaxFullDumpCount) if err != nil { return errors.Trace(err) } scanColumn = column if scanColumn == "*" { maxPos := position_store.MySQLTablePosition{Column: scanColumn} minPos := position_store.MySQLTablePosition{Column: scanColumn} tableScanner.positionStore.PutMaxMin(utils.TableIdentity(tableDef.Schema, tableDef.Name), maxPos, minPos) } else { max, min := FindMaxMinValueFromDB(tableScanner.db, tableDef.Schema, tableDef.Name, scanColumn) maxPos := position_store.MySQLTablePosition{Value: max, Type: scanType, Column: scanColumn} empty, err := tableScanner.validateTableEmpty(maxPos) if err != nil { return errors.Trace(err) } if 
empty { return ErrTableEmpty } minPos := position_store.MySQLTablePosition{Value: min, Type: scanType, Column: scanColumn} tableScanner.positionStore.PutMaxMin(utils.TableIdentity(tableDef.Schema, tableDef.Name), maxPos, minPos) log.Infof("[InitTablePosition] PutMaxMin: max value type: %v, max: %v; min value type: %v, min: %v", reflect.TypeOf(maxPos.Value), maxPos, reflect.TypeOf(minPos.Value), minPos) } log.Infof("[InitTablePosition] schema: %v, table: %v, scanColumn: %v", tableDef.Schema, tableDef.Name, scanColumn) } return nil } func (tableScanner *TableScanner) validateTableEmpty(pos position_store.MySQLTablePosition) (bool, error) { mapStr, err := pos.MapString() if err != nil { return false, errors.Trace(err) } return mapStr["value"] == "", nil } func (tableScanner *TableScanner) Wait() { tableScanner.wg.Wait() } // DetectScanColumn find a column that we used to scan the table // SHOW INDEX FROM .. // Pick primary key, if there is only one primary key // If pk not found try using unique index // fail func DetectScanColumn(sourceDB *sql.DB, dbName string, tableName string, maxFullDumpRowsCount int) (string, error) { pks, err := utils.GetPrimaryKeys(sourceDB, dbName, tableName) if err != nil { return "", errors.Trace(err) } if len(pks) == 1 { return pks[0], nil } uniqueIndexes, err := utils.GetUniqueIndexesWithoutPks(sourceDB, dbName, tableName) if err != nil { return "", errors.Trace(err) } if len(uniqueIndexes) > 0 { return uniqueIndexes[0], nil } rowsCount, err := utils.EstimateRowsCount(sourceDB, dbName, tableName) if err != nil { return "", errors.Trace(err) } if rowsCount < maxFullDumpRowsCount { return "*", nil } return "", errors.Errorf("no scan column can be found automatically for %s.%s", dbName, tableName) } func FindMaxMinValueFromDB(db *sql.DB, dbName string, tableName string, scanColumn string) (interface{}, interface{}) { var max interface{} var min interface{} maxStatement := fmt.Sprintf("SELECT MAX(`%s`) FROM `%s`.`%s`", scanColumn, dbName, tableName) log.Infof("[FindMaxMinValueFromDB] statement: %s", maxStatement) maxRowPtrs, err := utils.QueryGeneralRowsDataWithSQL(db, maxStatement) if err != nil { log.Fatalf("[FindMaxMinValueFromDB] failed to QueryGeneralRowsDataWithSQL, err: %v", errors.ErrorStack(err)) } max = reflect.ValueOf(maxRowPtrs[0][0]).Elem().Interface() minStatement := fmt.Sprintf("SELECT MIN(`%s`) FROM `%s`.`%s`", scanColumn, dbName, tableName) log.Infof("[FindMaxMinValueFromDB] statement: %s", minStatement) minRowPtrs, err := utils.QueryGeneralRowsDataWithSQL(db, minStatement) if err != nil { log.Fatalf("[FindMaxMinValueFromDB] failed to QueryGeneralRowsDataWithSQL, err: %v", errors.ErrorStack(err)) } min = reflect.ValueOf(minRowPtrs[0][0]).Elem().Interface() return max, min } // LoopInBatch will iterate the table by sql like this: // SELECT * FROM a WHERE some_key > some_value LIMIT 10000 // It will get the min, max value of the column and iterate batch by batch func (tableScanner *TableScanner) LoopInBatch(db *sql.DB, tableDef *schema_store.Table, tableConfig *TableConfig, scanColumn string, max position_store.MySQLTablePosition, min position_store.MySQLTablePosition, batch int) { pipelineName := tableScanner.pipelineName if batch <= 0 { log.Fatalf("[LoopInBatch] batch size is 0") } maxMapString, err := max.MapString() if err != nil { log.Fatalf("[LoopInBatch] failed to get maxString, max: %v, err: %v", max, errors.ErrorStack(err)) } batchIdx := 0 firstLoop := true maxReached := false var statement string currentMinPos, ok := 
tableScanner.positionStore.GetCurrent(utils.TableIdentity(tableDef.Schema, tableDef.Name)) if !ok { tableScanner.positionStore.PutCurrent(utils.TableIdentity(tableDef.Schema, tableDef.Name), min) currentMinPos = min } log.Infof("[LoopInBatch] prepare current: %v", currentMinPos) currentMinValue := currentMinPos.Value resultCount := 0 columnTypes, err := GetTableColumnTypes(db, tableDef.Schema, tableDef.Name) if err != nil { log.Fatalf("[LoopInBatch] failed to get columnType, err: %v", errors.ErrorStack(err)) } scanIdx, err := GetScanIdx(columnTypes, scanColumn) if err != nil { log.Fatalf("[LoopInBatch] failed to get scanIdx, err: %v", errors.ErrorStack(err)) } rowsBatchDataPtrs := newBatchDataPtrs(columnTypes, batch) for { if firstLoop { statement = fmt.Sprintf("SELECT * FROM `%s`.`%s` WHERE %s >= ? ORDER BY %s LIMIT ?", tableDef.Schema, tableDef.Name, scanColumn, scanColumn) firstLoop = false } else { statement = fmt.Sprintf("SELECT * FROM `%s`.`%s` WHERE %s > ? ORDER BY %s LIMIT ?", tableDef.Schema, tableDef.Name, scanColumn, scanColumn) } <-tableScanner.throttle.C queryStartTime := time.Now() rows, err := db.Query(statement, currentMinValue, batch) if err != nil { log.Fatalf("[LoopInBatch] table %s.%s, err: %v", tableDef.Schema, tableDef.Name, err) } rowIdx := 0 for rows.Next() { metrics.ScannerJobFetchedCount.WithLabelValues(pipelineName).Add(1) resultCount++ rowsBatchDataPtrs[rowIdx], err = utils.ScanGeneralRowsWithDataPtrs(rows, columnTypes, rowsBatchDataPtrs[rowIdx]) if err != nil { log.Fatalf("[LoopInBatch] table %s.%s, scan error: %v", tableDef.Schema, tableDef.Name, errors.ErrorStack(err)) } currentMinValue = reflect.ValueOf(rowsBatchDataPtrs[rowIdx][scanIdx]).Elem().Interface() rowIdx++ if mysql.MySQLDataEquals(max.Value, currentMinValue) { maxReached = true break } } err = rows.Err() if err != nil { log.Fatalf("[LoopInBatch] table %s.%s, rows err: %v", tableDef.Schema, tableDef.Name, err) } rows.Close() // no result found for this query if rowIdx == 0 { log.Infof("[TableScanner] query result is 0, return") return } metrics.ScannerBatchQueryDuration.WithLabelValues(pipelineName).Observe(time.Now().Sub(queryStartTime).Seconds()) batchIdx++ var lastMsg *core.Msg // process this batch's data for i := 0; i < rowIdx; i++ { rowPtrs := rowsBatchDataPtrs[i] posV := mysql.NormalizeSQLType(reflect.ValueOf(rowPtrs[scanIdx]).Elem().Interface()) position := position_store.MySQLTablePosition{Value: posV, Column: scanColumn} msg := NewMsg(rowPtrs, columnTypes, tableDef, tableScanner.AfterMsgCommit, position) if err := tableScanner.emitter.Emit(msg); err != nil { log.Fatalf("[LoopInBatch] failed to emit job: %v", errors.ErrorStack(err)) } lastMsg = msg } log.Infof("[LoopInBatch] sourceDB: %s, table: %s, currentMinPos: %v, maxMapString.column: %v, maxMapString.value: %v, maxMapString.type: %v, resultCount: %v", tableDef.Schema, tableDef.Name, currentMinValue, maxMapString["column"], maxMapString["value"], maxMapString["type"], resultCount) // we break the loop here in case the currentMinPos comes larger than the max we have in the beginning. 
if maxReached { log.Infof("[LoopInBatch] max reached") if lastMsg != nil { <-lastMsg.Done // close the stream msg := NewCloseInputStreamMsg(tableDef) if err := tableScanner.emitter.Emit(msg); err != nil { log.Fatalf("[LoopInBatch] failed to emit close stream msg: %v", errors.ErrorStack(err)) } log.Infof("[LoopInBatch] sent close input stream msg") } return } select { case <-tableScanner.ctx.Done(): log.Infof("[table_worker] canceled by context") return default: continue } } } func (tableScanner *TableScanner) FindAll(db *sql.DB, tableDef *schema_store.Table, tableConfig *TableConfig) { columnTypes, err := GetTableColumnTypes(db, tableDef.Schema, tableDef.Name) if err != nil { log.Fatalf("[FindAll] failed to get columnType: %v", errors.ErrorStack(err)) } statement := fmt.Sprintf("SELECT * FROM `%s`.`%s`", tableDef.Schema, tableDef.Name) allData, err := utils.QueryGeneralRowsDataWithSQL(db, statement) if err != nil { log.Fatalf("[FindAll] failed to find all, err: %v", errors.ErrorStack(err)) } for i := range allData { rowPtrs := allData[i] msg := NewMsg(rowPtrs, columnTypes, tableDef, nil, position_store.MySQLTablePosition{}) if err := tableScanner.emitter.Emit(msg); err != nil { log.Fatalf("[tableScanner] failed to emit: %v", errors.ErrorStack(err)) } } } func (tableScanner *TableScanner) AfterMsgCommit(msg *core.Msg) error { p, ok := msg.InputContext.(position_store.MySQLTablePosition) if !ok { return errors.Errorf("type invalid") } tableScanner.positionStore.PutCurrent(*msg.InputStreamKey, p) return nil } func (tableScanner *TableScanner) initTableDDL(table *schema_store.Table) error { row := tableScanner.db.QueryRow(fmt.Sprintf("SHOW CREATE TABLE `%s`.`%s`", table.Schema, table.Name)) var t, create string err := row.Scan(&t, &create) if err != nil { return errors.Trace(err) } msg := NewCreateTableMsg(tableScanner.parser, table, create) if err := tableScanner.emitter.Emit(msg); err != nil { return errors.Trace(err) } <-msg.Done return nil } func GetTableColumnTypes(db *sql.DB, schema string, table string) ([]*sql.ColumnType, error) { statement := fmt.Sprintf("SELECT * FROM `%s`.`%s` LIMIT 1", schema, table) rows, err := db.Query(statement) if err != nil { return nil, errors.Trace(err) } defer rows.Close() return rows.ColumnTypes() } func GetScanIdx(columnTypes []*sql.ColumnType, scanColumn string) (int, error) { for i := range columnTypes { if columnTypes[i].Name() == scanColumn { return i, nil } } return 0, errors.Errorf("cannot find scan index") } func newBatchDataPtrs(columnTypes []*sql.ColumnType, batch int) [][]interface{} { ret := make([][]interface{}, batch) for batchIdx := 0; batchIdx < batch; batchIdx++ { vPtrs := make([]interface{}, len(columnTypes)) for columnIdx, _ := range columnTypes { scanType := utils.GetScanType(columnTypes[columnIdx]) vptr := reflect.New(scanType) vPtrs[columnIdx] = vptr.Interface() } ret[batchIdx] = vPtrs } return ret } func NewTableScanner( pipelineName string, tableWorkC chan *TableWork, db *sql.DB, positionStore position_store.MySQLTablePositionStore, emitter core.Emitter, throttle *time.Ticker, schemaStore schema_store.SchemaStore, cfg *PluginConfig, ctx context.Context) *TableScanner { tableScanner := TableScanner{ pipelineName: pipelineName, tableWorkC: tableWorkC, db: db, positionStore: positionStore, emitter: emitter, throttle: throttle, schemaStore: schemaStore, cfg: cfg, ctx: ctx, parser: parser.New(), } return &tableScanner } func String2Val(s string, scanType string) interface{} { var currentMin interface{} var err error if scanType == 
"string" { currentMin = s } else if scanType == "int" { currentMin, err = strconv.Atoi(s) if err != nil { log.Fatalf("[LoopInBatch] failed to convert string to int: %v", err) } } else { log.Infof("[LoopInBatch] scanColumn not supported") } return currentMin }
"github.com/juju/errors" "github.com/pingcap/parser"
random_line_split
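DetectScanColumn's comment above lays out a priority order: use a single-column primary key, otherwise a unique index, otherwise fall back to a full dump ("*") when the estimated row count is small, and fail if none of these apply. The sketch below illustrates that decision with a plain information_schema lookup; it omits the unique-index step for brevity, and pickScanColumn plus its query are illustrative assumptions rather than gravity's implementation:

package main

import (
	"database/sql"
	"fmt"
)

// pickScanColumn sketches DetectScanColumn's priority: a single-column primary
// key wins, a small table falls back to a full dump, anything else is an error.
func pickScanColumn(db *sql.DB, schema, table string, estimatedRows, maxFullDump int) (string, error) {
	rows, err := db.Query(`
		SELECT COLUMN_NAME FROM information_schema.KEY_COLUMN_USAGE
		WHERE TABLE_SCHEMA = ? AND TABLE_NAME = ? AND CONSTRAINT_NAME = 'PRIMARY'
		ORDER BY ORDINAL_POSITION`, schema, table)
	if err != nil {
		return "", err
	}
	defer rows.Close()
	var pkCols []string
	for rows.Next() {
		var col string
		if err := rows.Scan(&col); err != nil {
			return "", err
		}
		pkCols = append(pkCols, col)
	}
	if err := rows.Err(); err != nil {
		return "", err
	}
	if len(pkCols) == 1 {
		return pkCols[0], nil // a single-column primary key is the ideal scan column
	}
	if estimatedRows < maxFullDump {
		return "*", nil // small table: full dump instead of keyset pagination
	}
	return "", fmt.Errorf("no scan column found for %s.%s", schema, table)
}

Requiring a single-column key keeps the keyset pagination predicate simple; composite keys would need tuple comparisons, which the scanner above does not attempt.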
mysql_table_scanner.go
package mysqlbatch import ( "context" "database/sql" "fmt" "reflect" "strconv" "sync" "time" "github.com/juju/errors" "github.com/pingcap/parser" log "github.com/sirupsen/logrus" "github.com/moiot/gravity/pkg/core" "github.com/moiot/gravity/pkg/metrics" "github.com/moiot/gravity/pkg/mysql" "github.com/moiot/gravity/pkg/position_store" "github.com/moiot/gravity/pkg/schema_store" "github.com/moiot/gravity/pkg/utils" ) var ErrTableEmpty = errors.New("table_scanner: this table is empty") type TableScanner struct { pipelineName string tableWorkC chan *TableWork cfg *PluginConfig positionStore position_store.MySQLTablePositionStore db *sql.DB emitter core.Emitter throttle *time.Ticker ctx context.Context schemaStore schema_store.SchemaStore wg sync.WaitGroup parser *parser.Parser } func (tableScanner *TableScanner) Start() error { tableScanner.wg.Add(1) go func() { defer tableScanner.wg.Done() for { select { case work, ok := <-tableScanner.tableWorkC: if !ok { log.Infof("[TableScanner] queue closed, exit") return } err := tableScanner.initTableDDL(work.TableDef) if err != nil { log.Fatalf("[TableScanner] initTableDDL for %s.%s, err: %s", work.TableDef.Schema, work.TableDef.Name, err) } err = tableScanner.InitTablePosition(work.TableDef, work.TableConfig) if err == ErrTableEmpty { log.Infof("[TableScanner] Target table is empty. schema: %v, table: %v", work.TableDef.Schema, work.TableDef.Name) continue } else if err != nil { log.Fatalf("[TableScanner] InitTablePosition failed: %v", errors.ErrorStack(err)) } max, min, ok := tableScanner.positionStore.GetMaxMin(utils.TableIdentity(work.TableDef.Schema, work.TableDef.Name)) log.Infof("positionStore.GetMaxMin: max value type: %v, max %v; min value type: %v, min %v", reflect.TypeOf(max.Value), max, reflect.TypeOf(min.Value), min) scanColumn := max.Column if !ok { log.Fatalf("[table_scanner] failed to find max min") } // If the scan column is *, then we do a full dump of the table if scanColumn == "*" { tableScanner.FindAll(tableScanner.db, work.TableDef, work.TableConfig) } else { tableScanner.LoopInBatch( tableScanner.db, work.TableDef, work.TableConfig, scanColumn, max, min, tableScanner.cfg.TableScanBatch) if tableScanner.ctx.Err() == nil { log.Infof("[table_worker] LoopInBatch done with table %s", work.TableDef.Name) } else if tableScanner.ctx.Err() == context.Canceled { log.Infof("[TableScanner] LoopInBatch canceled") return } else { log.Fatalf("[TableScanner] LoopInBatch unknow case,err: %v", tableScanner.ctx.Err()) } } case <-tableScanner.ctx.Done(): log.Infof("[TableScanner] canceled by context") return } } }() return nil } func (tableScanner *TableScanner) InitTablePosition(tableDef *schema_store.Table, tableConfig *TableConfig) error { _, _, ok := tableScanner.positionStore.GetMaxMin(utils.TableIdentity(tableDef.Schema, tableDef.Name)) if !ok { log.Infof("[InitTablePosition] init table position") // detect scan column first var scanColumn string var scanType string column, err := DetectScanColumn(tableScanner.db, tableDef.Schema, tableDef.Name, tableScanner.cfg.MaxFullDumpCount) if err != nil { return errors.Trace(err) } scanColumn = column if scanColumn == "*" { maxPos := position_store.MySQLTablePosition{Column: scanColumn} minPos := position_store.MySQLTablePosition{Column: scanColumn} tableScanner.positionStore.PutMaxMin(utils.TableIdentity(tableDef.Schema, tableDef.Name), maxPos, minPos) } else { max, min := FindMaxMinValueFromDB(tableScanner.db, tableDef.Schema, tableDef.Name, scanColumn) maxPos := 
position_store.MySQLTablePosition{Value: max, Type: scanType, Column: scanColumn} empty, err := tableScanner.validateTableEmpty(maxPos) if err != nil { return errors.Trace(err) } if empty { return ErrTableEmpty } minPos := position_store.MySQLTablePosition{Value: min, Type: scanType, Column: scanColumn} tableScanner.positionStore.PutMaxMin(utils.TableIdentity(tableDef.Schema, tableDef.Name), maxPos, minPos) log.Infof("[InitTablePosition] PutMaxMin: max value type: %v, max: %v; min value type: %v, min: %v", reflect.TypeOf(maxPos.Value), maxPos, reflect.TypeOf(minPos.Value), minPos) } log.Infof("[InitTablePosition] schema: %v, table: %v, scanColumn: %v", tableDef.Schema, tableDef.Name, scanColumn) } return nil } func (tableScanner *TableScanner) validateTableEmpty(pos position_store.MySQLTablePosition) (bool, error) { mapStr, err := pos.MapString() if err != nil { return false, errors.Trace(err) } return mapStr["value"] == "", nil } func (tableScanner *TableScanner) Wait() { tableScanner.wg.Wait() } // DetectScanColumn find a column that we used to scan the table // SHOW INDEX FROM .. // Pick primary key, if there is only one primary key // If pk not found try using unique index // fail func DetectScanColumn(sourceDB *sql.DB, dbName string, tableName string, maxFullDumpRowsCount int) (string, error) { pks, err := utils.GetPrimaryKeys(sourceDB, dbName, tableName) if err != nil { return "", errors.Trace(err) } if len(pks) == 1 { return pks[0], nil } uniqueIndexes, err := utils.GetUniqueIndexesWithoutPks(sourceDB, dbName, tableName) if err != nil { return "", errors.Trace(err) } if len(uniqueIndexes) > 0 { return uniqueIndexes[0], nil } rowsCount, err := utils.EstimateRowsCount(sourceDB, dbName, tableName) if err != nil { return "", errors.Trace(err) } if rowsCount < maxFullDumpRowsCount { return "*", nil } return "", errors.Errorf("no scan column can be found automatically for %s.%s", dbName, tableName) } func FindMaxMinValueFromDB(db *sql.DB, dbName string, tableName string, scanColumn string) (interface{}, interface{}) { var max interface{} var min interface{} maxStatement := fmt.Sprintf("SELECT MAX(`%s`) FROM `%s`.`%s`", scanColumn, dbName, tableName) log.Infof("[FindMaxMinValueFromDB] statement: %s", maxStatement) maxRowPtrs, err := utils.QueryGeneralRowsDataWithSQL(db, maxStatement) if err != nil { log.Fatalf("[FindMaxMinValueFromDB] failed to QueryGeneralRowsDataWithSQL, err: %v", errors.ErrorStack(err)) } max = reflect.ValueOf(maxRowPtrs[0][0]).Elem().Interface() minStatement := fmt.Sprintf("SELECT MIN(`%s`) FROM `%s`.`%s`", scanColumn, dbName, tableName) log.Infof("[FindMaxMinValueFromDB] statement: %s", minStatement) minRowPtrs, err := utils.QueryGeneralRowsDataWithSQL(db, minStatement) if err != nil { log.Fatalf("[FindMaxMinValueFromDB] failed to QueryGeneralRowsDataWithSQL, err: %v", errors.ErrorStack(err)) } min = reflect.ValueOf(minRowPtrs[0][0]).Elem().Interface() return max, min } // LoopInBatch will iterate the table by sql like this: // SELECT * FROM a WHERE some_key > some_value LIMIT 10000 // It will get the min, max value of the column and iterate batch by batch func (tableScanner *TableScanner) LoopInBatch(db *sql.DB, tableDef *schema_store.Table, tableConfig *TableConfig, scanColumn string, max position_store.MySQLTablePosition, min position_store.MySQLTablePosition, batch int) { pipelineName := tableScanner.pipelineName if batch <= 0 { log.Fatalf("[LoopInBatch] batch size is 0") } maxMapString, err := max.MapString() if err != nil { log.Fatalf("[LoopInBatch] failed to 
get maxString, max: %v, err: %v", max, errors.ErrorStack(err)) } batchIdx := 0 firstLoop := true maxReached := false var statement string currentMinPos, ok := tableScanner.positionStore.GetCurrent(utils.TableIdentity(tableDef.Schema, tableDef.Name)) if !ok { tableScanner.positionStore.PutCurrent(utils.TableIdentity(tableDef.Schema, tableDef.Name), min) currentMinPos = min } log.Infof("[LoopInBatch] prepare current: %v", currentMinPos) currentMinValue := currentMinPos.Value resultCount := 0 columnTypes, err := GetTableColumnTypes(db, tableDef.Schema, tableDef.Name) if err != nil { log.Fatalf("[LoopInBatch] failed to get columnType, err: %v", errors.ErrorStack(err)) } scanIdx, err := GetScanIdx(columnTypes, scanColumn) if err != nil { log.Fatalf("[LoopInBatch] failed to get scanIdx, err: %v", errors.ErrorStack(err)) } rowsBatchDataPtrs := newBatchDataPtrs(columnTypes, batch) for { if firstLoop { statement = fmt.Sprintf("SELECT * FROM `%s`.`%s` WHERE %s >= ? ORDER BY %s LIMIT ?", tableDef.Schema, tableDef.Name, scanColumn, scanColumn) firstLoop = false } else { statement = fmt.Sprintf("SELECT * FROM `%s`.`%s` WHERE %s > ? ORDER BY %s LIMIT ?", tableDef.Schema, tableDef.Name, scanColumn, scanColumn) } <-tableScanner.throttle.C queryStartTime := time.Now() rows, err := db.Query(statement, currentMinValue, batch) if err != nil { log.Fatalf("[LoopInBatch] table %s.%s, err: %v", tableDef.Schema, tableDef.Name, err) } rowIdx := 0 for rows.Next() { metrics.ScannerJobFetchedCount.WithLabelValues(pipelineName).Add(1) resultCount++ rowsBatchDataPtrs[rowIdx], err = utils.ScanGeneralRowsWithDataPtrs(rows, columnTypes, rowsBatchDataPtrs[rowIdx]) if err != nil { log.Fatalf("[LoopInBatch] table %s.%s, scan error: %v", tableDef.Schema, tableDef.Name, errors.ErrorStack(err)) } currentMinValue = reflect.ValueOf(rowsBatchDataPtrs[rowIdx][scanIdx]).Elem().Interface() rowIdx++ if mysql.MySQLDataEquals(max.Value, currentMinValue) { maxReached = true break } } err = rows.Err() if err != nil { log.Fatalf("[LoopInBatch] table %s.%s, rows err: %v", tableDef.Schema, tableDef.Name, err) } rows.Close() // no result found for this query if rowIdx == 0 { log.Infof("[TableScanner] query result is 0, return") return } metrics.ScannerBatchQueryDuration.WithLabelValues(pipelineName).Observe(time.Now().Sub(queryStartTime).Seconds()) batchIdx++ var lastMsg *core.Msg // process this batch's data for i := 0; i < rowIdx; i++ { rowPtrs := rowsBatchDataPtrs[i] posV := mysql.NormalizeSQLType(reflect.ValueOf(rowPtrs[scanIdx]).Elem().Interface()) position := position_store.MySQLTablePosition{Value: posV, Column: scanColumn} msg := NewMsg(rowPtrs, columnTypes, tableDef, tableScanner.AfterMsgCommit, position) if err := tableScanner.emitter.Emit(msg); err != nil { log.Fatalf("[LoopInBatch] failed to emit job: %v", errors.ErrorStack(err)) } lastMsg = msg } log.Infof("[LoopInBatch] sourceDB: %s, table: %s, currentMinPos: %v, maxMapString.column: %v, maxMapString.value: %v, maxMapString.type: %v, resultCount: %v", tableDef.Schema, tableDef.Name, currentMinValue, maxMapString["column"], maxMapString["value"], maxMapString["type"], resultCount) // we break the loop here in case the currentMinPos comes larger than the max we have in the beginning. 
if maxReached { log.Infof("[LoopInBatch] max reached") if lastMsg != nil { <-lastMsg.Done // close the stream msg := NewCloseInputStreamMsg(tableDef) if err := tableScanner.emitter.Emit(msg); err != nil { log.Fatalf("[LoopInBatch] failed to emit close stream msg: %v", errors.ErrorStack(err)) } log.Infof("[LoopInBatch] sent close input stream msg") } return } select { case <-tableScanner.ctx.Done(): log.Infof("[table_worker] canceled by context") return default: continue } } } func (tableScanner *TableScanner) FindAll(db *sql.DB, tableDef *schema_store.Table, tableConfig *TableConfig) { columnTypes, err := GetTableColumnTypes(db, tableDef.Schema, tableDef.Name) if err != nil { log.Fatalf("[FindAll] failed to get columnType: %v", errors.ErrorStack(err)) } statement := fmt.Sprintf("SELECT * FROM `%s`.`%s`", tableDef.Schema, tableDef.Name) allData, err := utils.QueryGeneralRowsDataWithSQL(db, statement) if err != nil { log.Fatalf("[FindAll] failed to find all, err: %v", errors.ErrorStack(err)) } for i := range allData { rowPtrs := allData[i] msg := NewMsg(rowPtrs, columnTypes, tableDef, nil, position_store.MySQLTablePosition{}) if err := tableScanner.emitter.Emit(msg); err != nil
} } func (tableScanner *TableScanner) AfterMsgCommit(msg *core.Msg) error { p, ok := msg.InputContext.(position_store.MySQLTablePosition) if !ok { return errors.Errorf("type invalid") } tableScanner.positionStore.PutCurrent(*msg.InputStreamKey, p) return nil } func (tableScanner *TableScanner) initTableDDL(table *schema_store.Table) error { row := tableScanner.db.QueryRow(fmt.Sprintf("SHOW CREATE TABLE `%s`.`%s`", table.Schema, table.Name)) var t, create string err := row.Scan(&t, &create) if err != nil { return errors.Trace(err) } msg := NewCreateTableMsg(tableScanner.parser, table, create) if err := tableScanner.emitter.Emit(msg); err != nil { return errors.Trace(err) } <-msg.Done return nil } func GetTableColumnTypes(db *sql.DB, schema string, table string) ([]*sql.ColumnType, error) { statement := fmt.Sprintf("SELECT * FROM `%s`.`%s` LIMIT 1", schema, table) rows, err := db.Query(statement) if err != nil { return nil, errors.Trace(err) } defer rows.Close() return rows.ColumnTypes() } func GetScanIdx(columnTypes []*sql.ColumnType, scanColumn string) (int, error) { for i := range columnTypes { if columnTypes[i].Name() == scanColumn { return i, nil } } return 0, errors.Errorf("cannot find scan index") } func newBatchDataPtrs(columnTypes []*sql.ColumnType, batch int) [][]interface{} { ret := make([][]interface{}, batch) for batchIdx := 0; batchIdx < batch; batchIdx++ { vPtrs := make([]interface{}, len(columnTypes)) for columnIdx, _ := range columnTypes { scanType := utils.GetScanType(columnTypes[columnIdx]) vptr := reflect.New(scanType) vPtrs[columnIdx] = vptr.Interface() } ret[batchIdx] = vPtrs } return ret } func NewTableScanner( pipelineName string, tableWorkC chan *TableWork, db *sql.DB, positionStore position_store.MySQLTablePositionStore, emitter core.Emitter, throttle *time.Ticker, schemaStore schema_store.SchemaStore, cfg *PluginConfig, ctx context.Context) *TableScanner { tableScanner := TableScanner{ pipelineName: pipelineName, tableWorkC: tableWorkC, db: db, positionStore: positionStore, emitter: emitter, throttle: throttle, schemaStore: schemaStore, cfg: cfg, ctx: ctx, parser: parser.New(), } return &tableScanner } func String2Val(s string, scanType string) interface{} { var currentMin interface{} var err error if scanType == "string" { currentMin = s } else if scanType == "int" { currentMin, err = strconv.Atoi(s) if err != nil { log.Fatalf("[LoopInBatch] failed to convert string to int: %v", err) } } else { log.Infof("[LoopInBatch] scanColumn not supported") } return currentMin }
{ log.Fatalf("[tableScanner] failed to emit: %v", errors.ErrorStack(err)) }
conditional_block
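The LoopInBatch comment in the row above describes keyset pagination over an indexed scan column (SELECT * FROM t WHERE col > last ORDER BY col LIMIT n, carrying the last seen value into the next query). What follows is a minimal, self-contained Go sketch of that pattern only, under stated assumptions: the table name demo.t, the id column, and the go-sql-driver/mysql import are placeholders of mine and do not come from the gravity row; the real scanner also persists positions, throttles, and emits messages, all of which this sketch omits.

// Keyset-pagination sketch: scan a table in batches ordered by an indexed
// column, using the last seen value as the lower bound of the next query.
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/go-sql-driver/mysql" // assumed driver; any database/sql driver works
)

func scanInBatches(db *sql.DB, batch int) error {
	first := true
	var lastID int64
	for {
		// ">=" on the first pass so the minimum row is included, ">" afterwards
		// so each row is returned exactly once.
		op := ">"
		if first {
			op = ">="
			first = false
		}
		query := fmt.Sprintf("SELECT id, payload FROM demo.t WHERE id %s ? ORDER BY id LIMIT ?", op)
		rows, err := db.Query(query, lastID, batch)
		if err != nil {
			return err
		}
		n := 0
		for rows.Next() {
			var id int64
			var payload string
			if err := rows.Scan(&id, &payload); err != nil {
				rows.Close()
				return err
			}
			lastID = id // becomes the lower bound of the next batch
			n++
		}
		if err := rows.Err(); err != nil {
			rows.Close()
			return err
		}
		rows.Close()
		if n == 0 { // no rows left: the scan is complete
			return nil
		}
	}
}

func main() {
	db, err := sql.Open("mysql", "user:pass@tcp(127.0.0.1:3306)/demo")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	if err := scanInBatches(db, 10000); err != nil {
		log.Fatal(err)
	}
}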
index.ts
import { ATOM, IAtom, IComposedAtom, IConfig, ICssPropToToken, IScreens, ISheet, IThemeAtom, ITokensDefinition, TCss, } from "./types"; import { createSheets, cssPropToToken, getVendorPrefixAndProps, hashString, specificityProps, } from "./utils"; export * from "./types"; export * from "./css-types"; export const hotReloadingCache = new Map<string, any>(); const toStringCachedAtom = function (this: IAtom) { return this._className!; }; const toStringCompose = function (this: IComposedAtom) { const className = this.atoms.map((atom) => atom.toString()).join(" "); // cache the className on this instance // @ts-ignore this._className = className; // @ts-ignore this.toString = toStringCachedAtom; return className; }; const createToString = ( sheets: { [screen: string]: ISheet }, screens: IScreens = {}, cssClassnameProvider: (atom: IAtom, seq: number | null) => [string, string?], // [className, pseudo] preInjectedRules: Set<string> ) => { let seq = 0; return function toString(this: IAtom) { const className = cssClassnameProvider( this, preInjectedRules.size ? null : seq++ ); const shouldInject = !preInjectedRules.size || !preInjectedRules.has(`.${className[0]}`); const value = this.value; if (shouldInject) { let cssRule = ""; if (className.length === 2) { cssRule = `.${className[0]}${className[1]}{${this.cssHyphenProp}:${value};}`; } else { cssRule = `.${className[0]}{${this.cssHyphenProp}:${value};}`; } sheets[this.screen].insertRule( this.screen ? screens[this.screen](cssRule) : cssRule ); } // We are switching this atom from IAtom simpler representation // 1. delete everything but `id` for specificity check // @ts-ignore this.cssHyphenProp = this.value = this.pseudo = this.screen = undefined; // 2. put on a _className this._className = className[0]; // 3. switch from this `toString` to a much simpler one this.toString = toStringCachedAtom; return className[0]; }; }; const createServerToString = ( sheets: { [screen: string]: ISheet }, screens: IScreens = {}, cssClassnameProvider: (atom: IAtom, seq: number | null) => [string, string?] // [className, pseudo] ) => { return function toString(this: IAtom) { const className = cssClassnameProvider(this, null); const value = this.value; let cssRule = ""; if (className.length === 2) { cssRule = `.${className[0]}${className[1]}{${this.cssHyphenProp}:${value};}`; } else { cssRule = `.${className[0]}{${this.cssHyphenProp}:${value};}`; } sheets[this.screen].insertRule( this.screen ? screens[this.screen](cssRule) : cssRule ); // We do not clean out the atom here, cause it will be reused // to inject multiple times for each request // 1. put on a _className this._className = className[0]; // 2. switch from this `toString` to a much simpler one this.toString = toStringCachedAtom; return className[0]; }; }; const createThemeToString = (classPrefix: string, variablesSheet: ISheet) => function toString(this: IThemeAtom) { const themeClassName = `${classPrefix ? 
`${classPrefix}-` : ""}theme-${ this.name }`; // @ts-ignore variablesSheet.insertRule( `.${themeClassName}{${Object.keys(this.definition).reduce( (aggr, tokenType) => { // @ts-ignore return `${aggr}${Object.keys(this.definition[tokenType]).reduce( (subAggr, tokenKey) => { // @ts-ignore return `${subAggr}--${tokenType}-${tokenKey}:${this.definition[tokenType][tokenKey]};`; }, aggr )}`; }, "" )}}` ); this.toString = () => themeClassName; return themeClassName; }; const composeIntoMap = ( map: Map<string, IAtom>, atoms: (IAtom | IComposedAtom)[] ) => { let i = atoms.length - 1; for (; i >= 0; i--) { const item = atoms[i]; // atoms can be undefined, null, false or '' using ternary like // expressions with the properties if (item && item[ATOM] && "atoms" in item) { composeIntoMap(map, item.atoms); } else if (item && item[ATOM]) { if (!map.has((item as IAtom).id)) { map.set((item as IAtom).id, item as IAtom); } } else if (item) { map.set((item as unknown) as string, item as IAtom); } } }; export const createTokens = <T extends ITokensDefinition>(tokens: T) => { return tokens; }; export const createCss = <T extends IConfig>( config: T, env: Window | null = typeof window === "undefined" ? null : window ): TCss<T> => { const showFriendlyClassnames = typeof config.showFriendlyClassnames === "boolean" ? config.showFriendlyClassnames : process.env.NODE_ENV === "development"; const prefix = config.prefix || ""; const { vendorPrefix, vendorProps } = env ? getVendorPrefixAndProps(env) : { vendorPrefix: "-node-", vendorProps: [] }; if (env && hotReloadingCache.has(prefix)) { const instance = hotReloadingCache.get(prefix); instance.dispose(); } // pre-compute class prefix const classPrefix = prefix ? showFriendlyClassnames ? `${prefix}_` : prefix : ""; const cssClassnameProvider = ( atom: IAtom, seq: number | null ): [string, string?] => { const hash = seq === null ? hashString( `${atom.screen || ""}${atom.cssHyphenProp.replace( /-(moz|webkit|ms)-/, "" )}${atom.pseudo || ""}${atom.value}` ) : seq; const name = showFriendlyClassnames ? `${atom.screen ? `${atom.screen}_` : ""}${atom.cssHyphenProp .replace(/-(moz|webkit|ms)-/, "") .split("-") .map((part) => part[0]) .join("")}_${hash}` : `_${hash}`; const className = `${classPrefix}${name}`; if (atom.pseudo) { return [className, atom.pseudo]; } return [className]; }; const { tags, sheets } = createSheets(env, config.screens); const preInjectedRules = new Set<string>(); // tslint:disable-next-line for (const sheet in sheets) { for (let x = 0; x < sheets[sheet].cssRules.length; x++) { preInjectedRules.add(sheets[sheet].cssRules[x].selectorText); } } let toString = env ? createToString( sheets, config.screens, cssClassnameProvider, preInjectedRules ) : createServerToString(sheets, config.screens, cssClassnameProvider); let themeToString = createThemeToString(classPrefix, sheets.__variables__); const compose = (...atoms: IAtom[]): IComposedAtom => { const map = new Map<string, IAtom>(); composeIntoMap(map, atoms); return { atoms: Array.from(map.values()), toString: toStringCompose, [ATOM]: true, }; }; const createAtom = ( cssProp: string, value: any, screen = "", pseudo?: string ) => { const token: any = cssPropToToken[cssProp as keyof ICssPropToToken<any>]; let tokenValue: any; if (token) { if (Array.isArray(token) && Array.isArray(value)) { tokenValue = token.map((tokenName, index) => token && (tokens as any)[tokenName] && (tokens as any)[tokenName][value[index]] ? 
(tokens as any)[tokenName][value[index]] : value[index] ); } else { tokenValue = token && (tokens as any)[token] && (tokens as any)[token][value] ? (tokens as any)[token][value] : value; } } else { tokenValue = value; } const isVendorPrefixed = cssProp[0] === cssProp[0].toUpperCase(); // generate id used for specificity check // two atoms are considered equal in regared to there specificity if the id is equal const id = cssProp.toLowerCase() + (pseudo ? pseudo.split(":").sort().join(":") : "") + screen; // make a uid accouting for different values const uid = id + value; // If this was created before return the cached atom if (atomCache.has(uid)) { return atomCache.get(uid)!; } // prepare the cssProp let cssHyphenProp = cssProp .split(/(?=[A-Z])/) .map((g) => g.toLowerCase()) .join("-"); if (isVendorPrefixed) { cssHyphenProp = `-${cssHyphenProp}`; } else if (vendorProps.includes(`${vendorPrefix}${cssHyphenProp}`)) { cssHyphenProp = `${vendorPrefix}${cssHyphenProp}`; } // Create a new atom const atom: IAtom = { id, cssHyphenProp, value: tokenValue, pseudo, screen, toString, [ATOM]: true, }; // Cache it atomCache.set(uid, atom); return atom; }; const createCssAtoms = ( props: { [key: string]: any; }, cb: (atom: IAtom) => void, screen = "", pseudo: string[] = [], canCallUtils = true, canCallSpecificityProps = true ) => { // tslint:disable-next-line for (const prop in props) { if (config.screens && prop in config.screens) { if (screen) { throw new Error( `@stitches/css - You are nesting the screen "${prop}" into "${screen}", that makes no sense? :-)` ); } createCssAtoms(props[prop], cb, prop, pseudo); } else if (!prop[0].match(/[a-zA-Z]/)) { createCssAtoms(props[prop], cb, screen, pseudo.concat(prop)); } else if (canCallUtils && prop in utils)
else if (canCallSpecificityProps && prop in specificityProps) { createCssAtoms( specificityProps[prop](config)(props[prop]) as any, cb, screen, pseudo, false, false ); } else { cb( createAtom( prop, props[prop], screen, pseudo.length ? pseudo.join("") : undefined ) ); } } }; const createUtilsAtoms = ( props: { [key: string]: any; }, cb: (atom: IAtom) => void, screen = "", pseudo: string[] = [], canOverride = true ) => { // tslint:disable-next-line for (const prop in props) { if (prop === "override") { if (!canOverride) { throw new Error( "@stitches/css - You can not override at this level, only at the top level definition" ); } createCssAtoms(props[prop], cb, screen, pseudo); } else if (config.screens && prop in config.screens) { if (screen) { throw new Error( `@stitches/css - You are nesting the screen "${prop}" into "${screen}", that makes no sense? :-)` ); } createUtilsAtoms(props[prop], cb, prop, pseudo, false); } else if (!prop[0].match(/[a-zA-Z]/)) { createUtilsAtoms(props[prop], cb, screen, pseudo.concat(prop), false); } else if (prop in utils) { createCssAtoms( utils[prop](config)(props[prop]) as any, cb, screen, pseudo, false ); } else { throw new Error( `@stitches/css - The prop "${prop}" is not a valid utility` ); } } }; // pre-checked config to avoid checking these all the time const screens = config.screens || {}; const utils = config.utils || {}; const tokens = config.tokens || {}; let baseTokens = ":root{"; // tslint:disable-next-line for (const tokenType in tokens) { // @ts-ignore // tslint:disable-next-line for (const token in tokens[tokenType]) { const cssvar = `--${tokenType}-${token}`; // @ts-ignore baseTokens += `${cssvar}:${tokens[tokenType][token]};`; // @ts-ignore tokens[tokenType][token] = `var(${cssvar})`; } } baseTokens += "}"; if (!preInjectedRules.has(":root")) { sheets.__variables__.insertRule(baseTokens); } // atom cache const atomCache = new Map<string, IAtom>(); const themeCache = new Map<ITokensDefinition, IThemeAtom>(); const cssInstance = ((...definitions: any[]) => { const args: any[] = []; let index = 0; for (let x = 0; x < definitions.length; x++) { if (!definitions[x]) { continue; } if (typeof definitions[x] === "string" || definitions[x][ATOM]) { args[index++] = definitions[x]; } else if (config.utilityFirst) { createUtilsAtoms(definitions[x], (atom) => { args[index++] = atom; }); } else { createCssAtoms(definitions[x], (atom) => { args[index++] = atom; }); } } return compose(...args); }) as any; cssInstance.dispose = () => { atomCache.clear(); tags.forEach((tag) => { tag.parentNode?.removeChild(tag); }); }; cssInstance._config = () => config; cssInstance.theme = (definition: any): IThemeAtom => { if (themeCache.has(definition)) { return themeCache.get(definition)!; } const themeAtom = { // We could here also check if theme has been added from server, // though thinking it does not matter... 
just a simple rule name: String(themeCache.size), definition, toString: themeToString, [ATOM]: true as true, }; themeCache.set(definition, themeAtom); return themeAtom; }; cssInstance.getStyles = (cb: any) => { // tslint:disable-next-line for (let sheet in sheets) { sheets[sheet].content = ""; } if (baseTokens) { sheets.__variables__.insertRule(baseTokens); } // We have to reset our toStrings so that they will now inject again, // and still cache is it is being reused toString = createServerToString( sheets, config.screens, cssClassnameProvider ); // We have to reset our themeToStrings so that they will now inject again, // and still cache is it is being reused themeToString = createThemeToString(classPrefix, sheets.__variables__); atomCache.forEach((atom) => { atom.toString = toString; }); themeCache.forEach((atom) => { atom.toString = themeToString; }); const result = cb(); return { result, styles: Object.keys(screens).reduce( (aggr, key) => { return aggr.concat(`/* STITCHES:${key} */\n${sheets[key].content}`); }, [ `/* STITCHES:__variables__ */\n${sheets.__variables__.content}`, `/* STITCHES */\n${sheets[""].content}`, ] ), }; }; if (env) { hotReloadingCache.set(prefix, cssInstance); } return cssInstance; };
{ createCssAtoms( utils[prop](config)(props[prop]) as any, cb, screen, pseudo, false ); }
conditional_block
index.ts
import { ATOM, IAtom, IComposedAtom, IConfig, ICssPropToToken, IScreens, ISheet, IThemeAtom, ITokensDefinition, TCss, } from "./types"; import { createSheets, cssPropToToken, getVendorPrefixAndProps, hashString, specificityProps, } from "./utils"; export * from "./types"; export * from "./css-types"; export const hotReloadingCache = new Map<string, any>(); const toStringCachedAtom = function (this: IAtom) { return this._className!; }; const toStringCompose = function (this: IComposedAtom) {
this._className = className; // @ts-ignore this.toString = toStringCachedAtom; return className; }; const createToString = ( sheets: { [screen: string]: ISheet }, screens: IScreens = {}, cssClassnameProvider: (atom: IAtom, seq: number | null) => [string, string?], // [className, pseudo] preInjectedRules: Set<string> ) => { let seq = 0; return function toString(this: IAtom) { const className = cssClassnameProvider( this, preInjectedRules.size ? null : seq++ ); const shouldInject = !preInjectedRules.size || !preInjectedRules.has(`.${className[0]}`); const value = this.value; if (shouldInject) { let cssRule = ""; if (className.length === 2) { cssRule = `.${className[0]}${className[1]}{${this.cssHyphenProp}:${value};}`; } else { cssRule = `.${className[0]}{${this.cssHyphenProp}:${value};}`; } sheets[this.screen].insertRule( this.screen ? screens[this.screen](cssRule) : cssRule ); } // We are switching this atom from IAtom simpler representation // 1. delete everything but `id` for specificity check // @ts-ignore this.cssHyphenProp = this.value = this.pseudo = this.screen = undefined; // 2. put on a _className this._className = className[0]; // 3. switch from this `toString` to a much simpler one this.toString = toStringCachedAtom; return className[0]; }; }; const createServerToString = ( sheets: { [screen: string]: ISheet }, screens: IScreens = {}, cssClassnameProvider: (atom: IAtom, seq: number | null) => [string, string?] // [className, pseudo] ) => { return function toString(this: IAtom) { const className = cssClassnameProvider(this, null); const value = this.value; let cssRule = ""; if (className.length === 2) { cssRule = `.${className[0]}${className[1]}{${this.cssHyphenProp}:${value};}`; } else { cssRule = `.${className[0]}{${this.cssHyphenProp}:${value};}`; } sheets[this.screen].insertRule( this.screen ? screens[this.screen](cssRule) : cssRule ); // We do not clean out the atom here, cause it will be reused // to inject multiple times for each request // 1. put on a _className this._className = className[0]; // 2. switch from this `toString` to a much simpler one this.toString = toStringCachedAtom; return className[0]; }; }; const createThemeToString = (classPrefix: string, variablesSheet: ISheet) => function toString(this: IThemeAtom) { const themeClassName = `${classPrefix ? `${classPrefix}-` : ""}theme-${ this.name }`; // @ts-ignore variablesSheet.insertRule( `.${themeClassName}{${Object.keys(this.definition).reduce( (aggr, tokenType) => { // @ts-ignore return `${aggr}${Object.keys(this.definition[tokenType]).reduce( (subAggr, tokenKey) => { // @ts-ignore return `${subAggr}--${tokenType}-${tokenKey}:${this.definition[tokenType][tokenKey]};`; }, aggr )}`; }, "" )}}` ); this.toString = () => themeClassName; return themeClassName; }; const composeIntoMap = ( map: Map<string, IAtom>, atoms: (IAtom | IComposedAtom)[] ) => { let i = atoms.length - 1; for (; i >= 0; i--) { const item = atoms[i]; // atoms can be undefined, null, false or '' using ternary like // expressions with the properties if (item && item[ATOM] && "atoms" in item) { composeIntoMap(map, item.atoms); } else if (item && item[ATOM]) { if (!map.has((item as IAtom).id)) { map.set((item as IAtom).id, item as IAtom); } } else if (item) { map.set((item as unknown) as string, item as IAtom); } } }; export const createTokens = <T extends ITokensDefinition>(tokens: T) => { return tokens; }; export const createCss = <T extends IConfig>( config: T, env: Window | null = typeof window === "undefined" ? 
null : window ): TCss<T> => { const showFriendlyClassnames = typeof config.showFriendlyClassnames === "boolean" ? config.showFriendlyClassnames : process.env.NODE_ENV === "development"; const prefix = config.prefix || ""; const { vendorPrefix, vendorProps } = env ? getVendorPrefixAndProps(env) : { vendorPrefix: "-node-", vendorProps: [] }; if (env && hotReloadingCache.has(prefix)) { const instance = hotReloadingCache.get(prefix); instance.dispose(); } // pre-compute class prefix const classPrefix = prefix ? showFriendlyClassnames ? `${prefix}_` : prefix : ""; const cssClassnameProvider = ( atom: IAtom, seq: number | null ): [string, string?] => { const hash = seq === null ? hashString( `${atom.screen || ""}${atom.cssHyphenProp.replace( /-(moz|webkit|ms)-/, "" )}${atom.pseudo || ""}${atom.value}` ) : seq; const name = showFriendlyClassnames ? `${atom.screen ? `${atom.screen}_` : ""}${atom.cssHyphenProp .replace(/-(moz|webkit|ms)-/, "") .split("-") .map((part) => part[0]) .join("")}_${hash}` : `_${hash}`; const className = `${classPrefix}${name}`; if (atom.pseudo) { return [className, atom.pseudo]; } return [className]; }; const { tags, sheets } = createSheets(env, config.screens); const preInjectedRules = new Set<string>(); // tslint:disable-next-line for (const sheet in sheets) { for (let x = 0; x < sheets[sheet].cssRules.length; x++) { preInjectedRules.add(sheets[sheet].cssRules[x].selectorText); } } let toString = env ? createToString( sheets, config.screens, cssClassnameProvider, preInjectedRules ) : createServerToString(sheets, config.screens, cssClassnameProvider); let themeToString = createThemeToString(classPrefix, sheets.__variables__); const compose = (...atoms: IAtom[]): IComposedAtom => { const map = new Map<string, IAtom>(); composeIntoMap(map, atoms); return { atoms: Array.from(map.values()), toString: toStringCompose, [ATOM]: true, }; }; const createAtom = ( cssProp: string, value: any, screen = "", pseudo?: string ) => { const token: any = cssPropToToken[cssProp as keyof ICssPropToToken<any>]; let tokenValue: any; if (token) { if (Array.isArray(token) && Array.isArray(value)) { tokenValue = token.map((tokenName, index) => token && (tokens as any)[tokenName] && (tokens as any)[tokenName][value[index]] ? (tokens as any)[tokenName][value[index]] : value[index] ); } else { tokenValue = token && (tokens as any)[token] && (tokens as any)[token][value] ? (tokens as any)[token][value] : value; } } else { tokenValue = value; } const isVendorPrefixed = cssProp[0] === cssProp[0].toUpperCase(); // generate id used for specificity check // two atoms are considered equal in regared to there specificity if the id is equal const id = cssProp.toLowerCase() + (pseudo ? 
pseudo.split(":").sort().join(":") : "") + screen; // make a uid accouting for different values const uid = id + value; // If this was created before return the cached atom if (atomCache.has(uid)) { return atomCache.get(uid)!; } // prepare the cssProp let cssHyphenProp = cssProp .split(/(?=[A-Z])/) .map((g) => g.toLowerCase()) .join("-"); if (isVendorPrefixed) { cssHyphenProp = `-${cssHyphenProp}`; } else if (vendorProps.includes(`${vendorPrefix}${cssHyphenProp}`)) { cssHyphenProp = `${vendorPrefix}${cssHyphenProp}`; } // Create a new atom const atom: IAtom = { id, cssHyphenProp, value: tokenValue, pseudo, screen, toString, [ATOM]: true, }; // Cache it atomCache.set(uid, atom); return atom; }; const createCssAtoms = ( props: { [key: string]: any; }, cb: (atom: IAtom) => void, screen = "", pseudo: string[] = [], canCallUtils = true, canCallSpecificityProps = true ) => { // tslint:disable-next-line for (const prop in props) { if (config.screens && prop in config.screens) { if (screen) { throw new Error( `@stitches/css - You are nesting the screen "${prop}" into "${screen}", that makes no sense? :-)` ); } createCssAtoms(props[prop], cb, prop, pseudo); } else if (!prop[0].match(/[a-zA-Z]/)) { createCssAtoms(props[prop], cb, screen, pseudo.concat(prop)); } else if (canCallUtils && prop in utils) { createCssAtoms( utils[prop](config)(props[prop]) as any, cb, screen, pseudo, false ); } else if (canCallSpecificityProps && prop in specificityProps) { createCssAtoms( specificityProps[prop](config)(props[prop]) as any, cb, screen, pseudo, false, false ); } else { cb( createAtom( prop, props[prop], screen, pseudo.length ? pseudo.join("") : undefined ) ); } } }; const createUtilsAtoms = ( props: { [key: string]: any; }, cb: (atom: IAtom) => void, screen = "", pseudo: string[] = [], canOverride = true ) => { // tslint:disable-next-line for (const prop in props) { if (prop === "override") { if (!canOverride) { throw new Error( "@stitches/css - You can not override at this level, only at the top level definition" ); } createCssAtoms(props[prop], cb, screen, pseudo); } else if (config.screens && prop in config.screens) { if (screen) { throw new Error( `@stitches/css - You are nesting the screen "${prop}" into "${screen}", that makes no sense? 
:-)` ); } createUtilsAtoms(props[prop], cb, prop, pseudo, false); } else if (!prop[0].match(/[a-zA-Z]/)) { createUtilsAtoms(props[prop], cb, screen, pseudo.concat(prop), false); } else if (prop in utils) { createCssAtoms( utils[prop](config)(props[prop]) as any, cb, screen, pseudo, false ); } else { throw new Error( `@stitches/css - The prop "${prop}" is not a valid utility` ); } } }; // pre-checked config to avoid checking these all the time const screens = config.screens || {}; const utils = config.utils || {}; const tokens = config.tokens || {}; let baseTokens = ":root{"; // tslint:disable-next-line for (const tokenType in tokens) { // @ts-ignore // tslint:disable-next-line for (const token in tokens[tokenType]) { const cssvar = `--${tokenType}-${token}`; // @ts-ignore baseTokens += `${cssvar}:${tokens[tokenType][token]};`; // @ts-ignore tokens[tokenType][token] = `var(${cssvar})`; } } baseTokens += "}"; if (!preInjectedRules.has(":root")) { sheets.__variables__.insertRule(baseTokens); } // atom cache const atomCache = new Map<string, IAtom>(); const themeCache = new Map<ITokensDefinition, IThemeAtom>(); const cssInstance = ((...definitions: any[]) => { const args: any[] = []; let index = 0; for (let x = 0; x < definitions.length; x++) { if (!definitions[x]) { continue; } if (typeof definitions[x] === "string" || definitions[x][ATOM]) { args[index++] = definitions[x]; } else if (config.utilityFirst) { createUtilsAtoms(definitions[x], (atom) => { args[index++] = atom; }); } else { createCssAtoms(definitions[x], (atom) => { args[index++] = atom; }); } } return compose(...args); }) as any; cssInstance.dispose = () => { atomCache.clear(); tags.forEach((tag) => { tag.parentNode?.removeChild(tag); }); }; cssInstance._config = () => config; cssInstance.theme = (definition: any): IThemeAtom => { if (themeCache.has(definition)) { return themeCache.get(definition)!; } const themeAtom = { // We could here also check if theme has been added from server, // though thinking it does not matter... just a simple rule name: String(themeCache.size), definition, toString: themeToString, [ATOM]: true as true, }; themeCache.set(definition, themeAtom); return themeAtom; }; cssInstance.getStyles = (cb: any) => { // tslint:disable-next-line for (let sheet in sheets) { sheets[sheet].content = ""; } if (baseTokens) { sheets.__variables__.insertRule(baseTokens); } // We have to reset our toStrings so that they will now inject again, // and still cache is it is being reused toString = createServerToString( sheets, config.screens, cssClassnameProvider ); // We have to reset our themeToStrings so that they will now inject again, // and still cache is it is being reused themeToString = createThemeToString(classPrefix, sheets.__variables__); atomCache.forEach((atom) => { atom.toString = toString; }); themeCache.forEach((atom) => { atom.toString = themeToString; }); const result = cb(); return { result, styles: Object.keys(screens).reduce( (aggr, key) => { return aggr.concat(`/* STITCHES:${key} */\n${sheets[key].content}`); }, [ `/* STITCHES:__variables__ */\n${sheets.__variables__.content}`, `/* STITCHES */\n${sheets[""].content}`, ] ), }; }; if (env) { hotReloadingCache.set(prefix, cssInstance); } return cssInstance; };
const className = this.atoms.map((atom) => atom.toString()).join(" "); // cache the className on this instance // @ts-ignore
random_line_split
telegraf.go
/* * Copyright 2020 Rackspace US, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package agents import ( "bytes" "context" "fmt" "github.com/pkg/errors" "github.com/racker/salus-telemetry-envoy/config" "github.com/racker/salus-telemetry-envoy/lineproto" "github.com/racker/salus-telemetry-protocol/telemetry_edge" "github.com/satori/go.uuid" log "github.com/sirupsen/logrus" "github.com/spf13/viper" "net" "net/http" "os/exec" "path" "path/filepath" "syscall" "text/template" "time" ) var telegrafMainConfigTmpl = template.Must(template.New("telegrafMain").Parse(` [agent] interval = "{{.DefaultMonitoringInterval}}" flush_interval = "{{.MaxFlushInterval}}" flush_jitter = "2s" omit_hostname = true [[outputs.socket_writer]] address = "tcp://{{.IngestAddress}}" data_format = "json" json_timestamp_units = "1ms" [[inputs.internal]] collect_memstats = false `)) var ( telegrafStartupDuration = 10 * time.Second ) const ( telegrafMaxTestMonitorRetries = 3 telegrafTestMonitorRetryDelay = 500 * time.Millisecond ) type telegrafMainConfigData struct { IngestAddress string DefaultMonitoringInterval time.Duration MaxFlushInterval time.Duration } type TelegrafRunner struct { ingestAddress string basePath string running *AgentRunningContext commandHandler CommandHandler configServerMux *http.ServeMux configServerURL string configServerToken string configServerHandler http.HandlerFunc tomlMainConfig []byte // tomlConfigs key is the "bound monitor id", i.e. 
monitorId_resourceId tomlConfigs map[string][]byte } func (tr *TelegrafRunner) PurgeConfig() error { tr.tomlConfigs = make(map[string][]byte) return nil } func init() { registerSpecificAgentRunner(telemetry_edge.AgentType_TELEGRAF, &TelegrafRunner{}) } func (tr *TelegrafRunner) Load(agentBasePath string) error { tr.ingestAddress = config.GetListenerAddress(config.TelegrafJsonListener) tr.basePath = agentBasePath tr.configServerToken = uuid.NewV4().String() tr.configServerHandler = func(w http.ResponseWriter, r *http.Request) { if r.Header.Get("authorization") != "Token "+tr.configServerToken { http.Error(w, "unauthorized", http.StatusUnauthorized) return } _, err := w.Write(tr.concatConfigs()) if err != nil { log.Errorf("Error writing config page %v", err) } } serverId := uuid.NewV4().String() tr.configServerMux = http.NewServeMux() tr.configServerMux.Handle("/"+serverId, tr.configServerHandler) // Get the next available port listener, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { return errors.Wrap(err, "couldn't create http listener") } listenerPort := listener.Addr().(*net.TCPAddr).Port tr.configServerURL = fmt.Sprintf("http://127.0.0.1:%d/%s", listenerPort, serverId) tr.tomlConfigs = make(map[string][]byte) mainConfig, err := tr.createMainConfig() if err != nil { return errors.Wrap(err, "couldn't create main config") } tr.tomlMainConfig = mainConfig go tr.serve(listener) return nil } func (tr *TelegrafRunner) serve(listener net.Listener) { log.Info("started webServer") err := http.Serve(listener, tr.configServerMux) // Note this is probably not the best way to handle webserver failure log.Fatalf("web server error %v", err) } func (tr *TelegrafRunner) SetCommandHandler(handler CommandHandler) { tr.commandHandler = handler } func (tr *TelegrafRunner) ProcessConfig(configure *telemetry_edge.EnvoyInstructionConfigure) error { applied := 0 for _, op := range configure.GetOperations() { log.WithField("op", op).Debug("processing telegraf config operation") if tr.handleTelegrafConfigurationOp(op) { applied++ } } if applied == 0 { return &noAppliedConfigsError{} } return nil } func (tr *TelegrafRunner) concatConfigs() []byte
func (tr *TelegrafRunner) handleTelegrafConfigurationOp(op *telemetry_edge.ConfigurationOp) bool { switch op.GetType() { case telemetry_edge.ConfigurationOp_CREATE, telemetry_edge.ConfigurationOp_MODIFY: var finalConfig []byte var err error finalConfig, err = ConvertJsonToTelegrafToml(op.GetContent(), op.ExtraLabels, op.Interval) if err != nil { log.WithError(err).WithField("op", op).Warn("failed to convert config blob to TOML") return false } tr.tomlConfigs[op.GetId()] = finalConfig return true case telemetry_edge.ConfigurationOp_REMOVE: if _, ok := tr.tomlConfigs[op.GetId()]; ok { delete(tr.tomlConfigs, op.GetId()) return true } return false } return false } func (tr *TelegrafRunner) PostInstall(agentVersionPath string) error { resolvedExePath := path.Join(agentVersionPath, binSubpath, "telegraf") err := addNetRawCapabilities(resolvedExePath) if err != nil { log.WithError(err). WithField("agentExe", resolvedExePath). Warn("failed to set net_raw capabilities on telegraf, native ping will not work") } return nil } func (tr *TelegrafRunner) EnsureRunningState(ctx context.Context, applyConfigs bool) { log.Debug("ensuring telegraf is in correct running state") if !tr.hasRequiredPaths() { log.Debug("telegraf not runnable due to some missing paths and files, stopping if needed") tr.commandHandler.Stop(tr.running) return } if tr.running.IsRunning() { log. WithField("agentType", telemetry_edge.AgentType_TELEGRAF). Debug("already running") if applyConfigs { log. WithField("agentType", telemetry_edge.AgentType_TELEGRAF). Info("signaling config reload") tr.handleConfigReload() } return } runningContext := tr.commandHandler.CreateContext(ctx, telemetry_edge.AgentType_TELEGRAF, tr.exePath(), tr.basePath, "--config", tr.configServerURL) // telegraf returns the INFLUX_TOKEN in the http config request header runningContext.AppendEnv("INFLUX_TOKEN=" + tr.configServerToken) err := tr.commandHandler.StartAgentCommand(runningContext, telemetry_edge.AgentType_TELEGRAF, "Loaded inputs:", telegrafStartupDuration) if err != nil { log.WithError(err). WithField("agentType", telemetry_edge.AgentType_TELEGRAF). Warn("failed to start agent") return } go tr.commandHandler.WaitOnAgentCommand(ctx, tr, runningContext) tr.running = runningContext log.WithField("pid", runningContext.Pid()). WithField("agentType", telemetry_edge.AgentType_TELEGRAF). Info("started agent") } // exePath returns path to executable relative to baseDir func (tr *TelegrafRunner) exePath() string { return filepath.Join(currentVerLink, binSubpath, "telegraf") } func (tr *TelegrafRunner) Stop() { tr.commandHandler.Stop(tr.running) tr.running = nil } func (tr *TelegrafRunner) createMainConfig() ([]byte, error) { data := &telegrafMainConfigData{ IngestAddress: tr.ingestAddress, DefaultMonitoringInterval: viper.GetDuration(config.AgentsDefaultMonitoringInterval), MaxFlushInterval: viper.GetDuration(config.AgentsMaxFlushInterval), } var b bytes.Buffer err := telegrafMainConfigTmpl.Execute(&b, data) if err != nil { return nil, errors.Wrap(err, "failed to execute telegraf main config template") } return b.Bytes(), nil } func (tr *TelegrafRunner) handleConfigReload() { if err := tr.commandHandler.Signal(tr.running, syscall.SIGHUP); err != nil { log.WithError(err).WithField("pid", tr.running.Pid()). 
Warn("failed to signal agent process") } } func (tr *TelegrafRunner) hasRequiredPaths() bool { fullExePath := path.Join(tr.basePath, tr.exePath()) if !fileExists(fullExePath) { log.WithField("exe", fullExePath).Debug("missing exe") return false } return true } func (tr *TelegrafRunner) ProcessTestMonitor(correlationId string, content string, timeout time.Duration) (*telemetry_edge.TestMonitorResults, error) { // Convert content to TOML configToml, err := ConvertJsonToTelegrafToml(content, nil, 0) if err != nil { return nil, errors.Wrapf(err, "failed to convert config content") } // Generate token/id used for authenticating and pulling telegraf config testConfigServerToken := uuid.NewV4().String() testConfigServerId := uuid.NewV4().String() // Bind to the next available port by using :0 listener, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { return nil, errors.Wrap(err, "couldn't create http listener") } //noinspection GoUnhandledErrorResult defer listener.Close() listenerPort := listener.Addr().(*net.TCPAddr).Port hostPort := fmt.Sprintf("127.0.0.1:%d", listenerPort) configServerErrors := make(chan error, 2) testConfigRunner := telegrafTestConfigRunnerBuilder(testConfigServerId, testConfigServerToken) // Start the config server configServer := testConfigRunner.StartTestConfigServer(configToml, configServerErrors, listener) // Run the telegraf test command results := &telemetry_edge.TestMonitorResults{ CorrelationId: correlationId, Errors: []string{}, } // Sometimes telegraf --test completes with empty output and no error indicated, // so retry a few times. If that still fails, then a parse error will be produced as without retrying. var cmdOut []byte for attempt := 0; attempt < telegrafMaxTestMonitorRetries; attempt++ { cmdOut, err = testConfigRunner.RunCommand(hostPort, tr.exePath(), tr.basePath, timeout) if err != nil || len(cmdOut) != 0 { break } // wait just a bit between each try time.Sleep(telegrafTestMonitorRetryDelay) } log. WithError(err). WithField("correlationId", correlationId). WithField("content", content). WithField("out", string(cmdOut)). Debug("ran telegraf with test config") if err != nil { if exitErr, ok := err.(*exec.ExitError); ok { exitErrMessage := err.Error() // checking error's message is portable and easy way to determine if the exec timeout was exceeded if exitErrMessage == "signal: killed" { results.Errors = append(results.Errors, "Command took too long to run") } else { results.Errors = append(results.Errors, "Command failed: "+err.Error()) } results.Errors = append(results.Errors, "Command failed with error output: "+string(exitErr.Stderr)) } else { results.Errors = append(results.Errors, "Command failed: "+err.Error()) } } else { // ... and process output parsedMetrics, err := lineproto.ParseInfluxLineProtocolMetrics(cmdOut) if err != nil { results.Errors = append(results.Errors, "Failed to parse telegraf output: "+err.Error()) } else { // Wrap up the named tag-value metrics into the general metrics type results.Metrics = make([]*telemetry_edge.Metric, len(parsedMetrics)) for i, metric := range parsedMetrics { results.Metrics[i] = &telemetry_edge.Metric{ Variant: &telemetry_edge.Metric_NameTagValue{NameTagValue: metric}, } } } } // Close out the temporary config server _ = configServer.Close() close(configServerErrors) // ...capture any errors from the config server for err := range configServerErrors { results.Errors = append(results.Errors, "ConfigServer: "+err.Error()) } return results, nil }
{ var configs []byte configs = append(configs, tr.tomlMainConfig...) // telegraf can only handle one 'inputs' header per file so add exactly one here configs = append(configs, []byte("[inputs]")...) for _, v := range tr.tomlConfigs { // remove the other redundant '[inputs]' headers here if bytes.Equal([]byte("[inputs]"), v[0:8]) { v = v[8:] } configs = append(configs, v...) } return configs }
identifier_body
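The telegraf.go row above serves its concatenated TOML from a loopback HTTP endpoint: a random path segment, a token checked in the request header, and a listener bound to port 0 so the kernel assigns a free port. Below is a minimal sketch of that shape only, not the Salus implementation; the token generation, the stub TOML body, and the log output are my own assumptions.

// Loopback config endpoint sketch: random path, token-guarded handler,
// listener on an ephemeral port.
package main

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"
	"log"
	"net"
	"net/http"
)

// randomToken returns a hex string used for both the URL path and the
// authorization token in this sketch.
func randomToken() string {
	b := make([]byte, 16)
	if _, err := rand.Read(b); err != nil {
		log.Fatal(err)
	}
	return hex.EncodeToString(b)
}

func main() {
	token := randomToken()
	pathID := randomToken()

	mux := http.NewServeMux()
	mux.HandleFunc("/"+pathID, func(w http.ResponseWriter, r *http.Request) {
		// Reject callers that do not present the expected token header.
		if r.Header.Get("Authorization") != "Token "+token {
			http.Error(w, "unauthorized", http.StatusUnauthorized)
			return
		}
		// The real runner would write the concatenated TOML here; this is a stub.
		fmt.Fprintln(w, "[agent]\n  interval = \"10s\"")
	})

	// ":0" asks the kernel for any free port, mirroring the row's net.Listen call.
	listener, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("config URL: http://%s/%s (token %s)", listener.Addr(), pathID, token)
	log.Fatal(http.Serve(listener, mux))
}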
telegraf.go
/* * Copyright 2020 Rackspace US, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package agents import ( "bytes" "context" "fmt" "github.com/pkg/errors" "github.com/racker/salus-telemetry-envoy/config" "github.com/racker/salus-telemetry-envoy/lineproto" "github.com/racker/salus-telemetry-protocol/telemetry_edge" "github.com/satori/go.uuid" log "github.com/sirupsen/logrus" "github.com/spf13/viper" "net" "net/http" "os/exec" "path" "path/filepath" "syscall" "text/template" "time" ) var telegrafMainConfigTmpl = template.Must(template.New("telegrafMain").Parse(` [agent] interval = "{{.DefaultMonitoringInterval}}" flush_interval = "{{.MaxFlushInterval}}" flush_jitter = "2s" omit_hostname = true [[outputs.socket_writer]] address = "tcp://{{.IngestAddress}}" data_format = "json" json_timestamp_units = "1ms" [[inputs.internal]] collect_memstats = false `)) var ( telegrafStartupDuration = 10 * time.Second ) const ( telegrafMaxTestMonitorRetries = 3 telegrafTestMonitorRetryDelay = 500 * time.Millisecond ) type telegrafMainConfigData struct { IngestAddress string DefaultMonitoringInterval time.Duration MaxFlushInterval time.Duration } type TelegrafRunner struct { ingestAddress string basePath string running *AgentRunningContext commandHandler CommandHandler configServerMux *http.ServeMux configServerURL string configServerToken string configServerHandler http.HandlerFunc tomlMainConfig []byte // tomlConfigs key is the "bound monitor id", i.e. 
monitorId_resourceId tomlConfigs map[string][]byte } func (tr *TelegrafRunner) PurgeConfig() error { tr.tomlConfigs = make(map[string][]byte) return nil } func init() { registerSpecificAgentRunner(telemetry_edge.AgentType_TELEGRAF, &TelegrafRunner{}) } func (tr *TelegrafRunner) Load(agentBasePath string) error { tr.ingestAddress = config.GetListenerAddress(config.TelegrafJsonListener) tr.basePath = agentBasePath tr.configServerToken = uuid.NewV4().String() tr.configServerHandler = func(w http.ResponseWriter, r *http.Request) { if r.Header.Get("authorization") != "Token "+tr.configServerToken { http.Error(w, "unauthorized", http.StatusUnauthorized) return } _, err := w.Write(tr.concatConfigs()) if err != nil { log.Errorf("Error writing config page %v", err) } } serverId := uuid.NewV4().String() tr.configServerMux = http.NewServeMux() tr.configServerMux.Handle("/"+serverId, tr.configServerHandler) // Get the next available port listener, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { return errors.Wrap(err, "couldn't create http listener") } listenerPort := listener.Addr().(*net.TCPAddr).Port tr.configServerURL = fmt.Sprintf("http://127.0.0.1:%d/%s", listenerPort, serverId) tr.tomlConfigs = make(map[string][]byte) mainConfig, err := tr.createMainConfig() if err != nil { return errors.Wrap(err, "couldn't create main config") } tr.tomlMainConfig = mainConfig go tr.serve(listener) return nil } func (tr *TelegrafRunner) serve(listener net.Listener) { log.Info("started webServer") err := http.Serve(listener, tr.configServerMux) // Note this is probably not the best way to handle webserver failure log.Fatalf("web server error %v", err) } func (tr *TelegrafRunner) SetCommandHandler(handler CommandHandler) { tr.commandHandler = handler } func (tr *TelegrafRunner) ProcessConfig(configure *telemetry_edge.EnvoyInstructionConfigure) error { applied := 0 for _, op := range configure.GetOperations() { log.WithField("op", op).Debug("processing telegraf config operation") if tr.handleTelegrafConfigurationOp(op) { applied++ } } if applied == 0 { return &noAppliedConfigsError{} } return nil } func (tr *TelegrafRunner) concatConfigs() []byte { var configs []byte configs = append(configs, tr.tomlMainConfig...) // telegraf can only handle one 'inputs' header per file so add exactly one here configs = append(configs, []byte("[inputs]")...) for _, v := range tr.tomlConfigs { // remove the other redundant '[inputs]' headers here if bytes.Equal([]byte("[inputs]"), v[0:8]) { v = v[8:] } configs = append(configs, v...) } return configs } func (tr *TelegrafRunner) handleTelegrafConfigurationOp(op *telemetry_edge.ConfigurationOp) bool { switch op.GetType() { case telemetry_edge.ConfigurationOp_CREATE, telemetry_edge.ConfigurationOp_MODIFY: var finalConfig []byte var err error finalConfig, err = ConvertJsonToTelegrafToml(op.GetContent(), op.ExtraLabels, op.Interval) if err != nil { log.WithError(err).WithField("op", op).Warn("failed to convert config blob to TOML") return false } tr.tomlConfigs[op.GetId()] = finalConfig return true case telemetry_edge.ConfigurationOp_REMOVE: if _, ok := tr.tomlConfigs[op.GetId()]; ok { delete(tr.tomlConfigs, op.GetId()) return true } return false } return false } func (tr *TelegrafRunner) PostInstall(agentVersionPath string) error { resolvedExePath := path.Join(agentVersionPath, binSubpath, "telegraf") err := addNetRawCapabilities(resolvedExePath) if err != nil { log.WithError(err). WithField("agentExe", resolvedExePath). 
Warn("failed to set net_raw capabilities on telegraf, native ping will not work") } return nil } func (tr *TelegrafRunner) EnsureRunningState(ctx context.Context, applyConfigs bool) { log.Debug("ensuring telegraf is in correct running state") if !tr.hasRequiredPaths() { log.Debug("telegraf not runnable due to some missing paths and files, stopping if needed") tr.commandHandler.Stop(tr.running) return } if tr.running.IsRunning() { log. WithField("agentType", telemetry_edge.AgentType_TELEGRAF). Debug("already running") if applyConfigs { log. WithField("agentType", telemetry_edge.AgentType_TELEGRAF). Info("signaling config reload") tr.handleConfigReload() } return } runningContext := tr.commandHandler.CreateContext(ctx, telemetry_edge.AgentType_TELEGRAF, tr.exePath(), tr.basePath, "--config", tr.configServerURL) // telegraf returns the INFLUX_TOKEN in the http config request header runningContext.AppendEnv("INFLUX_TOKEN=" + tr.configServerToken) err := tr.commandHandler.StartAgentCommand(runningContext, telemetry_edge.AgentType_TELEGRAF, "Loaded inputs:", telegrafStartupDuration) if err != nil { log.WithError(err). WithField("agentType", telemetry_edge.AgentType_TELEGRAF). Warn("failed to start agent") return } go tr.commandHandler.WaitOnAgentCommand(ctx, tr, runningContext) tr.running = runningContext log.WithField("pid", runningContext.Pid()). WithField("agentType", telemetry_edge.AgentType_TELEGRAF). Info("started agent") } // exePath returns path to executable relative to baseDir func (tr *TelegrafRunner) exePath() string { return filepath.Join(currentVerLink, binSubpath, "telegraf") } func (tr *TelegrafRunner) Stop() { tr.commandHandler.Stop(tr.running) tr.running = nil } func (tr *TelegrafRunner) createMainConfig() ([]byte, error) { data := &telegrafMainConfigData{ IngestAddress: tr.ingestAddress, DefaultMonitoringInterval: viper.GetDuration(config.AgentsDefaultMonitoringInterval), MaxFlushInterval: viper.GetDuration(config.AgentsMaxFlushInterval), } var b bytes.Buffer err := telegrafMainConfigTmpl.Execute(&b, data) if err != nil { return nil, errors.Wrap(err, "failed to execute telegraf main config template") } return b.Bytes(), nil } func (tr *TelegrafRunner) handleConfigReload() { if err := tr.commandHandler.Signal(tr.running, syscall.SIGHUP); err != nil { log.WithError(err).WithField("pid", tr.running.Pid()). 
Warn("failed to signal agent process") } } func (tr *TelegrafRunner) hasRequiredPaths() bool { fullExePath := path.Join(tr.basePath, tr.exePath()) if !fileExists(fullExePath) { log.WithField("exe", fullExePath).Debug("missing exe") return false } return true } func (tr *TelegrafRunner) ProcessTestMonitor(correlationId string, content string, timeout time.Duration) (*telemetry_edge.TestMonitorResults, error) { // Convert content to TOML configToml, err := ConvertJsonToTelegrafToml(content, nil, 0) if err != nil { return nil, errors.Wrapf(err, "failed to convert config content") } // Generate token/id used for authenticating and pulling telegraf config testConfigServerToken := uuid.NewV4().String() testConfigServerId := uuid.NewV4().String() // Bind to the next available port by using :0 listener, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { return nil, errors.Wrap(err, "couldn't create http listener") } //noinspection GoUnhandledErrorResult defer listener.Close() listenerPort := listener.Addr().(*net.TCPAddr).Port hostPort := fmt.Sprintf("127.0.0.1:%d", listenerPort) configServerErrors := make(chan error, 2) testConfigRunner := telegrafTestConfigRunnerBuilder(testConfigServerId, testConfigServerToken) // Start the config server configServer := testConfigRunner.StartTestConfigServer(configToml, configServerErrors, listener) // Run the telegraf test command results := &telemetry_edge.TestMonitorResults{ CorrelationId: correlationId, Errors: []string{}, } // Sometimes telegraf --test completes with empty output and no error indicated, // so retry a few times. If that still fails, then a parse error will be produced as without retrying. var cmdOut []byte for attempt := 0; attempt < telegrafMaxTestMonitorRetries; attempt++ { cmdOut, err = testConfigRunner.RunCommand(hostPort, tr.exePath(), tr.basePath, timeout) if err != nil || len(cmdOut) != 0
// wait just a bit between each try time.Sleep(telegrafTestMonitorRetryDelay) } log. WithError(err). WithField("correlationId", correlationId). WithField("content", content). WithField("out", string(cmdOut)). Debug("ran telegraf with test config") if err != nil { if exitErr, ok := err.(*exec.ExitError); ok { exitErrMessage := err.Error() // checking error's message is portable and easy way to determine if the exec timeout was exceeded if exitErrMessage == "signal: killed" { results.Errors = append(results.Errors, "Command took too long to run") } else { results.Errors = append(results.Errors, "Command failed: "+err.Error()) } results.Errors = append(results.Errors, "Command failed with error output: "+string(exitErr.Stderr)) } else { results.Errors = append(results.Errors, "Command failed: "+err.Error()) } } else { // ... and process output parsedMetrics, err := lineproto.ParseInfluxLineProtocolMetrics(cmdOut) if err != nil { results.Errors = append(results.Errors, "Failed to parse telegraf output: "+err.Error()) } else { // Wrap up the named tag-value metrics into the general metrics type results.Metrics = make([]*telemetry_edge.Metric, len(parsedMetrics)) for i, metric := range parsedMetrics { results.Metrics[i] = &telemetry_edge.Metric{ Variant: &telemetry_edge.Metric_NameTagValue{NameTagValue: metric}, } } } } // Close out the temporary config server _ = configServer.Close() close(configServerErrors) // ...capture any errors from the config server for err := range configServerErrors { results.Errors = append(results.Errors, "ConfigServer: "+err.Error()) } return results, nil }
{ break }
conditional_block
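The middle of the row above is the break inside the bounded retry around the telegraf test run, which can exit cleanly with empty output and no error. A small sketch of that retry shape follows; runOnce is a hypothetical stand-in for the real command invocation, while the retry count and delay mirror the constants in the row.

// Bounded-retry sketch: stop on a real error or on any output; only the
// "empty output, no error" case is worth another attempt.
package main

import (
	"fmt"
	"time"
)

const (
	maxRetries = 3
	retryDelay = 500 * time.Millisecond
)

// runOnce is a placeholder for shelling out to the test command; the first
// attempts simulate the flaky empty-output case.
func runOnce(attempt int) ([]byte, error) {
	if attempt < 2 {
		return nil, nil
	}
	return []byte(`cpu usage_idle=99.1`), nil
}

func main() {
	var out []byte
	var err error
	for attempt := 0; attempt < maxRetries; attempt++ {
		out, err = runOnce(attempt)
		if err != nil || len(out) != 0 {
			break
		}
		time.Sleep(retryDelay) // wait a bit between tries
	}
	fmt.Printf("err=%v out=%q\n", err, out)
}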
telegraf.go
/* * Copyright 2020 Rackspace US, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package agents import ( "bytes" "context" "fmt" "github.com/pkg/errors" "github.com/racker/salus-telemetry-envoy/config" "github.com/racker/salus-telemetry-envoy/lineproto" "github.com/racker/salus-telemetry-protocol/telemetry_edge" "github.com/satori/go.uuid" log "github.com/sirupsen/logrus" "github.com/spf13/viper" "net" "net/http" "os/exec" "path" "path/filepath" "syscall" "text/template" "time" ) var telegrafMainConfigTmpl = template.Must(template.New("telegrafMain").Parse(` [agent] interval = "{{.DefaultMonitoringInterval}}" flush_interval = "{{.MaxFlushInterval}}" flush_jitter = "2s" omit_hostname = true [[outputs.socket_writer]] address = "tcp://{{.IngestAddress}}" data_format = "json" json_timestamp_units = "1ms" [[inputs.internal]] collect_memstats = false `)) var ( telegrafStartupDuration = 10 * time.Second ) const ( telegrafMaxTestMonitorRetries = 3 telegrafTestMonitorRetryDelay = 500 * time.Millisecond ) type telegrafMainConfigData struct { IngestAddress string DefaultMonitoringInterval time.Duration MaxFlushInterval time.Duration } type TelegrafRunner struct { ingestAddress string basePath string running *AgentRunningContext commandHandler CommandHandler configServerMux *http.ServeMux configServerURL string configServerToken string configServerHandler http.HandlerFunc tomlMainConfig []byte // tomlConfigs key is the "bound monitor id", i.e. 
monitorId_resourceId tomlConfigs map[string][]byte } func (tr *TelegrafRunner) PurgeConfig() error { tr.tomlConfigs = make(map[string][]byte) return nil } func init() { registerSpecificAgentRunner(telemetry_edge.AgentType_TELEGRAF, &TelegrafRunner{}) } func (tr *TelegrafRunner) Load(agentBasePath string) error { tr.ingestAddress = config.GetListenerAddress(config.TelegrafJsonListener) tr.basePath = agentBasePath tr.configServerToken = uuid.NewV4().String() tr.configServerHandler = func(w http.ResponseWriter, r *http.Request) { if r.Header.Get("authorization") != "Token "+tr.configServerToken { http.Error(w, "unauthorized", http.StatusUnauthorized) return } _, err := w.Write(tr.concatConfigs()) if err != nil { log.Errorf("Error writing config page %v", err) } } serverId := uuid.NewV4().String() tr.configServerMux = http.NewServeMux() tr.configServerMux.Handle("/"+serverId, tr.configServerHandler) // Get the next available port listener, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { return errors.Wrap(err, "couldn't create http listener") } listenerPort := listener.Addr().(*net.TCPAddr).Port tr.configServerURL = fmt.Sprintf("http://127.0.0.1:%d/%s", listenerPort, serverId) tr.tomlConfigs = make(map[string][]byte) mainConfig, err := tr.createMainConfig() if err != nil { return errors.Wrap(err, "couldn't create main config") } tr.tomlMainConfig = mainConfig go tr.serve(listener) return nil } func (tr *TelegrafRunner) serve(listener net.Listener) { log.Info("started webServer") err := http.Serve(listener, tr.configServerMux) // Note this is probably not the best way to handle webserver failure log.Fatalf("web server error %v", err) } func (tr *TelegrafRunner) SetCommandHandler(handler CommandHandler) { tr.commandHandler = handler } func (tr *TelegrafRunner) ProcessConfig(configure *telemetry_edge.EnvoyInstructionConfigure) error { applied := 0 for _, op := range configure.GetOperations() { log.WithField("op", op).Debug("processing telegraf config operation") if tr.handleTelegrafConfigurationOp(op) { applied++ } } if applied == 0 { return &noAppliedConfigsError{} } return nil } func (tr *TelegrafRunner) concatConfigs() []byte { var configs []byte configs = append(configs, tr.tomlMainConfig...) // telegraf can only handle one 'inputs' header per file so add exactly one here configs = append(configs, []byte("[inputs]")...) for _, v := range tr.tomlConfigs { // remove the other redundant '[inputs]' headers here if bytes.Equal([]byte("[inputs]"), v[0:8]) { v = v[8:] } configs = append(configs, v...) } return configs } func (tr *TelegrafRunner) handleTelegrafConfigurationOp(op *telemetry_edge.ConfigurationOp) bool { switch op.GetType() { case telemetry_edge.ConfigurationOp_CREATE, telemetry_edge.ConfigurationOp_MODIFY: var finalConfig []byte var err error finalConfig, err = ConvertJsonToTelegrafToml(op.GetContent(), op.ExtraLabels, op.Interval) if err != nil { log.WithError(err).WithField("op", op).Warn("failed to convert config blob to TOML") return false } tr.tomlConfigs[op.GetId()] = finalConfig return true case telemetry_edge.ConfigurationOp_REMOVE: if _, ok := tr.tomlConfigs[op.GetId()]; ok { delete(tr.tomlConfigs, op.GetId()) return true } return false } return false } func (tr *TelegrafRunner) PostInstall(agentVersionPath string) error { resolvedExePath := path.Join(agentVersionPath, binSubpath, "telegraf") err := addNetRawCapabilities(resolvedExePath) if err != nil { log.WithError(err). WithField("agentExe", resolvedExePath). 
Warn("failed to set net_raw capabilities on telegraf, native ping will not work") } return nil } func (tr *TelegrafRunner) EnsureRunningState(ctx context.Context, applyConfigs bool) { log.Debug("ensuring telegraf is in correct running state") if !tr.hasRequiredPaths() { log.Debug("telegraf not runnable due to some missing paths and files, stopping if needed") tr.commandHandler.Stop(tr.running) return } if tr.running.IsRunning() { log. WithField("agentType", telemetry_edge.AgentType_TELEGRAF). Debug("already running") if applyConfigs { log. WithField("agentType", telemetry_edge.AgentType_TELEGRAF). Info("signaling config reload") tr.handleConfigReload() } return } runningContext := tr.commandHandler.CreateContext(ctx, telemetry_edge.AgentType_TELEGRAF, tr.exePath(), tr.basePath, "--config", tr.configServerURL) // telegraf returns the INFLUX_TOKEN in the http config request header runningContext.AppendEnv("INFLUX_TOKEN=" + tr.configServerToken) err := tr.commandHandler.StartAgentCommand(runningContext, telemetry_edge.AgentType_TELEGRAF, "Loaded inputs:", telegrafStartupDuration) if err != nil { log.WithError(err). WithField("agentType", telemetry_edge.AgentType_TELEGRAF). Warn("failed to start agent") return } go tr.commandHandler.WaitOnAgentCommand(ctx, tr, runningContext) tr.running = runningContext log.WithField("pid", runningContext.Pid()). WithField("agentType", telemetry_edge.AgentType_TELEGRAF). Info("started agent") } // exePath returns path to executable relative to baseDir func (tr *TelegrafRunner) exePath() string { return filepath.Join(currentVerLink, binSubpath, "telegraf") } func (tr *TelegrafRunner)
() { tr.commandHandler.Stop(tr.running) tr.running = nil } func (tr *TelegrafRunner) createMainConfig() ([]byte, error) { data := &telegrafMainConfigData{ IngestAddress: tr.ingestAddress, DefaultMonitoringInterval: viper.GetDuration(config.AgentsDefaultMonitoringInterval), MaxFlushInterval: viper.GetDuration(config.AgentsMaxFlushInterval), } var b bytes.Buffer err := telegrafMainConfigTmpl.Execute(&b, data) if err != nil { return nil, errors.Wrap(err, "failed to execute telegraf main config template") } return b.Bytes(), nil } func (tr *TelegrafRunner) handleConfigReload() { if err := tr.commandHandler.Signal(tr.running, syscall.SIGHUP); err != nil { log.WithError(err).WithField("pid", tr.running.Pid()). Warn("failed to signal agent process") } } func (tr *TelegrafRunner) hasRequiredPaths() bool { fullExePath := path.Join(tr.basePath, tr.exePath()) if !fileExists(fullExePath) { log.WithField("exe", fullExePath).Debug("missing exe") return false } return true } func (tr *TelegrafRunner) ProcessTestMonitor(correlationId string, content string, timeout time.Duration) (*telemetry_edge.TestMonitorResults, error) { // Convert content to TOML configToml, err := ConvertJsonToTelegrafToml(content, nil, 0) if err != nil { return nil, errors.Wrapf(err, "failed to convert config content") } // Generate token/id used for authenticating and pulling telegraf config testConfigServerToken := uuid.NewV4().String() testConfigServerId := uuid.NewV4().String() // Bind to the next available port by using :0 listener, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { return nil, errors.Wrap(err, "couldn't create http listener") } //noinspection GoUnhandledErrorResult defer listener.Close() listenerPort := listener.Addr().(*net.TCPAddr).Port hostPort := fmt.Sprintf("127.0.0.1:%d", listenerPort) configServerErrors := make(chan error, 2) testConfigRunner := telegrafTestConfigRunnerBuilder(testConfigServerId, testConfigServerToken) // Start the config server configServer := testConfigRunner.StartTestConfigServer(configToml, configServerErrors, listener) // Run the telegraf test command results := &telemetry_edge.TestMonitorResults{ CorrelationId: correlationId, Errors: []string{}, } // Sometimes telegraf --test completes with empty output and no error indicated, // so retry a few times. If that still fails, then a parse error will be produced as without retrying. var cmdOut []byte for attempt := 0; attempt < telegrafMaxTestMonitorRetries; attempt++ { cmdOut, err = testConfigRunner.RunCommand(hostPort, tr.exePath(), tr.basePath, timeout) if err != nil || len(cmdOut) != 0 { break } // wait just a bit between each try time.Sleep(telegrafTestMonitorRetryDelay) } log. WithError(err). WithField("correlationId", correlationId). WithField("content", content). WithField("out", string(cmdOut)). Debug("ran telegraf with test config") if err != nil { if exitErr, ok := err.(*exec.ExitError); ok { exitErrMessage := err.Error() // checking error's message is portable and easy way to determine if the exec timeout was exceeded if exitErrMessage == "signal: killed" { results.Errors = append(results.Errors, "Command took too long to run") } else { results.Errors = append(results.Errors, "Command failed: "+err.Error()) } results.Errors = append(results.Errors, "Command failed with error output: "+string(exitErr.Stderr)) } else { results.Errors = append(results.Errors, "Command failed: "+err.Error()) } } else { // ... 
and process output parsedMetrics, err := lineproto.ParseInfluxLineProtocolMetrics(cmdOut) if err != nil { results.Errors = append(results.Errors, "Failed to parse telegraf output: "+err.Error()) } else { // Wrap up the named tag-value metrics into the general metrics type results.Metrics = make([]*telemetry_edge.Metric, len(parsedMetrics)) for i, metric := range parsedMetrics { results.Metrics[i] = &telemetry_edge.Metric{ Variant: &telemetry_edge.Metric_NameTagValue{NameTagValue: metric}, } } } } // Close out the temporary config server _ = configServer.Close() close(configServerErrors) // ...capture any errors from the config server for err := range configServerErrors { results.Errors = append(results.Errors, "ConfigServer: "+err.Error()) } return results, nil }
Stop
identifier_name
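To make the identifier_name split above easier to follow: only the method name is masked, so concatenating prefix + middle + suffix restores the complete Stop method of TelegrafRunner. A minimal, self-contained Go sketch of that reassembly follows; the string literals are abbreviated copies of this row's fields, not the full prefix and suffix values.

package main

import "fmt"

func main() {
	// Abbreviated copies of this row's fields; the real prefix/suffix are much longer.
	prefix := "func (tr *TelegrafRunner) "
	middle := "Stop" // the masked identifier_name
	suffix := "() {\n\ttr.commandHandler.Stop(tr.running)\n\ttr.running = nil\n}"
	fmt.Println(prefix + middle + suffix) // prints the reconstructed method
}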
telegraf.go
/* * Copyright 2020 Rackspace US, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package agents import ( "bytes" "context" "fmt" "github.com/pkg/errors" "github.com/racker/salus-telemetry-envoy/config" "github.com/racker/salus-telemetry-envoy/lineproto" "github.com/racker/salus-telemetry-protocol/telemetry_edge" "github.com/satori/go.uuid" log "github.com/sirupsen/logrus" "github.com/spf13/viper" "net" "net/http" "os/exec" "path" "path/filepath" "syscall" "text/template" "time" ) var telegrafMainConfigTmpl = template.Must(template.New("telegrafMain").Parse(` [agent] interval = "{{.DefaultMonitoringInterval}}" flush_interval = "{{.MaxFlushInterval}}" flush_jitter = "2s" omit_hostname = true [[outputs.socket_writer]] address = "tcp://{{.IngestAddress}}" data_format = "json" json_timestamp_units = "1ms" [[inputs.internal]] collect_memstats = false `)) var ( telegrafStartupDuration = 10 * time.Second ) const ( telegrafMaxTestMonitorRetries = 3 telegrafTestMonitorRetryDelay = 500 * time.Millisecond ) type telegrafMainConfigData struct { IngestAddress string DefaultMonitoringInterval time.Duration
type TelegrafRunner struct { ingestAddress string basePath string running *AgentRunningContext commandHandler CommandHandler configServerMux *http.ServeMux configServerURL string configServerToken string configServerHandler http.HandlerFunc tomlMainConfig []byte // tomlConfigs key is the "bound monitor id", i.e. monitorId_resourceId tomlConfigs map[string][]byte } func (tr *TelegrafRunner) PurgeConfig() error { tr.tomlConfigs = make(map[string][]byte) return nil } func init() { registerSpecificAgentRunner(telemetry_edge.AgentType_TELEGRAF, &TelegrafRunner{}) } func (tr *TelegrafRunner) Load(agentBasePath string) error { tr.ingestAddress = config.GetListenerAddress(config.TelegrafJsonListener) tr.basePath = agentBasePath tr.configServerToken = uuid.NewV4().String() tr.configServerHandler = func(w http.ResponseWriter, r *http.Request) { if r.Header.Get("authorization") != "Token "+tr.configServerToken { http.Error(w, "unauthorized", http.StatusUnauthorized) return } _, err := w.Write(tr.concatConfigs()) if err != nil { log.Errorf("Error writing config page %v", err) } } serverId := uuid.NewV4().String() tr.configServerMux = http.NewServeMux() tr.configServerMux.Handle("/"+serverId, tr.configServerHandler) // Get the next available port listener, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { return errors.Wrap(err, "couldn't create http listener") } listenerPort := listener.Addr().(*net.TCPAddr).Port tr.configServerURL = fmt.Sprintf("http://127.0.0.1:%d/%s", listenerPort, serverId) tr.tomlConfigs = make(map[string][]byte) mainConfig, err := tr.createMainConfig() if err != nil { return errors.Wrap(err, "couldn't create main config") } tr.tomlMainConfig = mainConfig go tr.serve(listener) return nil } func (tr *TelegrafRunner) serve(listener net.Listener) { log.Info("started webServer") err := http.Serve(listener, tr.configServerMux) // Note this is probably not the best way to handle webserver failure log.Fatalf("web server error %v", err) } func (tr *TelegrafRunner) SetCommandHandler(handler CommandHandler) { tr.commandHandler = handler } func (tr *TelegrafRunner) ProcessConfig(configure *telemetry_edge.EnvoyInstructionConfigure) error { applied := 0 for _, op := range configure.GetOperations() { log.WithField("op", op).Debug("processing telegraf config operation") if tr.handleTelegrafConfigurationOp(op) { applied++ } } if applied == 0 { return &noAppliedConfigsError{} } return nil } func (tr *TelegrafRunner) concatConfigs() []byte { var configs []byte configs = append(configs, tr.tomlMainConfig...) // telegraf can only handle one 'inputs' header per file so add exactly one here configs = append(configs, []byte("[inputs]")...) for _, v := range tr.tomlConfigs { // remove the other redundant '[inputs]' headers here if bytes.Equal([]byte("[inputs]"), v[0:8]) { v = v[8:] } configs = append(configs, v...) 
} return configs } func (tr *TelegrafRunner) handleTelegrafConfigurationOp(op *telemetry_edge.ConfigurationOp) bool { switch op.GetType() { case telemetry_edge.ConfigurationOp_CREATE, telemetry_edge.ConfigurationOp_MODIFY: var finalConfig []byte var err error finalConfig, err = ConvertJsonToTelegrafToml(op.GetContent(), op.ExtraLabels, op.Interval) if err != nil { log.WithError(err).WithField("op", op).Warn("failed to convert config blob to TOML") return false } tr.tomlConfigs[op.GetId()] = finalConfig return true case telemetry_edge.ConfigurationOp_REMOVE: if _, ok := tr.tomlConfigs[op.GetId()]; ok { delete(tr.tomlConfigs, op.GetId()) return true } return false } return false } func (tr *TelegrafRunner) PostInstall(agentVersionPath string) error { resolvedExePath := path.Join(agentVersionPath, binSubpath, "telegraf") err := addNetRawCapabilities(resolvedExePath) if err != nil { log.WithError(err). WithField("agentExe", resolvedExePath). Warn("failed to set net_raw capabilities on telegraf, native ping will not work") } return nil } func (tr *TelegrafRunner) EnsureRunningState(ctx context.Context, applyConfigs bool) { log.Debug("ensuring telegraf is in correct running state") if !tr.hasRequiredPaths() { log.Debug("telegraf not runnable due to some missing paths and files, stopping if needed") tr.commandHandler.Stop(tr.running) return } if tr.running.IsRunning() { log. WithField("agentType", telemetry_edge.AgentType_TELEGRAF). Debug("already running") if applyConfigs { log. WithField("agentType", telemetry_edge.AgentType_TELEGRAF). Info("signaling config reload") tr.handleConfigReload() } return } runningContext := tr.commandHandler.CreateContext(ctx, telemetry_edge.AgentType_TELEGRAF, tr.exePath(), tr.basePath, "--config", tr.configServerURL) // telegraf returns the INFLUX_TOKEN in the http config request header runningContext.AppendEnv("INFLUX_TOKEN=" + tr.configServerToken) err := tr.commandHandler.StartAgentCommand(runningContext, telemetry_edge.AgentType_TELEGRAF, "Loaded inputs:", telegrafStartupDuration) if err != nil { log.WithError(err). WithField("agentType", telemetry_edge.AgentType_TELEGRAF). Warn("failed to start agent") return } go tr.commandHandler.WaitOnAgentCommand(ctx, tr, runningContext) tr.running = runningContext log.WithField("pid", runningContext.Pid()). WithField("agentType", telemetry_edge.AgentType_TELEGRAF). Info("started agent") } // exePath returns path to executable relative to baseDir func (tr *TelegrafRunner) exePath() string { return filepath.Join(currentVerLink, binSubpath, "telegraf") } func (tr *TelegrafRunner) Stop() { tr.commandHandler.Stop(tr.running) tr.running = nil } func (tr *TelegrafRunner) createMainConfig() ([]byte, error) { data := &telegrafMainConfigData{ IngestAddress: tr.ingestAddress, DefaultMonitoringInterval: viper.GetDuration(config.AgentsDefaultMonitoringInterval), MaxFlushInterval: viper.GetDuration(config.AgentsMaxFlushInterval), } var b bytes.Buffer err := telegrafMainConfigTmpl.Execute(&b, data) if err != nil { return nil, errors.Wrap(err, "failed to execute telegraf main config template") } return b.Bytes(), nil } func (tr *TelegrafRunner) handleConfigReload() { if err := tr.commandHandler.Signal(tr.running, syscall.SIGHUP); err != nil { log.WithError(err).WithField("pid", tr.running.Pid()). 
Warn("failed to signal agent process") } } func (tr *TelegrafRunner) hasRequiredPaths() bool { fullExePath := path.Join(tr.basePath, tr.exePath()) if !fileExists(fullExePath) { log.WithField("exe", fullExePath).Debug("missing exe") return false } return true } func (tr *TelegrafRunner) ProcessTestMonitor(correlationId string, content string, timeout time.Duration) (*telemetry_edge.TestMonitorResults, error) { // Convert content to TOML configToml, err := ConvertJsonToTelegrafToml(content, nil, 0) if err != nil { return nil, errors.Wrapf(err, "failed to convert config content") } // Generate token/id used for authenticating and pulling telegraf config testConfigServerToken := uuid.NewV4().String() testConfigServerId := uuid.NewV4().String() // Bind to the next available port by using :0 listener, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { return nil, errors.Wrap(err, "couldn't create http listener") } //noinspection GoUnhandledErrorResult defer listener.Close() listenerPort := listener.Addr().(*net.TCPAddr).Port hostPort := fmt.Sprintf("127.0.0.1:%d", listenerPort) configServerErrors := make(chan error, 2) testConfigRunner := telegrafTestConfigRunnerBuilder(testConfigServerId, testConfigServerToken) // Start the config server configServer := testConfigRunner.StartTestConfigServer(configToml, configServerErrors, listener) // Run the telegraf test command results := &telemetry_edge.TestMonitorResults{ CorrelationId: correlationId, Errors: []string{}, } // Sometimes telegraf --test completes with empty output and no error indicated, // so retry a few times. If that still fails, then a parse error will be produced as without retrying. var cmdOut []byte for attempt := 0; attempt < telegrafMaxTestMonitorRetries; attempt++ { cmdOut, err = testConfigRunner.RunCommand(hostPort, tr.exePath(), tr.basePath, timeout) if err != nil || len(cmdOut) != 0 { break } // wait just a bit between each try time.Sleep(telegrafTestMonitorRetryDelay) } log. WithError(err). WithField("correlationId", correlationId). WithField("content", content). WithField("out", string(cmdOut)). Debug("ran telegraf with test config") if err != nil { if exitErr, ok := err.(*exec.ExitError); ok { exitErrMessage := err.Error() // checking error's message is portable and easy way to determine if the exec timeout was exceeded if exitErrMessage == "signal: killed" { results.Errors = append(results.Errors, "Command took too long to run") } else { results.Errors = append(results.Errors, "Command failed: "+err.Error()) } results.Errors = append(results.Errors, "Command failed with error output: "+string(exitErr.Stderr)) } else { results.Errors = append(results.Errors, "Command failed: "+err.Error()) } } else { // ... and process output parsedMetrics, err := lineproto.ParseInfluxLineProtocolMetrics(cmdOut) if err != nil { results.Errors = append(results.Errors, "Failed to parse telegraf output: "+err.Error()) } else { // Wrap up the named tag-value metrics into the general metrics type results.Metrics = make([]*telemetry_edge.Metric, len(parsedMetrics)) for i, metric := range parsedMetrics { results.Metrics[i] = &telemetry_edge.Metric{ Variant: &telemetry_edge.Metric_NameTagValue{NameTagValue: metric}, } } } } // Close out the temporary config server _ = configServer.Close() close(configServerErrors) // ...capture any errors from the config server for err := range configServerErrors { results.Errors = append(results.Errors, "ConfigServer: "+err.Error()) } return results, nil }
MaxFlushInterval time.Duration }
random_line_split
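In this random_line_split row the middle closes the telegrafMainConfigData struct, so prefix + middle + suffix reconstructs the full declaration. Below is a hedged sketch of the reconstructed struct; the field names and types are taken from the row above, while the values in main are hypothetical and only make the example runnable.

package main

import (
	"fmt"
	"time"
)

// Reconstructed from the row above: the prefix supplies the first two fields,
// the middle supplies the final field and the closing brace.
type telegrafMainConfigData struct {
	IngestAddress             string
	DefaultMonitoringInterval time.Duration
	MaxFlushInterval          time.Duration
}

func main() {
	d := telegrafMainConfigData{
		IngestAddress:             "127.0.0.1:8094", // hypothetical example address
		DefaultMonitoringInterval: 30 * time.Second, // hypothetical value
		MaxFlushInterval:          30 * time.Second, // hypothetical value
	}
	fmt.Printf("%+v\n", d)
}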
main_test.go
package main import ( "bufio" "encoding/json" "errors" "fmt" "io/ioutil" "log" "os" "strings" "testing" "github.com/CMSgov/bcda-app/bcda/client" "github.com/CMSgov/bcda-app/bcda/database" "github.com/CMSgov/bcda-app/bcda/models" "github.com/CMSgov/bcda-app/bcda/testUtils" que "github.com/bgentry/que-go" "github.com/pborman/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/suite" ) type MockBlueButtonClient struct { mock.Mock client.BlueButtonClient } type MainTestSuite struct { testUtils.AuthTestSuite } func (s *MainTestSuite) SetupTest() { os.Setenv("FHIR_PAYLOAD_DIR", "data/test") os.Setenv("BB_CLIENT_CERT_FILE", "../shared_files/bb-dev-test-cert.pem") os.Setenv("BB_CLIENT_KEY_FILE", "../shared_files/bb-dev-test-key.pem") os.Setenv("BB_CLIENT_CA_FILE", "../shared_files/localhost.crt") models.InitializeGormModels() } func (s *MainTestSuite) TearDownTest() { testUtils.PrintSeparator() } func TestMainTestSuite(t *testing.T) { suite.Run(t, new(MainTestSuite)) } func TestWriteEOBDataToFile(t *testing.T) { os.Setenv("FHIR_STAGING_DIR", "data/test") bbc := MockBlueButtonClient{} acoID := "9c05c1f8-349d-400f-9b69-7963f2262b07" beneficiaryIDs := []string{"10000", "11000"} jobID := "1" staging := fmt.Sprintf("%s/%s", os.Getenv("FHIR_STAGING_DIR"), jobID) // clean out the data dir before beginning this test os.RemoveAll(staging) testUtils.CreateStaging(jobID) for i := 0; i < len(beneficiaryIDs); i++ { bbc.On("GetExplanationOfBenefitData", beneficiaryIDs[i]).Return(bbc.getData("ExplanationOfBenefit", beneficiaryIDs[i])) } _, err := writeBBDataToFile(&bbc, acoID, beneficiaryIDs, jobID, "ExplanationOfBenefit") if err != nil { t.Fail() } files, err := ioutil.ReadDir(staging) assert.Nil(t, err) assert.Equal(t, 1, len(files)) for _, f := range files { fmt.Println(f.Name()) filePath := fmt.Sprintf("%s/%s/%s", os.Getenv("FHIR_STAGING_DIR"), jobID, f.Name()) file, err := os.Open(filePath) if err != nil { log.Fatal(err) } scanner := bufio.NewScanner(file) // 33 entries in test EOB data returned by bbc.getData, times two beneficiaries for i := 0; i < 66; i++ { assert.True(t, scanner.Scan()) var jsonOBJ map[string]interface{} err := json.Unmarshal(scanner.Bytes(), &jsonOBJ) assert.Nil(t, err) assert.NotNil(t, jsonOBJ["fullUrl"], "JSON should contain a value for `fullUrl`.") assert.NotNil(t, jsonOBJ["resource"], "JSON should contain a value for `resource`.") } assert.False(t, scanner.Scan(), "There should be only 66 entries in the file.") bbc.AssertExpectations(t) file.Close() os.Remove(filePath) } } func TestWriteEOBDataToFileNoClient(t *testing.T) { _, err := writeBBDataToFile(nil, "9c05c1f8-349d-400f-9b69-7963f2262b08", []string{"20000", "21000"}, "1", "ExplanationOfBenefit") assert.NotNil(t, err) } func TestWriteEOBDataToFileInvalidACO(t *testing.T) { bbc := MockBlueButtonClient{} acoID := "9c05c1f8-349d-400f-9b69-7963f2262zzz" beneficiaryIDs := []string{"10000", "11000"} _, err := writeBBDataToFile(&bbc, acoID, beneficiaryIDs, "1", "ExplanationOfBenefit") assert.NotNil(t, err) } func TestWriteEOBDataToFileWithErrorsBelowFailureThreshold(t *testing.T) { os.Setenv("FHIR_STAGING_DIR", "data/test") origFailPct := os.Getenv("EXPORT_FAIL_PCT") defer os.Setenv("EXPORT_FAIL_PCT", origFailPct) os.Setenv("EXPORT_FAIL_PCT", "70") bbc := MockBlueButtonClient{} // Set up the mock function to return the expected values bbc.On("GetExplanationOfBenefitData", "10000").Return("", errors.New("error")) bbc.On("GetExplanationOfBenefitData", "11000").Return("", 
errors.New("error")) bbc.On("GetExplanationOfBenefitData", "12000").Return(bbc.getData("ExplanationOfBenefit", "12000")) acoID := "387c3a62-96fa-4d93-a5d0-fd8725509dd9" beneficiaryIDs := []string{"10000", "11000", "12000"} jobID := "1" testUtils.CreateStaging(jobID) fileName, err := writeBBDataToFile(&bbc, acoID, beneficiaryIDs, jobID, "ExplanationOfBenefit") if err != nil { t.Fail() } filePath := fmt.Sprintf("%s/%s/%s-error.ndjson", os.Getenv("FHIR_STAGING_DIR"), jobID, acoID) fData, err := ioutil.ReadFile(filePath) if err != nil { t.Fail() } ooResp := `{"resourceType":"OperationOutcome","issue":[{"severity":"Error","code":"Exception","details":{"coding":[{"display":"Error retrieving ExplanationOfBenefit for beneficiary 10000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}],"text":"Error retrieving ExplanationOfBenefit for beneficiary 10000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}}]} {"resourceType":"OperationOutcome","issue":[{"severity":"Error","code":"Exception","details":{"coding":[{"display":"Error retrieving ExplanationOfBenefit for beneficiary 11000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}],"text":"Error retrieving ExplanationOfBenefit for beneficiary 11000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}}]}` assert.Equal(t, ooResp+"\n", string(fData)) bbc.AssertExpectations(t) os.Remove(fmt.Sprintf("%s/%s/%s", os.Getenv("FHIR_STAGING_DIR"), jobID, fileName)) os.Remove(filePath) } func TestWriteEOBDataToFileWithErrorsAboveFailureThreshold(t *testing.T) { os.Setenv("FHIR_STAGING_DIR", "data/test") origFailPct := os.Getenv("EXPORT_FAIL_PCT") defer os.Setenv("EXPORT_FAIL_PCT", origFailPct) os.Setenv("EXPORT_FAIL_PCT", "60") bbc := MockBlueButtonClient{} // Set up the mock function to return the expected values bbc.On("GetExplanationOfBenefitData", "10000").Return("", errors.New("error")) bbc.On("GetExplanationOfBenefitData", "11000").Return("", errors.New("error")) acoID := "387c3a62-96fa-4d93-a5d0-fd8725509dd9" beneficiaryIDs := []string{"10000", "11000", "12000"} jobID := "1" testUtils.CreateStaging(jobID) _, err := writeBBDataToFile(&bbc, acoID, beneficiaryIDs, jobID, "ExplanationOfBenefit") assert.Equal(t, "number of failed requests has exceeded threshold", err.Error()) filePath := fmt.Sprintf("%s/%s/%s-error.ndjson", os.Getenv("FHIR_STAGING_DIR"), jobID, acoID) fData, err := ioutil.ReadFile(filePath) if err != nil { t.Fail() } ooResp := `{"resourceType":"OperationOutcome","issue":[{"severity":"Error","code":"Exception","details":{"coding":[{"display":"Error retrieving ExplanationOfBenefit for beneficiary 10000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}],"text":"Error retrieving ExplanationOfBenefit for beneficiary 10000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}}]} {"resourceType":"OperationOutcome","issue":[{"severity":"Error","code":"Exception","details":{"coding":[{"display":"Error retrieving ExplanationOfBenefit for beneficiary 11000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}],"text":"Error retrieving ExplanationOfBenefit for beneficiary 11000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}}]}` assert.Equal(t, ooResp+"\n", string(fData)) bbc.AssertExpectations(t) // should not have requested third beneficiary EOB because failure threshold was reached after second bbc.AssertNotCalled(t, "GetExplanationOfBenefitData", "12000") os.Remove(fmt.Sprintf("%s/%s/%s.ndjson", os.Getenv("FHIR_STAGING_DIR"), jobID, acoID)) os.Remove(filePath) } func
(t *testing.T) { origFailPct := os.Getenv("EXPORT_FAIL_PCT") defer os.Setenv("EXPORT_FAIL_PCT", origFailPct) os.Setenv("EXPORT_FAIL_PCT", "60") assert.Equal(t, 60.0, getFailureThreshold()) os.Setenv("EXPORT_FAIL_PCT", "-1") assert.Equal(t, 0.0, getFailureThreshold()) os.Setenv("EXPORT_FAIL_PCT", "500") assert.Equal(t, 100.0, getFailureThreshold()) os.Setenv("EXPORT_FAIL_PCT", "zero") assert.Equal(t, 50.0, getFailureThreshold()) } func TestAppendErrorToFile(t *testing.T) { os.Setenv("FHIR_STAGING_DIR", "data/test") acoID := "328e83c3-bc46-4827-836c-0ba0c713dc7d" jobID := "1" testUtils.CreateStaging(jobID) appendErrorToFile(acoID, "", "", "", jobID) filePath := fmt.Sprintf("%s/%s/%s-error.ndjson", os.Getenv("FHIR_STAGING_DIR"), jobID, acoID) fData, err := ioutil.ReadFile(filePath) if err != nil { t.Fail() } ooResp := `{"resourceType":"OperationOutcome","issue":[{"severity":"Error"}]}` assert.Equal(t, ooResp+"\n", string(fData)) os.Remove(filePath) } func (bbc *MockBlueButtonClient) GetExplanationOfBenefitData(patientID string, jobID string) (string, error) { args := bbc.Called(patientID) return args.String(0), args.Error(1) } // Returns copy of a static json file (From Blue Button Sandbox originally) after replacing the patient ID of 20000000000001 with the requested identifier func (bbc *MockBlueButtonClient) getData(endpoint, patientID string) (string, error) { fData, err := ioutil.ReadFile("../shared_files/synthetic_beneficiary_data/" + endpoint) if err != nil { return "", err } cleanData := strings.Replace(string(fData), "20000000000001", patientID, -1) return cleanData, err } func (s *MainTestSuite) TestProcessJobEOB() { db := database.GetGORMDbConnection() defer database.Close(db) j := models.Job{ ACOID: uuid.Parse("DBBD1CE1-AE24-435C-807D-ED45953077D3"), UserID: uuid.Parse("82503A18-BF3B-436D-BA7B-BAE09B7FFD2F"), RequestURL: "/api/v1/Patient/$export", Status: "Pending", JobCount: 1, } db.Save(&j) complete, err := j.CheckCompletedAndCleanup() assert.Nil(s.T(), err) assert.False(s.T(), complete) jobArgs := jobEnqueueArgs{ ID: int(j.ID), ACOID: j.ACOID.String(), UserID: j.UserID.String(), BeneficiaryIDs: []string{"10000", "11000"}, ResourceType: "ExplanationOfBenefit", } args, _ := json.Marshal(jobArgs) job := &que.Job{ Type: "ProcessJob", Args: args, } fmt.Println("About to queue up the job") err = processJob(job) assert.Nil(s.T(), err) _, err = j.CheckCompletedAndCleanup() assert.Nil(s.T(), err) var completedJob models.Job err = db.First(&completedJob, "ID = ?", jobArgs.ID).Error assert.Nil(s.T(), err) // As this test actually connects to BB, we can't be sure it will succeed assert.Contains(s.T(), []string{"Failed", "Completed"}, completedJob.Status) } func (s *MainTestSuite) TestSetupQueue() { setupQueue() os.Setenv("WORKER_POOL_SIZE", "7") setupQueue() }
TestGetFailureThreshold
identifier_name
main_test.go
package main import ( "bufio" "encoding/json" "errors" "fmt" "io/ioutil" "log" "os" "strings" "testing" "github.com/CMSgov/bcda-app/bcda/client" "github.com/CMSgov/bcda-app/bcda/database" "github.com/CMSgov/bcda-app/bcda/models" "github.com/CMSgov/bcda-app/bcda/testUtils" que "github.com/bgentry/que-go" "github.com/pborman/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/suite" ) type MockBlueButtonClient struct { mock.Mock client.BlueButtonClient } type MainTestSuite struct { testUtils.AuthTestSuite } func (s *MainTestSuite) SetupTest() { os.Setenv("FHIR_PAYLOAD_DIR", "data/test") os.Setenv("BB_CLIENT_CERT_FILE", "../shared_files/bb-dev-test-cert.pem") os.Setenv("BB_CLIENT_KEY_FILE", "../shared_files/bb-dev-test-key.pem") os.Setenv("BB_CLIENT_CA_FILE", "../shared_files/localhost.crt") models.InitializeGormModels() } func (s *MainTestSuite) TearDownTest() { testUtils.PrintSeparator() } func TestMainTestSuite(t *testing.T) { suite.Run(t, new(MainTestSuite)) } func TestWriteEOBDataToFile(t *testing.T) { os.Setenv("FHIR_STAGING_DIR", "data/test") bbc := MockBlueButtonClient{} acoID := "9c05c1f8-349d-400f-9b69-7963f2262b07" beneficiaryIDs := []string{"10000", "11000"} jobID := "1" staging := fmt.Sprintf("%s/%s", os.Getenv("FHIR_STAGING_DIR"), jobID) // clean out the data dir before beginning this test os.RemoveAll(staging) testUtils.CreateStaging(jobID) for i := 0; i < len(beneficiaryIDs); i++ { bbc.On("GetExplanationOfBenefitData", beneficiaryIDs[i]).Return(bbc.getData("ExplanationOfBenefit", beneficiaryIDs[i])) } _, err := writeBBDataToFile(&bbc, acoID, beneficiaryIDs, jobID, "ExplanationOfBenefit") if err != nil { t.Fail() } files, err := ioutil.ReadDir(staging) assert.Nil(t, err) assert.Equal(t, 1, len(files)) for _, f := range files { fmt.Println(f.Name()) filePath := fmt.Sprintf("%s/%s/%s", os.Getenv("FHIR_STAGING_DIR"), jobID, f.Name()) file, err := os.Open(filePath) if err != nil { log.Fatal(err) } scanner := bufio.NewScanner(file) // 33 entries in test EOB data returned by bbc.getData, times two beneficiaries for i := 0; i < 66; i++ { assert.True(t, scanner.Scan()) var jsonOBJ map[string]interface{} err := json.Unmarshal(scanner.Bytes(), &jsonOBJ) assert.Nil(t, err) assert.NotNil(t, jsonOBJ["fullUrl"], "JSON should contain a value for `fullUrl`.") assert.NotNil(t, jsonOBJ["resource"], "JSON should contain a value for `resource`.") } assert.False(t, scanner.Scan(), "There should be only 66 entries in the file.") bbc.AssertExpectations(t) file.Close() os.Remove(filePath) } } func TestWriteEOBDataToFileNoClient(t *testing.T) { _, err := writeBBDataToFile(nil, "9c05c1f8-349d-400f-9b69-7963f2262b08", []string{"20000", "21000"}, "1", "ExplanationOfBenefit") assert.NotNil(t, err) } func TestWriteEOBDataToFileInvalidACO(t *testing.T) { bbc := MockBlueButtonClient{} acoID := "9c05c1f8-349d-400f-9b69-7963f2262zzz" beneficiaryIDs := []string{"10000", "11000"} _, err := writeBBDataToFile(&bbc, acoID, beneficiaryIDs, "1", "ExplanationOfBenefit") assert.NotNil(t, err) } func TestWriteEOBDataToFileWithErrorsBelowFailureThreshold(t *testing.T) { os.Setenv("FHIR_STAGING_DIR", "data/test") origFailPct := os.Getenv("EXPORT_FAIL_PCT") defer os.Setenv("EXPORT_FAIL_PCT", origFailPct) os.Setenv("EXPORT_FAIL_PCT", "70") bbc := MockBlueButtonClient{} // Set up the mock function to return the expected values bbc.On("GetExplanationOfBenefitData", "10000").Return("", errors.New("error")) bbc.On("GetExplanationOfBenefitData", "11000").Return("", 
errors.New("error")) bbc.On("GetExplanationOfBenefitData", "12000").Return(bbc.getData("ExplanationOfBenefit", "12000")) acoID := "387c3a62-96fa-4d93-a5d0-fd8725509dd9" beneficiaryIDs := []string{"10000", "11000", "12000"} jobID := "1" testUtils.CreateStaging(jobID) fileName, err := writeBBDataToFile(&bbc, acoID, beneficiaryIDs, jobID, "ExplanationOfBenefit") if err != nil { t.Fail() } filePath := fmt.Sprintf("%s/%s/%s-error.ndjson", os.Getenv("FHIR_STAGING_DIR"), jobID, acoID) fData, err := ioutil.ReadFile(filePath) if err != nil { t.Fail() } ooResp := `{"resourceType":"OperationOutcome","issue":[{"severity":"Error","code":"Exception","details":{"coding":[{"display":"Error retrieving ExplanationOfBenefit for beneficiary 10000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}],"text":"Error retrieving ExplanationOfBenefit for beneficiary 10000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}}]} {"resourceType":"OperationOutcome","issue":[{"severity":"Error","code":"Exception","details":{"coding":[{"display":"Error retrieving ExplanationOfBenefit for beneficiary 11000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}],"text":"Error retrieving ExplanationOfBenefit for beneficiary 11000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}}]}` assert.Equal(t, ooResp+"\n", string(fData)) bbc.AssertExpectations(t) os.Remove(fmt.Sprintf("%s/%s/%s", os.Getenv("FHIR_STAGING_DIR"), jobID, fileName)) os.Remove(filePath) } func TestWriteEOBDataToFileWithErrorsAboveFailureThreshold(t *testing.T) { os.Setenv("FHIR_STAGING_DIR", "data/test") origFailPct := os.Getenv("EXPORT_FAIL_PCT") defer os.Setenv("EXPORT_FAIL_PCT", origFailPct) os.Setenv("EXPORT_FAIL_PCT", "60") bbc := MockBlueButtonClient{} // Set up the mock function to return the expected values bbc.On("GetExplanationOfBenefitData", "10000").Return("", errors.New("error")) bbc.On("GetExplanationOfBenefitData", "11000").Return("", errors.New("error")) acoID := "387c3a62-96fa-4d93-a5d0-fd8725509dd9" beneficiaryIDs := []string{"10000", "11000", "12000"} jobID := "1" testUtils.CreateStaging(jobID) _, err := writeBBDataToFile(&bbc, acoID, beneficiaryIDs, jobID, "ExplanationOfBenefit") assert.Equal(t, "number of failed requests has exceeded threshold", err.Error()) filePath := fmt.Sprintf("%s/%s/%s-error.ndjson", os.Getenv("FHIR_STAGING_DIR"), jobID, acoID) fData, err := ioutil.ReadFile(filePath) if err != nil { t.Fail() } ooResp := `{"resourceType":"OperationOutcome","issue":[{"severity":"Error","code":"Exception","details":{"coding":[{"display":"Error retrieving ExplanationOfBenefit for beneficiary 10000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}],"text":"Error retrieving ExplanationOfBenefit for beneficiary 10000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}}]} {"resourceType":"OperationOutcome","issue":[{"severity":"Error","code":"Exception","details":{"coding":[{"display":"Error retrieving ExplanationOfBenefit for beneficiary 11000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}],"text":"Error retrieving ExplanationOfBenefit for beneficiary 11000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}}]}` assert.Equal(t, ooResp+"\n", string(fData)) bbc.AssertExpectations(t) // should not have requested third beneficiary EOB because failure threshold was reached after second bbc.AssertNotCalled(t, "GetExplanationOfBenefitData", "12000") os.Remove(fmt.Sprintf("%s/%s/%s.ndjson", os.Getenv("FHIR_STAGING_DIR"), jobID, acoID)) os.Remove(filePath) } func TestGetFailureThreshold(t *testing.T) { origFailPct := os.Getenv("EXPORT_FAIL_PCT") defer os.Setenv("EXPORT_FAIL_PCT", 
origFailPct) os.Setenv("EXPORT_FAIL_PCT", "60") assert.Equal(t, 60.0, getFailureThreshold()) os.Setenv("EXPORT_FAIL_PCT", "-1") assert.Equal(t, 0.0, getFailureThreshold()) os.Setenv("EXPORT_FAIL_PCT", "500") assert.Equal(t, 100.0, getFailureThreshold()) os.Setenv("EXPORT_FAIL_PCT", "zero") assert.Equal(t, 50.0, getFailureThreshold()) } func TestAppendErrorToFile(t *testing.T) { os.Setenv("FHIR_STAGING_DIR", "data/test") acoID := "328e83c3-bc46-4827-836c-0ba0c713dc7d" jobID := "1" testUtils.CreateStaging(jobID) appendErrorToFile(acoID, "", "", "", jobID) filePath := fmt.Sprintf("%s/%s/%s-error.ndjson", os.Getenv("FHIR_STAGING_DIR"), jobID, acoID) fData, err := ioutil.ReadFile(filePath) if err != nil { t.Fail()
assert.Equal(t, ooResp+"\n", string(fData)) os.Remove(filePath) } func (bbc *MockBlueButtonClient) GetExplanationOfBenefitData(patientID string, jobID string) (string, error) { args := bbc.Called(patientID) return args.String(0), args.Error(1) } // Returns copy of a static json file (From Blue Button Sandbox originally) after replacing the patient ID of 20000000000001 with the requested identifier func (bbc *MockBlueButtonClient) getData(endpoint, patientID string) (string, error) { fData, err := ioutil.ReadFile("../shared_files/synthetic_beneficiary_data/" + endpoint) if err != nil { return "", err } cleanData := strings.Replace(string(fData), "20000000000001", patientID, -1) return cleanData, err } func (s *MainTestSuite) TestProcessJobEOB() { db := database.GetGORMDbConnection() defer database.Close(db) j := models.Job{ ACOID: uuid.Parse("DBBD1CE1-AE24-435C-807D-ED45953077D3"), UserID: uuid.Parse("82503A18-BF3B-436D-BA7B-BAE09B7FFD2F"), RequestURL: "/api/v1/Patient/$export", Status: "Pending", JobCount: 1, } db.Save(&j) complete, err := j.CheckCompletedAndCleanup() assert.Nil(s.T(), err) assert.False(s.T(), complete) jobArgs := jobEnqueueArgs{ ID: int(j.ID), ACOID: j.ACOID.String(), UserID: j.UserID.String(), BeneficiaryIDs: []string{"10000", "11000"}, ResourceType: "ExplanationOfBenefit", } args, _ := json.Marshal(jobArgs) job := &que.Job{ Type: "ProcessJob", Args: args, } fmt.Println("About to queue up the job") err = processJob(job) assert.Nil(s.T(), err) _, err = j.CheckCompletedAndCleanup() assert.Nil(s.T(), err) var completedJob models.Job err = db.First(&completedJob, "ID = ?", jobArgs.ID).Error assert.Nil(s.T(), err) // As this test actually connects to BB, we can't be sure it will succeed assert.Contains(s.T(), []string{"Failed", "Completed"}, completedJob.Status) } func (s *MainTestSuite) TestSetupQueue() { setupQueue() os.Setenv("WORKER_POOL_SIZE", "7") setupQueue() }
} ooResp := `{"resourceType":"OperationOutcome","issue":[{"severity":"Error"}]}`
random_line_split
main_test.go
package main import ( "bufio" "encoding/json" "errors" "fmt" "io/ioutil" "log" "os" "strings" "testing" "github.com/CMSgov/bcda-app/bcda/client" "github.com/CMSgov/bcda-app/bcda/database" "github.com/CMSgov/bcda-app/bcda/models" "github.com/CMSgov/bcda-app/bcda/testUtils" que "github.com/bgentry/que-go" "github.com/pborman/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/suite" ) type MockBlueButtonClient struct { mock.Mock client.BlueButtonClient } type MainTestSuite struct { testUtils.AuthTestSuite } func (s *MainTestSuite) SetupTest() { os.Setenv("FHIR_PAYLOAD_DIR", "data/test") os.Setenv("BB_CLIENT_CERT_FILE", "../shared_files/bb-dev-test-cert.pem") os.Setenv("BB_CLIENT_KEY_FILE", "../shared_files/bb-dev-test-key.pem") os.Setenv("BB_CLIENT_CA_FILE", "../shared_files/localhost.crt") models.InitializeGormModels() } func (s *MainTestSuite) TearDownTest() { testUtils.PrintSeparator() } func TestMainTestSuite(t *testing.T) { suite.Run(t, new(MainTestSuite)) } func TestWriteEOBDataToFile(t *testing.T) { os.Setenv("FHIR_STAGING_DIR", "data/test") bbc := MockBlueButtonClient{} acoID := "9c05c1f8-349d-400f-9b69-7963f2262b07" beneficiaryIDs := []string{"10000", "11000"} jobID := "1" staging := fmt.Sprintf("%s/%s", os.Getenv("FHIR_STAGING_DIR"), jobID) // clean out the data dir before beginning this test os.RemoveAll(staging) testUtils.CreateStaging(jobID) for i := 0; i < len(beneficiaryIDs); i++ { bbc.On("GetExplanationOfBenefitData", beneficiaryIDs[i]).Return(bbc.getData("ExplanationOfBenefit", beneficiaryIDs[i])) } _, err := writeBBDataToFile(&bbc, acoID, beneficiaryIDs, jobID, "ExplanationOfBenefit") if err != nil { t.Fail() } files, err := ioutil.ReadDir(staging) assert.Nil(t, err) assert.Equal(t, 1, len(files)) for _, f := range files { fmt.Println(f.Name()) filePath := fmt.Sprintf("%s/%s/%s", os.Getenv("FHIR_STAGING_DIR"), jobID, f.Name()) file, err := os.Open(filePath) if err != nil
scanner := bufio.NewScanner(file) // 33 entries in test EOB data returned by bbc.getData, times two beneficiaries for i := 0; i < 66; i++ { assert.True(t, scanner.Scan()) var jsonOBJ map[string]interface{} err := json.Unmarshal(scanner.Bytes(), &jsonOBJ) assert.Nil(t, err) assert.NotNil(t, jsonOBJ["fullUrl"], "JSON should contain a value for `fullUrl`.") assert.NotNil(t, jsonOBJ["resource"], "JSON should contain a value for `resource`.") } assert.False(t, scanner.Scan(), "There should be only 66 entries in the file.") bbc.AssertExpectations(t) file.Close() os.Remove(filePath) } } func TestWriteEOBDataToFileNoClient(t *testing.T) { _, err := writeBBDataToFile(nil, "9c05c1f8-349d-400f-9b69-7963f2262b08", []string{"20000", "21000"}, "1", "ExplanationOfBenefit") assert.NotNil(t, err) } func TestWriteEOBDataToFileInvalidACO(t *testing.T) { bbc := MockBlueButtonClient{} acoID := "9c05c1f8-349d-400f-9b69-7963f2262zzz" beneficiaryIDs := []string{"10000", "11000"} _, err := writeBBDataToFile(&bbc, acoID, beneficiaryIDs, "1", "ExplanationOfBenefit") assert.NotNil(t, err) } func TestWriteEOBDataToFileWithErrorsBelowFailureThreshold(t *testing.T) { os.Setenv("FHIR_STAGING_DIR", "data/test") origFailPct := os.Getenv("EXPORT_FAIL_PCT") defer os.Setenv("EXPORT_FAIL_PCT", origFailPct) os.Setenv("EXPORT_FAIL_PCT", "70") bbc := MockBlueButtonClient{} // Set up the mock function to return the expected values bbc.On("GetExplanationOfBenefitData", "10000").Return("", errors.New("error")) bbc.On("GetExplanationOfBenefitData", "11000").Return("", errors.New("error")) bbc.On("GetExplanationOfBenefitData", "12000").Return(bbc.getData("ExplanationOfBenefit", "12000")) acoID := "387c3a62-96fa-4d93-a5d0-fd8725509dd9" beneficiaryIDs := []string{"10000", "11000", "12000"} jobID := "1" testUtils.CreateStaging(jobID) fileName, err := writeBBDataToFile(&bbc, acoID, beneficiaryIDs, jobID, "ExplanationOfBenefit") if err != nil { t.Fail() } filePath := fmt.Sprintf("%s/%s/%s-error.ndjson", os.Getenv("FHIR_STAGING_DIR"), jobID, acoID) fData, err := ioutil.ReadFile(filePath) if err != nil { t.Fail() } ooResp := `{"resourceType":"OperationOutcome","issue":[{"severity":"Error","code":"Exception","details":{"coding":[{"display":"Error retrieving ExplanationOfBenefit for beneficiary 10000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}],"text":"Error retrieving ExplanationOfBenefit for beneficiary 10000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}}]} {"resourceType":"OperationOutcome","issue":[{"severity":"Error","code":"Exception","details":{"coding":[{"display":"Error retrieving ExplanationOfBenefit for beneficiary 11000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}],"text":"Error retrieving ExplanationOfBenefit for beneficiary 11000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}}]}` assert.Equal(t, ooResp+"\n", string(fData)) bbc.AssertExpectations(t) os.Remove(fmt.Sprintf("%s/%s/%s", os.Getenv("FHIR_STAGING_DIR"), jobID, fileName)) os.Remove(filePath) } func TestWriteEOBDataToFileWithErrorsAboveFailureThreshold(t *testing.T) { os.Setenv("FHIR_STAGING_DIR", "data/test") origFailPct := os.Getenv("EXPORT_FAIL_PCT") defer os.Setenv("EXPORT_FAIL_PCT", origFailPct) os.Setenv("EXPORT_FAIL_PCT", "60") bbc := MockBlueButtonClient{} // Set up the mock function to return the expected values bbc.On("GetExplanationOfBenefitData", "10000").Return("", errors.New("error")) bbc.On("GetExplanationOfBenefitData", "11000").Return("", errors.New("error")) acoID := "387c3a62-96fa-4d93-a5d0-fd8725509dd9" beneficiaryIDs := []string{"10000", "11000", 
"12000"} jobID := "1" testUtils.CreateStaging(jobID) _, err := writeBBDataToFile(&bbc, acoID, beneficiaryIDs, jobID, "ExplanationOfBenefit") assert.Equal(t, "number of failed requests has exceeded threshold", err.Error()) filePath := fmt.Sprintf("%s/%s/%s-error.ndjson", os.Getenv("FHIR_STAGING_DIR"), jobID, acoID) fData, err := ioutil.ReadFile(filePath) if err != nil { t.Fail() } ooResp := `{"resourceType":"OperationOutcome","issue":[{"severity":"Error","code":"Exception","details":{"coding":[{"display":"Error retrieving ExplanationOfBenefit for beneficiary 10000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}],"text":"Error retrieving ExplanationOfBenefit for beneficiary 10000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}}]} {"resourceType":"OperationOutcome","issue":[{"severity":"Error","code":"Exception","details":{"coding":[{"display":"Error retrieving ExplanationOfBenefit for beneficiary 11000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}],"text":"Error retrieving ExplanationOfBenefit for beneficiary 11000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}}]}` assert.Equal(t, ooResp+"\n", string(fData)) bbc.AssertExpectations(t) // should not have requested third beneficiary EOB because failure threshold was reached after second bbc.AssertNotCalled(t, "GetExplanationOfBenefitData", "12000") os.Remove(fmt.Sprintf("%s/%s/%s.ndjson", os.Getenv("FHIR_STAGING_DIR"), jobID, acoID)) os.Remove(filePath) } func TestGetFailureThreshold(t *testing.T) { origFailPct := os.Getenv("EXPORT_FAIL_PCT") defer os.Setenv("EXPORT_FAIL_PCT", origFailPct) os.Setenv("EXPORT_FAIL_PCT", "60") assert.Equal(t, 60.0, getFailureThreshold()) os.Setenv("EXPORT_FAIL_PCT", "-1") assert.Equal(t, 0.0, getFailureThreshold()) os.Setenv("EXPORT_FAIL_PCT", "500") assert.Equal(t, 100.0, getFailureThreshold()) os.Setenv("EXPORT_FAIL_PCT", "zero") assert.Equal(t, 50.0, getFailureThreshold()) } func TestAppendErrorToFile(t *testing.T) { os.Setenv("FHIR_STAGING_DIR", "data/test") acoID := "328e83c3-bc46-4827-836c-0ba0c713dc7d" jobID := "1" testUtils.CreateStaging(jobID) appendErrorToFile(acoID, "", "", "", jobID) filePath := fmt.Sprintf("%s/%s/%s-error.ndjson", os.Getenv("FHIR_STAGING_DIR"), jobID, acoID) fData, err := ioutil.ReadFile(filePath) if err != nil { t.Fail() } ooResp := `{"resourceType":"OperationOutcome","issue":[{"severity":"Error"}]}` assert.Equal(t, ooResp+"\n", string(fData)) os.Remove(filePath) } func (bbc *MockBlueButtonClient) GetExplanationOfBenefitData(patientID string, jobID string) (string, error) { args := bbc.Called(patientID) return args.String(0), args.Error(1) } // Returns copy of a static json file (From Blue Button Sandbox originally) after replacing the patient ID of 20000000000001 with the requested identifier func (bbc *MockBlueButtonClient) getData(endpoint, patientID string) (string, error) { fData, err := ioutil.ReadFile("../shared_files/synthetic_beneficiary_data/" + endpoint) if err != nil { return "", err } cleanData := strings.Replace(string(fData), "20000000000001", patientID, -1) return cleanData, err } func (s *MainTestSuite) TestProcessJobEOB() { db := database.GetGORMDbConnection() defer database.Close(db) j := models.Job{ ACOID: uuid.Parse("DBBD1CE1-AE24-435C-807D-ED45953077D3"), UserID: uuid.Parse("82503A18-BF3B-436D-BA7B-BAE09B7FFD2F"), RequestURL: "/api/v1/Patient/$export", Status: "Pending", JobCount: 1, } db.Save(&j) complete, err := j.CheckCompletedAndCleanup() assert.Nil(s.T(), err) assert.False(s.T(), complete) jobArgs := jobEnqueueArgs{ ID: int(j.ID), ACOID: j.ACOID.String(), 
UserID: j.UserID.String(), BeneficiaryIDs: []string{"10000", "11000"}, ResourceType: "ExplanationOfBenefit", } args, _ := json.Marshal(jobArgs) job := &que.Job{ Type: "ProcessJob", Args: args, } fmt.Println("About to queue up the job") err = processJob(job) assert.Nil(s.T(), err) _, err = j.CheckCompletedAndCleanup() assert.Nil(s.T(), err) var completedJob models.Job err = db.First(&completedJob, "ID = ?", jobArgs.ID).Error assert.Nil(s.T(), err) // As this test actually connects to BB, we can't be sure it will succeed assert.Contains(s.T(), []string{"Failed", "Completed"}, completedJob.Status) } func (s *MainTestSuite) TestSetupQueue() { setupQueue() os.Setenv("WORKER_POOL_SIZE", "7") setupQueue() }
{ log.Fatal(err) }
conditional_block
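The conditional_block split masks the body of an if statement: the row's code shows file, err := os.Open(filePath) followed by if err != nil, and the middle restores { log.Fatal(err) }. A small self-contained sketch of the reconstructed pattern is below; the file path is a placeholder for illustration, not a value from the dataset.

package main

import (
	"log"
	"os"
)

func main() {
	filePath := "data/test/example.ndjson" // placeholder path for illustration
	file, err := os.Open(filePath)
	if err != nil {
		log.Fatal(err) // the masked conditional block
	}
	defer file.Close()
}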
main_test.go
package main import ( "bufio" "encoding/json" "errors" "fmt" "io/ioutil" "log" "os" "strings" "testing" "github.com/CMSgov/bcda-app/bcda/client" "github.com/CMSgov/bcda-app/bcda/database" "github.com/CMSgov/bcda-app/bcda/models" "github.com/CMSgov/bcda-app/bcda/testUtils" que "github.com/bgentry/que-go" "github.com/pborman/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/suite" ) type MockBlueButtonClient struct { mock.Mock client.BlueButtonClient } type MainTestSuite struct { testUtils.AuthTestSuite } func (s *MainTestSuite) SetupTest() { os.Setenv("FHIR_PAYLOAD_DIR", "data/test") os.Setenv("BB_CLIENT_CERT_FILE", "../shared_files/bb-dev-test-cert.pem") os.Setenv("BB_CLIENT_KEY_FILE", "../shared_files/bb-dev-test-key.pem") os.Setenv("BB_CLIENT_CA_FILE", "../shared_files/localhost.crt") models.InitializeGormModels() } func (s *MainTestSuite) TearDownTest() { testUtils.PrintSeparator() } func TestMainTestSuite(t *testing.T) { suite.Run(t, new(MainTestSuite)) } func TestWriteEOBDataToFile(t *testing.T) { os.Setenv("FHIR_STAGING_DIR", "data/test") bbc := MockBlueButtonClient{} acoID := "9c05c1f8-349d-400f-9b69-7963f2262b07" beneficiaryIDs := []string{"10000", "11000"} jobID := "1" staging := fmt.Sprintf("%s/%s", os.Getenv("FHIR_STAGING_DIR"), jobID) // clean out the data dir before beginning this test os.RemoveAll(staging) testUtils.CreateStaging(jobID) for i := 0; i < len(beneficiaryIDs); i++ { bbc.On("GetExplanationOfBenefitData", beneficiaryIDs[i]).Return(bbc.getData("ExplanationOfBenefit", beneficiaryIDs[i])) } _, err := writeBBDataToFile(&bbc, acoID, beneficiaryIDs, jobID, "ExplanationOfBenefit") if err != nil { t.Fail() } files, err := ioutil.ReadDir(staging) assert.Nil(t, err) assert.Equal(t, 1, len(files)) for _, f := range files { fmt.Println(f.Name()) filePath := fmt.Sprintf("%s/%s/%s", os.Getenv("FHIR_STAGING_DIR"), jobID, f.Name()) file, err := os.Open(filePath) if err != nil { log.Fatal(err) } scanner := bufio.NewScanner(file) // 33 entries in test EOB data returned by bbc.getData, times two beneficiaries for i := 0; i < 66; i++ { assert.True(t, scanner.Scan()) var jsonOBJ map[string]interface{} err := json.Unmarshal(scanner.Bytes(), &jsonOBJ) assert.Nil(t, err) assert.NotNil(t, jsonOBJ["fullUrl"], "JSON should contain a value for `fullUrl`.") assert.NotNil(t, jsonOBJ["resource"], "JSON should contain a value for `resource`.") } assert.False(t, scanner.Scan(), "There should be only 66 entries in the file.") bbc.AssertExpectations(t) file.Close() os.Remove(filePath) } } func TestWriteEOBDataToFileNoClient(t *testing.T) { _, err := writeBBDataToFile(nil, "9c05c1f8-349d-400f-9b69-7963f2262b08", []string{"20000", "21000"}, "1", "ExplanationOfBenefit") assert.NotNil(t, err) } func TestWriteEOBDataToFileInvalidACO(t *testing.T) { bbc := MockBlueButtonClient{} acoID := "9c05c1f8-349d-400f-9b69-7963f2262zzz" beneficiaryIDs := []string{"10000", "11000"} _, err := writeBBDataToFile(&bbc, acoID, beneficiaryIDs, "1", "ExplanationOfBenefit") assert.NotNil(t, err) } func TestWriteEOBDataToFileWithErrorsBelowFailureThreshold(t *testing.T) { os.Setenv("FHIR_STAGING_DIR", "data/test") origFailPct := os.Getenv("EXPORT_FAIL_PCT") defer os.Setenv("EXPORT_FAIL_PCT", origFailPct) os.Setenv("EXPORT_FAIL_PCT", "70") bbc := MockBlueButtonClient{} // Set up the mock function to return the expected values bbc.On("GetExplanationOfBenefitData", "10000").Return("", errors.New("error")) bbc.On("GetExplanationOfBenefitData", "11000").Return("", 
errors.New("error")) bbc.On("GetExplanationOfBenefitData", "12000").Return(bbc.getData("ExplanationOfBenefit", "12000")) acoID := "387c3a62-96fa-4d93-a5d0-fd8725509dd9" beneficiaryIDs := []string{"10000", "11000", "12000"} jobID := "1" testUtils.CreateStaging(jobID) fileName, err := writeBBDataToFile(&bbc, acoID, beneficiaryIDs, jobID, "ExplanationOfBenefit") if err != nil { t.Fail() } filePath := fmt.Sprintf("%s/%s/%s-error.ndjson", os.Getenv("FHIR_STAGING_DIR"), jobID, acoID) fData, err := ioutil.ReadFile(filePath) if err != nil { t.Fail() } ooResp := `{"resourceType":"OperationOutcome","issue":[{"severity":"Error","code":"Exception","details":{"coding":[{"display":"Error retrieving ExplanationOfBenefit for beneficiary 10000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}],"text":"Error retrieving ExplanationOfBenefit for beneficiary 10000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}}]} {"resourceType":"OperationOutcome","issue":[{"severity":"Error","code":"Exception","details":{"coding":[{"display":"Error retrieving ExplanationOfBenefit for beneficiary 11000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}],"text":"Error retrieving ExplanationOfBenefit for beneficiary 11000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}}]}` assert.Equal(t, ooResp+"\n", string(fData)) bbc.AssertExpectations(t) os.Remove(fmt.Sprintf("%s/%s/%s", os.Getenv("FHIR_STAGING_DIR"), jobID, fileName)) os.Remove(filePath) } func TestWriteEOBDataToFileWithErrorsAboveFailureThreshold(t *testing.T) { os.Setenv("FHIR_STAGING_DIR", "data/test") origFailPct := os.Getenv("EXPORT_FAIL_PCT") defer os.Setenv("EXPORT_FAIL_PCT", origFailPct) os.Setenv("EXPORT_FAIL_PCT", "60") bbc := MockBlueButtonClient{} // Set up the mock function to return the expected values bbc.On("GetExplanationOfBenefitData", "10000").Return("", errors.New("error")) bbc.On("GetExplanationOfBenefitData", "11000").Return("", errors.New("error")) acoID := "387c3a62-96fa-4d93-a5d0-fd8725509dd9" beneficiaryIDs := []string{"10000", "11000", "12000"} jobID := "1" testUtils.CreateStaging(jobID) _, err := writeBBDataToFile(&bbc, acoID, beneficiaryIDs, jobID, "ExplanationOfBenefit") assert.Equal(t, "number of failed requests has exceeded threshold", err.Error()) filePath := fmt.Sprintf("%s/%s/%s-error.ndjson", os.Getenv("FHIR_STAGING_DIR"), jobID, acoID) fData, err := ioutil.ReadFile(filePath) if err != nil { t.Fail() } ooResp := `{"resourceType":"OperationOutcome","issue":[{"severity":"Error","code":"Exception","details":{"coding":[{"display":"Error retrieving ExplanationOfBenefit for beneficiary 10000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}],"text":"Error retrieving ExplanationOfBenefit for beneficiary 10000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}}]} {"resourceType":"OperationOutcome","issue":[{"severity":"Error","code":"Exception","details":{"coding":[{"display":"Error retrieving ExplanationOfBenefit for beneficiary 11000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}],"text":"Error retrieving ExplanationOfBenefit for beneficiary 11000 in ACO 387c3a62-96fa-4d93-a5d0-fd8725509dd9"}}]}` assert.Equal(t, ooResp+"\n", string(fData)) bbc.AssertExpectations(t) // should not have requested third beneficiary EOB because failure threshold was reached after second bbc.AssertNotCalled(t, "GetExplanationOfBenefitData", "12000") os.Remove(fmt.Sprintf("%s/%s/%s.ndjson", os.Getenv("FHIR_STAGING_DIR"), jobID, acoID)) os.Remove(filePath) } func TestGetFailureThreshold(t *testing.T) { origFailPct := os.Getenv("EXPORT_FAIL_PCT") defer os.Setenv("EXPORT_FAIL_PCT", 
origFailPct) os.Setenv("EXPORT_FAIL_PCT", "60") assert.Equal(t, 60.0, getFailureThreshold()) os.Setenv("EXPORT_FAIL_PCT", "-1") assert.Equal(t, 0.0, getFailureThreshold()) os.Setenv("EXPORT_FAIL_PCT", "500") assert.Equal(t, 100.0, getFailureThreshold()) os.Setenv("EXPORT_FAIL_PCT", "zero") assert.Equal(t, 50.0, getFailureThreshold()) } func TestAppendErrorToFile(t *testing.T) { os.Setenv("FHIR_STAGING_DIR", "data/test") acoID := "328e83c3-bc46-4827-836c-0ba0c713dc7d" jobID := "1" testUtils.CreateStaging(jobID) appendErrorToFile(acoID, "", "", "", jobID) filePath := fmt.Sprintf("%s/%s/%s-error.ndjson", os.Getenv("FHIR_STAGING_DIR"), jobID, acoID) fData, err := ioutil.ReadFile(filePath) if err != nil { t.Fail() } ooResp := `{"resourceType":"OperationOutcome","issue":[{"severity":"Error"}]}` assert.Equal(t, ooResp+"\n", string(fData)) os.Remove(filePath) } func (bbc *MockBlueButtonClient) GetExplanationOfBenefitData(patientID string, jobID string) (string, error) { args := bbc.Called(patientID) return args.String(0), args.Error(1) } // Returns copy of a static json file (From Blue Button Sandbox originally) after replacing the patient ID of 20000000000001 with the requested identifier func (bbc *MockBlueButtonClient) getData(endpoint, patientID string) (string, error)
func (s *MainTestSuite) TestProcessJobEOB() { db := database.GetGORMDbConnection() defer database.Close(db) j := models.Job{ ACOID: uuid.Parse("DBBD1CE1-AE24-435C-807D-ED45953077D3"), UserID: uuid.Parse("82503A18-BF3B-436D-BA7B-BAE09B7FFD2F"), RequestURL: "/api/v1/Patient/$export", Status: "Pending", JobCount: 1, } db.Save(&j) complete, err := j.CheckCompletedAndCleanup() assert.Nil(s.T(), err) assert.False(s.T(), complete) jobArgs := jobEnqueueArgs{ ID: int(j.ID), ACOID: j.ACOID.String(), UserID: j.UserID.String(), BeneficiaryIDs: []string{"10000", "11000"}, ResourceType: "ExplanationOfBenefit", } args, _ := json.Marshal(jobArgs) job := &que.Job{ Type: "ProcessJob", Args: args, } fmt.Println("About to queue up the job") err = processJob(job) assert.Nil(s.T(), err) _, err = j.CheckCompletedAndCleanup() assert.Nil(s.T(), err) var completedJob models.Job err = db.First(&completedJob, "ID = ?", jobArgs.ID).Error assert.Nil(s.T(), err) // As this test actually connects to BB, we can't be sure it will succeed assert.Contains(s.T(), []string{"Failed", "Completed"}, completedJob.Status) } func (s *MainTestSuite) TestSetupQueue() { setupQueue() os.Setenv("WORKER_POOL_SIZE", "7") setupQueue() }
{ fData, err := ioutil.ReadFile("../shared_files/synthetic_beneficiary_data/" + endpoint) if err != nil { return "", err } cleanData := strings.Replace(string(fData), "20000000000001", patientID, -1) return cleanData, err }
identifier_body
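For identifier_body, the masked span is an entire function body: the getData helper reads a synthetic-beneficiary JSON fixture and substitutes the requested patient ID. The sketch below lifts the reconstructed logic out of the mock type so it compiles on its own; the fixture path and the replaced ID come from the row above, while the invocation in main is hypothetical.

package main

import (
	"fmt"
	"io/ioutil"
	"strings"
)

// getData mirrors the masked identifier_body above: read a static fixture and
// replace the canned patient ID 20000000000001 with the requested one.
func getData(endpoint, patientID string) (string, error) {
	fData, err := ioutil.ReadFile("../shared_files/synthetic_beneficiary_data/" + endpoint)
	if err != nil {
		return "", err
	}
	cleanData := strings.Replace(string(fData), "20000000000001", patientID, -1)
	return cleanData, nil
}

func main() {
	data, err := getData("ExplanationOfBenefit", "10000") // hypothetical invocation
	if err != nil {
		fmt.Println("fixture not available in this sketch:", err)
		return
	}
	fmt.Println(len(data), "bytes of substituted test data")
}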
api_op_ChangeCidrCollection.go
// Code generated by smithy-go-codegen DO NOT EDIT. package route53 import ( "context" "errors" "fmt" "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" "github.com/aws/aws-sdk-go-v2/service/route53/types" smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) // Creates, changes, or deletes CIDR blocks within a collection. Contains // authoritative IP information mapping blocks to one or multiple locations. A // change request can update multiple locations in a collection at a time, which is // helpful if you want to move one or more CIDR blocks from one location to another // in one transaction, without downtime. Limits The max number of CIDR blocks // included in the request is 1000. As a result, big updates require multiple API // calls. PUT and DELETE_IF_EXISTS Use ChangeCidrCollection to perform the // following actions: // - PUT : Create a CIDR block within the specified collection. // - DELETE_IF_EXISTS : Delete an existing CIDR block from the collection. func (c *Client) ChangeCidrCollection(ctx context.Context, params *ChangeCidrCollectionInput, optFns ...func(*Options)) (*ChangeCidrCollectionOutput, error) { if params == nil { params = &ChangeCidrCollectionInput{} } result, metadata, err := c.invokeOperation(ctx, "ChangeCidrCollection", params, optFns, c.addOperationChangeCidrCollectionMiddlewares) if err != nil { return nil, err } out := result.(*ChangeCidrCollectionOutput) out.ResultMetadata = metadata return out, nil } type ChangeCidrCollectionInput struct { // Information about changes to a CIDR collection. // // This member is required. Changes []types.CidrCollectionChange // The UUID of the CIDR collection to update. // // This member is required. Id *string // A sequential counter that Amazon Route 53 sets to 1 when you create a // collection and increments it by 1 each time you update the collection. We // recommend that you use ListCidrCollection to get the current value of // CollectionVersion for the collection that you want to update, and then include // that value with the change request. This prevents Route 53 from overwriting an // intervening update: // - If the value in the request matches the value of CollectionVersion in the // collection, Route 53 updates the collection. // - If the value of CollectionVersion in the collection is greater than the // value in the request, the collection was changed after you got the version // number. Route 53 does not update the collection, and it returns a // CidrCollectionVersionMismatch error. CollectionVersion *int64 noSmithyDocumentSerde } type ChangeCidrCollectionOutput struct { // The ID that is returned by ChangeCidrCollection . You can use it as input to // GetChange to see if a CIDR collection change has propagated or not. // // This member is required. Id *string // Metadata pertaining to the operation's result. 
ResultMetadata middleware.Metadata noSmithyDocumentSerde } func (c *Client) addOperationChangeCidrCollectionMiddlewares(stack *middleware.Stack, options Options) (err error) { err = stack.Serialize.Add(&awsRestxml_serializeOpChangeCidrCollection{}, middleware.After) if err != nil { return err } err = stack.Deserialize.Add(&awsRestxml_deserializeOpChangeCidrCollection{}, middleware.After) if err != nil { return err } if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { return err } if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { return err } if err = addRetryMiddlewares(stack, options); err != nil { return err } if err = addHTTPSignerV4Middleware(stack, options); err != nil { return err } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { return err } if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } if err = addChangeCidrCollectionResolveEndpointMiddleware(stack, options); err != nil { return err } if err = addOpChangeCidrCollectionValidationMiddleware(stack); err != nil { return err } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opChangeCidrCollection(options.Region), middleware.Before); err != nil { return err } if err = awsmiddleware.AddRecursionDetection(stack); err != nil { return err } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } if err = addResponseErrorMiddleware(stack); err != nil { return err } if err = addRequestResponseLogging(stack, options); err != nil
if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil } func newServiceMetadataMiddleware_opChangeCidrCollection(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, SigningName: "route53", OperationName: "ChangeCidrCollection", } } type opChangeCidrCollectionResolveEndpointMiddleware struct { EndpointResolver EndpointResolverV2 BuiltInResolver builtInParameterResolver } func (*opChangeCidrCollectionResolveEndpointMiddleware) ID() string { return "ResolveEndpointV2" } func (m *opChangeCidrCollectionResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { return next.HandleSerialize(ctx, in) } req, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) } if m.EndpointResolver == nil { return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") } params := EndpointParameters{} m.BuiltInResolver.ResolveBuiltIns(&params) var resolvedEndpoint smithyendpoints.Endpoint resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) if err != nil { return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) } req.URL = &resolvedEndpoint.URI for k := range resolvedEndpoint.Headers { req.Header.Set( k, resolvedEndpoint.Headers.Get(k), ) } authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) if err != nil { var nfe *internalauth.NoAuthenticationSchemesFoundError if errors.As(err, &nfe) { // if no auth scheme is found, default to sigv4 signingName := "route53" signingRegion := m.BuiltInResolver.(*builtInResolver).Region ctx = awsmiddleware.SetSigningName(ctx, signingName) ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) } var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError if errors.As(err, &ue) { return out, metadata, fmt.Errorf( "This operation requests signer version(s) %v but the client only supports %v", ue.UnsupportedSchemes, internalauth.SupportedSchemes, ) } } for _, authScheme := range authSchemes { switch authScheme.(type) { case *internalauth.AuthenticationSchemeV4: v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) var signingName, signingRegion string if v4Scheme.SigningName == nil { signingName = "route53" } else { signingName = *v4Scheme.SigningName } if v4Scheme.SigningRegion == nil { signingRegion = m.BuiltInResolver.(*builtInResolver).Region } else { signingRegion = *v4Scheme.SigningRegion } if v4Scheme.DisableDoubleEncoding != nil { // The signer sets an equivalent value at client initialization time. // Setting this context value will cause the signer to extract it // and override the value set at client initialization time. ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) } ctx = awsmiddleware.SetSigningName(ctx, signingName) ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) break case *internalauth.AuthenticationSchemeV4A: v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) if v4aScheme.SigningName == nil { v4aScheme.SigningName = aws.String("route53") } if v4aScheme.DisableDoubleEncoding != nil { // The signer sets an equivalent value at client initialization time. 
// Setting this context value will cause the signer to extract it // and override the value set at client initialization time. ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) } ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) break case *internalauth.AuthenticationSchemeNone: break } } return next.HandleSerialize(ctx, in) } func addChangeCidrCollectionResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { return stack.Serialize.Insert(&opChangeCidrCollectionResolveEndpointMiddleware{ EndpointResolver: options.EndpointResolverV2, BuiltInResolver: &builtInResolver{ Region: options.Region, UseDualStack: options.EndpointOptions.UseDualStackEndpoint, UseFIPS: options.EndpointOptions.UseFIPSEndpoint, Endpoint: options.BaseEndpoint, }, }, "ResolveEndpoint", middleware.After) }
{ return err }
conditional_block
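The record above defines the ChangeCidrCollection operation and its input shape; for reference, a hedged usage sketch follows, assuming the standard aws-sdk-go-v2 client setup. The collection ID, CollectionVersion value, location name, and CIDR block are placeholders.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/route53"
	"github.com/aws/aws-sdk-go-v2/service/route53/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := route53.NewFromConfig(cfg)

	// Placeholder collection ID and version; per the CollectionVersion doc
	// comment above, the version would normally be re-read from the service
	// before each update.
	out, err := client.ChangeCidrCollection(context.TODO(), &route53.ChangeCidrCollectionInput{
		Id:                aws.String("11111111-2222-3333-4444-555555555555"),
		CollectionVersion: aws.Int64(1),
		Changes: []types.CidrCollectionChange{
			{
				Action:       types.CidrCollectionChangeActionPut,
				LocationName: aws.String("example-location"),
				CidrList:     []string{"192.0.2.0/24"},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("change id: %s", aws.ToString(out.Id))
}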
api_op_ChangeCidrCollection.go
// Code generated by smithy-go-codegen DO NOT EDIT. package route53 import ( "context" "errors" "fmt" "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" "github.com/aws/aws-sdk-go-v2/service/route53/types" smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) // Creates, changes, or deletes CIDR blocks within a collection. Contains // authoritative IP information mapping blocks to one or multiple locations. A // change request can update multiple locations in a collection at a time, which is // helpful if you want to move one or more CIDR blocks from one location to another // in one transaction, without downtime. Limits The max number of CIDR blocks // included in the request is 1000. As a result, big updates require multiple API // calls. PUT and DELETE_IF_EXISTS Use ChangeCidrCollection to perform the // following actions: // - PUT : Create a CIDR block within the specified collection. // - DELETE_IF_EXISTS : Delete an existing CIDR block from the collection. func (c *Client) ChangeCidrCollection(ctx context.Context, params *ChangeCidrCollectionInput, optFns ...func(*Options)) (*ChangeCidrCollectionOutput, error) { if params == nil { params = &ChangeCidrCollectionInput{} } result, metadata, err := c.invokeOperation(ctx, "ChangeCidrCollection", params, optFns, c.addOperationChangeCidrCollectionMiddlewares) if err != nil { return nil, err } out := result.(*ChangeCidrCollectionOutput) out.ResultMetadata = metadata return out, nil } type ChangeCidrCollectionInput struct { // Information about changes to a CIDR collection. // // This member is required. Changes []types.CidrCollectionChange // The UUID of the CIDR collection to update. // // This member is required. Id *string // A sequential counter that Amazon Route 53 sets to 1 when you create a // collection and increments it by 1 each time you update the collection. We // recommend that you use ListCidrCollection to get the current value of // CollectionVersion for the collection that you want to update, and then include // that value with the change request. This prevents Route 53 from overwriting an // intervening update: // - If the value in the request matches the value of CollectionVersion in the // collection, Route 53 updates the collection. // - If the value of CollectionVersion in the collection is greater than the // value in the request, the collection was changed after you got the version // number. Route 53 does not update the collection, and it returns a // CidrCollectionVersionMismatch error. CollectionVersion *int64 noSmithyDocumentSerde } type ChangeCidrCollectionOutput struct { // The ID that is returned by ChangeCidrCollection . You can use it as input to // GetChange to see if a CIDR collection change has propagated or not. // // This member is required. Id *string // Metadata pertaining to the operation's result. 
ResultMetadata middleware.Metadata noSmithyDocumentSerde } func (c *Client) addOperationChangeCidrCollectionMiddlewares(stack *middleware.Stack, options Options) (err error) { err = stack.Serialize.Add(&awsRestxml_serializeOpChangeCidrCollection{}, middleware.After) if err != nil { return err } err = stack.Deserialize.Add(&awsRestxml_deserializeOpChangeCidrCollection{}, middleware.After) if err != nil { return err } if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { return err } if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { return err } if err = addRetryMiddlewares(stack, options); err != nil { return err } if err = addHTTPSignerV4Middleware(stack, options); err != nil { return err } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { return err } if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } if err = addChangeCidrCollectionResolveEndpointMiddleware(stack, options); err != nil { return err } if err = addOpChangeCidrCollectionValidationMiddleware(stack); err != nil { return err } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opChangeCidrCollection(options.Region), middleware.Before); err != nil { return err } if err = awsmiddleware.AddRecursionDetection(stack); err != nil { return err } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } if err = addResponseErrorMiddleware(stack); err != nil { return err } if err = addRequestResponseLogging(stack, options); err != nil { return err } if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil } func newServiceMetadataMiddleware_opChangeCidrCollection(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, SigningName: "route53", OperationName: "ChangeCidrCollection", } } type opChangeCidrCollectionResolveEndpointMiddleware struct { EndpointResolver EndpointResolverV2 BuiltInResolver builtInParameterResolver } func (*opChangeCidrCollectionResolveEndpointMiddleware) ID() string { return "ResolveEndpointV2" } func (m *opChangeCidrCollectionResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { return next.HandleSerialize(ctx, in) } req, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) } if m.EndpointResolver == nil { return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") } params := EndpointParameters{} m.BuiltInResolver.ResolveBuiltIns(&params) var resolvedEndpoint smithyendpoints.Endpoint resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) if err != nil { return out, metadata, fmt.Errorf("failed to resolve service 
endpoint, %w", err) } req.URL = &resolvedEndpoint.URI for k := range resolvedEndpoint.Headers { req.Header.Set( k, resolvedEndpoint.Headers.Get(k), ) } authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) if err != nil { var nfe *internalauth.NoAuthenticationSchemesFoundError if errors.As(err, &nfe) { // if no auth scheme is found, default to sigv4 signingName := "route53" signingRegion := m.BuiltInResolver.(*builtInResolver).Region ctx = awsmiddleware.SetSigningName(ctx, signingName) ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) } var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError if errors.As(err, &ue) { return out, metadata, fmt.Errorf( "This operation requests signer version(s) %v but the client only supports %v", ue.UnsupportedSchemes, internalauth.SupportedSchemes, ) } } for _, authScheme := range authSchemes { switch authScheme.(type) { case *internalauth.AuthenticationSchemeV4: v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) var signingName, signingRegion string if v4Scheme.SigningName == nil { signingName = "route53" } else { signingName = *v4Scheme.SigningName } if v4Scheme.SigningRegion == nil { signingRegion = m.BuiltInResolver.(*builtInResolver).Region } else { signingRegion = *v4Scheme.SigningRegion } if v4Scheme.DisableDoubleEncoding != nil { // The signer sets an equivalent value at client initialization time. // Setting this context value will cause the signer to extract it // and override the value set at client initialization time. ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) } ctx = awsmiddleware.SetSigningName(ctx, signingName) ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) break case *internalauth.AuthenticationSchemeV4A: v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) if v4aScheme.SigningName == nil { v4aScheme.SigningName = aws.String("route53") } if v4aScheme.DisableDoubleEncoding != nil { // The signer sets an equivalent value at client initialization time. // Setting this context value will cause the signer to extract it // and override the value set at client initialization time. ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) } ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) break case *internalauth.AuthenticationSchemeNone: break } } return next.HandleSerialize(ctx, in) } func
(stack *middleware.Stack, options Options) error { return stack.Serialize.Insert(&opChangeCidrCollectionResolveEndpointMiddleware{ EndpointResolver: options.EndpointResolverV2, BuiltInResolver: &builtInResolver{ Region: options.Region, UseDualStack: options.EndpointOptions.UseDualStackEndpoint, UseFIPS: options.EndpointOptions.UseFIPSEndpoint, Endpoint: options.BaseEndpoint, }, }, "ResolveEndpoint", middleware.After) }
addChangeCidrCollectionResolveEndpointMiddleware
identifier_name
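The snippet above inserts the generated ResolveEndpointV2 middleware into the operation's serialize step; application code can hook the same stack. The sketch below assumes the documented aws-sdk-go-v2 APIOptions pattern together with smithy-go's SerializeMiddlewareFunc helper; the middleware name and log output are illustrative.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/route53"
	"github.com/aws/smithy-go/middleware"
	smithyhttp "github.com/aws/smithy-go/transport/http"
)

// logSerializedRequest runs in the serialize step alongside the generated
// middlewares shown above and logs the outgoing request line.
var logSerializedRequest = middleware.SerializeMiddlewareFunc("LogSerializedRequest",
	func(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
		middleware.SerializeOutput, middleware.Metadata, error,
	) {
		if req, ok := in.Request.(*smithyhttp.Request); ok {
			log.Printf("serializing %s %s", req.Method, req.URL.String())
		}
		return next.HandleSerialize(ctx, in)
	})

func newClientWithLogging(ctx context.Context) (*route53.Client, error) {
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		return nil, err
	}
	cfg.APIOptions = append(cfg.APIOptions, func(stack *middleware.Stack) error {
		// Added after the generated serializers so the request body and URL
		// are already populated when this middleware observes them.
		return stack.Serialize.Add(logSerializedRequest, middleware.After)
	})
	return route53.NewFromConfig(cfg), nil
}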
api_op_ChangeCidrCollection.go
// Code generated by smithy-go-codegen DO NOT EDIT. package route53 import ( "context" "errors" "fmt" "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" "github.com/aws/aws-sdk-go-v2/service/route53/types" smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) // Creates, changes, or deletes CIDR blocks within a collection. Contains // authoritative IP information mapping blocks to one or multiple locations. A // change request can update multiple locations in a collection at a time, which is // helpful if you want to move one or more CIDR blocks from one location to another // in one transaction, without downtime. Limits The max number of CIDR blocks // included in the request is 1000. As a result, big updates require multiple API // calls. PUT and DELETE_IF_EXISTS Use ChangeCidrCollection to perform the // following actions: // - PUT : Create a CIDR block within the specified collection. // - DELETE_IF_EXISTS : Delete an existing CIDR block from the collection. func (c *Client) ChangeCidrCollection(ctx context.Context, params *ChangeCidrCollectionInput, optFns ...func(*Options)) (*ChangeCidrCollectionOutput, error) { if params == nil { params = &ChangeCidrCollectionInput{} } result, metadata, err := c.invokeOperation(ctx, "ChangeCidrCollection", params, optFns, c.addOperationChangeCidrCollectionMiddlewares) if err != nil { return nil, err } out := result.(*ChangeCidrCollectionOutput) out.ResultMetadata = metadata return out, nil } type ChangeCidrCollectionInput struct { // Information about changes to a CIDR collection. // // This member is required. Changes []types.CidrCollectionChange // The UUID of the CIDR collection to update. // // This member is required. Id *string // A sequential counter that Amazon Route 53 sets to 1 when you create a // collection and increments it by 1 each time you update the collection. We // recommend that you use ListCidrCollection to get the current value of // CollectionVersion for the collection that you want to update, and then include // that value with the change request. This prevents Route 53 from overwriting an // intervening update: // - If the value in the request matches the value of CollectionVersion in the // collection, Route 53 updates the collection. // - If the value of CollectionVersion in the collection is greater than the // value in the request, the collection was changed after you got the version // number. Route 53 does not update the collection, and it returns a // CidrCollectionVersionMismatch error. CollectionVersion *int64 noSmithyDocumentSerde } type ChangeCidrCollectionOutput struct { // The ID that is returned by ChangeCidrCollection . You can use it as input to // GetChange to see if a CIDR collection change has propagated or not. // // This member is required. Id *string // Metadata pertaining to the operation's result. 
ResultMetadata middleware.Metadata noSmithyDocumentSerde } func (c *Client) addOperationChangeCidrCollectionMiddlewares(stack *middleware.Stack, options Options) (err error) { err = stack.Serialize.Add(&awsRestxml_serializeOpChangeCidrCollection{}, middleware.After) if err != nil { return err } err = stack.Deserialize.Add(&awsRestxml_deserializeOpChangeCidrCollection{}, middleware.After) if err != nil { return err } if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { return err } if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { return err } if err = addRetryMiddlewares(stack, options); err != nil { return err } if err = addHTTPSignerV4Middleware(stack, options); err != nil { return err } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { return err }
} if err = addOpChangeCidrCollectionValidationMiddleware(stack); err != nil { return err } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opChangeCidrCollection(options.Region), middleware.Before); err != nil { return err } if err = awsmiddleware.AddRecursionDetection(stack); err != nil { return err } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } if err = addResponseErrorMiddleware(stack); err != nil { return err } if err = addRequestResponseLogging(stack, options); err != nil { return err } if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil } func newServiceMetadataMiddleware_opChangeCidrCollection(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, SigningName: "route53", OperationName: "ChangeCidrCollection", } } type opChangeCidrCollectionResolveEndpointMiddleware struct { EndpointResolver EndpointResolverV2 BuiltInResolver builtInParameterResolver } func (*opChangeCidrCollectionResolveEndpointMiddleware) ID() string { return "ResolveEndpointV2" } func (m *opChangeCidrCollectionResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { return next.HandleSerialize(ctx, in) } req, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) } if m.EndpointResolver == nil { return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") } params := EndpointParameters{} m.BuiltInResolver.ResolveBuiltIns(&params) var resolvedEndpoint smithyendpoints.Endpoint resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) if err != nil { return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) } req.URL = &resolvedEndpoint.URI for k := range resolvedEndpoint.Headers { req.Header.Set( k, resolvedEndpoint.Headers.Get(k), ) } authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) if err != nil { var nfe *internalauth.NoAuthenticationSchemesFoundError if errors.As(err, &nfe) { // if no auth scheme is found, default to sigv4 signingName := "route53" signingRegion := m.BuiltInResolver.(*builtInResolver).Region ctx = awsmiddleware.SetSigningName(ctx, signingName) ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) } var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError if errors.As(err, &ue) { return out, metadata, fmt.Errorf( "This operation requests signer version(s) %v but the client only supports %v", ue.UnsupportedSchemes, internalauth.SupportedSchemes, ) } } for _, authScheme := range authSchemes { switch authScheme.(type) { case *internalauth.AuthenticationSchemeV4: v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) var signingName, signingRegion string if v4Scheme.SigningName == nil { signingName = "route53" } else { signingName = *v4Scheme.SigningName } if v4Scheme.SigningRegion == nil { signingRegion = m.BuiltInResolver.(*builtInResolver).Region } else { signingRegion = *v4Scheme.SigningRegion } if v4Scheme.DisableDoubleEncoding != nil { // The signer sets an equivalent value at client initialization time. // Setting this context value will cause the signer to extract it // and override the value set at client initialization time. 
ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) } ctx = awsmiddleware.SetSigningName(ctx, signingName) ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) break case *internalauth.AuthenticationSchemeV4A: v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) if v4aScheme.SigningName == nil { v4aScheme.SigningName = aws.String("route53") } if v4aScheme.DisableDoubleEncoding != nil { // The signer sets an equivalent value at client initialization time. // Setting this context value will cause the signer to extract it // and override the value set at client initialization time. ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) } ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) break case *internalauth.AuthenticationSchemeNone: break } } return next.HandleSerialize(ctx, in) } func addChangeCidrCollectionResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { return stack.Serialize.Insert(&opChangeCidrCollectionResolveEndpointMiddleware{ EndpointResolver: options.EndpointResolverV2, BuiltInResolver: &builtInResolver{ Region: options.Region, UseDualStack: options.EndpointOptions.UseDualStackEndpoint, UseFIPS: options.EndpointOptions.UseFIPSEndpoint, Endpoint: options.BaseEndpoint, }, }, "ResolveEndpoint", middleware.After) }
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } if err = addChangeCidrCollectionResolveEndpointMiddleware(stack, options); err != nil { return err
random_line_split
api_op_ChangeCidrCollection.go
// Code generated by smithy-go-codegen DO NOT EDIT. package route53 import ( "context" "errors" "fmt" "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" "github.com/aws/aws-sdk-go-v2/service/route53/types" smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) // Creates, changes, or deletes CIDR blocks within a collection. Contains // authoritative IP information mapping blocks to one or multiple locations. A // change request can update multiple locations in a collection at a time, which is // helpful if you want to move one or more CIDR blocks from one location to another // in one transaction, without downtime. Limits The max number of CIDR blocks // included in the request is 1000. As a result, big updates require multiple API // calls. PUT and DELETE_IF_EXISTS Use ChangeCidrCollection to perform the // following actions: // - PUT : Create a CIDR block within the specified collection. // - DELETE_IF_EXISTS : Delete an existing CIDR block from the collection. func (c *Client) ChangeCidrCollection(ctx context.Context, params *ChangeCidrCollectionInput, optFns ...func(*Options)) (*ChangeCidrCollectionOutput, error) { if params == nil { params = &ChangeCidrCollectionInput{} } result, metadata, err := c.invokeOperation(ctx, "ChangeCidrCollection", params, optFns, c.addOperationChangeCidrCollectionMiddlewares) if err != nil { return nil, err } out := result.(*ChangeCidrCollectionOutput) out.ResultMetadata = metadata return out, nil } type ChangeCidrCollectionInput struct { // Information about changes to a CIDR collection. // // This member is required. Changes []types.CidrCollectionChange // The UUID of the CIDR collection to update. // // This member is required. Id *string // A sequential counter that Amazon Route 53 sets to 1 when you create a // collection and increments it by 1 each time you update the collection. We // recommend that you use ListCidrCollection to get the current value of // CollectionVersion for the collection that you want to update, and then include // that value with the change request. This prevents Route 53 from overwriting an // intervening update: // - If the value in the request matches the value of CollectionVersion in the // collection, Route 53 updates the collection. // - If the value of CollectionVersion in the collection is greater than the // value in the request, the collection was changed after you got the version // number. Route 53 does not update the collection, and it returns a // CidrCollectionVersionMismatch error. CollectionVersion *int64 noSmithyDocumentSerde } type ChangeCidrCollectionOutput struct { // The ID that is returned by ChangeCidrCollection . You can use it as input to // GetChange to see if a CIDR collection change has propagated or not. // // This member is required. Id *string // Metadata pertaining to the operation's result. 
ResultMetadata middleware.Metadata noSmithyDocumentSerde } func (c *Client) addOperationChangeCidrCollectionMiddlewares(stack *middleware.Stack, options Options) (err error) { err = stack.Serialize.Add(&awsRestxml_serializeOpChangeCidrCollection{}, middleware.After) if err != nil { return err } err = stack.Deserialize.Add(&awsRestxml_deserializeOpChangeCidrCollection{}, middleware.After) if err != nil { return err } if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { return err } if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { return err } if err = addRetryMiddlewares(stack, options); err != nil { return err } if err = addHTTPSignerV4Middleware(stack, options); err != nil { return err } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { return err } if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } if err = addChangeCidrCollectionResolveEndpointMiddleware(stack, options); err != nil { return err } if err = addOpChangeCidrCollectionValidationMiddleware(stack); err != nil { return err } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opChangeCidrCollection(options.Region), middleware.Before); err != nil { return err } if err = awsmiddleware.AddRecursionDetection(stack); err != nil { return err } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } if err = addResponseErrorMiddleware(stack); err != nil { return err } if err = addRequestResponseLogging(stack, options); err != nil { return err } if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil } func newServiceMetadataMiddleware_opChangeCidrCollection(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, SigningName: "route53", OperationName: "ChangeCidrCollection", } } type opChangeCidrCollectionResolveEndpointMiddleware struct { EndpointResolver EndpointResolverV2 BuiltInResolver builtInParameterResolver } func (*opChangeCidrCollectionResolveEndpointMiddleware) ID() string { return "ResolveEndpointV2" } func (m *opChangeCidrCollectionResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, )
func addChangeCidrCollectionResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { return stack.Serialize.Insert(&opChangeCidrCollectionResolveEndpointMiddleware{ EndpointResolver: options.EndpointResolverV2, BuiltInResolver: &builtInResolver{ Region: options.Region, UseDualStack: options.EndpointOptions.UseDualStackEndpoint, UseFIPS: options.EndpointOptions.UseFIPSEndpoint, Endpoint: options.BaseEndpoint, }, }, "ResolveEndpoint", middleware.After) }
{ if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { return next.HandleSerialize(ctx, in) } req, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) } if m.EndpointResolver == nil { return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") } params := EndpointParameters{} m.BuiltInResolver.ResolveBuiltIns(&params) var resolvedEndpoint smithyendpoints.Endpoint resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) if err != nil { return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) } req.URL = &resolvedEndpoint.URI for k := range resolvedEndpoint.Headers { req.Header.Set( k, resolvedEndpoint.Headers.Get(k), ) } authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) if err != nil { var nfe *internalauth.NoAuthenticationSchemesFoundError if errors.As(err, &nfe) { // if no auth scheme is found, default to sigv4 signingName := "route53" signingRegion := m.BuiltInResolver.(*builtInResolver).Region ctx = awsmiddleware.SetSigningName(ctx, signingName) ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) } var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError if errors.As(err, &ue) { return out, metadata, fmt.Errorf( "This operation requests signer version(s) %v but the client only supports %v", ue.UnsupportedSchemes, internalauth.SupportedSchemes, ) } } for _, authScheme := range authSchemes { switch authScheme.(type) { case *internalauth.AuthenticationSchemeV4: v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) var signingName, signingRegion string if v4Scheme.SigningName == nil { signingName = "route53" } else { signingName = *v4Scheme.SigningName } if v4Scheme.SigningRegion == nil { signingRegion = m.BuiltInResolver.(*builtInResolver).Region } else { signingRegion = *v4Scheme.SigningRegion } if v4Scheme.DisableDoubleEncoding != nil { // The signer sets an equivalent value at client initialization time. // Setting this context value will cause the signer to extract it // and override the value set at client initialization time. ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) } ctx = awsmiddleware.SetSigningName(ctx, signingName) ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) break case *internalauth.AuthenticationSchemeV4A: v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) if v4aScheme.SigningName == nil { v4aScheme.SigningName = aws.String("route53") } if v4aScheme.DisableDoubleEncoding != nil { // The signer sets an equivalent value at client initialization time. // Setting this context value will cause the signer to extract it // and override the value set at client initialization time. ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) } ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) break case *internalauth.AuthenticationSchemeNone: break } } return next.HandleSerialize(ctx, in) }
identifier_body
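The output documentation above notes that the returned Id can be passed to GetChange to check whether the CIDR collection change has propagated. Here is a hedged polling sketch; the helper name, polling interval, and use of the INSYNC status are illustrative assumptions about how such a check would typically be written.

package main

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go-v2/service/route53"
	"github.com/aws/aws-sdk-go-v2/service/route53/types"
)

// waitForCidrChange polls GetChange until the change reports INSYNC or the
// context is cancelled. changeID is the Id returned by ChangeCidrCollection.
func waitForCidrChange(ctx context.Context, client *route53.Client, changeID *string) error {
	ticker := time.NewTicker(5 * time.Second)
	defer ticker.Stop()
	for {
		out, err := client.GetChange(ctx, &route53.GetChangeInput{Id: changeID})
		if err != nil {
			return err
		}
		if out.ChangeInfo != nil && out.ChangeInfo.Status == types.ChangeStatusInsync {
			return nil
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ticker.C:
			// poll again
		}
	}
}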
mod.rs
// This file is part of Substrate. // Copyright (C) Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program. If not, see <https://www.gnu.org/licenses/>. //! Substrate service tasks management module. use crate::{config::TaskType, Error}; use exit_future::Signal; use futures::{ future::{pending, select, try_join_all, BoxFuture, Either}, Future, FutureExt, StreamExt, }; use parking_lot::Mutex; use prometheus_endpoint::{ exponential_buckets, register, CounterVec, HistogramOpts, HistogramVec, Opts, PrometheusError, Registry, U64, }; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use std::{ collections::{hash_map::Entry, HashMap}, panic, pin::Pin, result::Result, sync::Arc, }; use tokio::runtime::Handle; use tracing_futures::Instrument; mod prometheus_future; #[cfg(test)] mod tests; /// Default task group name. pub const DEFAULT_GROUP_NAME: &str = "default"; /// The name of a group a task belongs to. /// /// This name is passed belong-side the task name to the prometheus metrics and can be used /// to group tasks. pub enum GroupName { /// Sets the group name to `default`. Default, /// Use the specifically given name as group name. Specific(&'static str), } impl From<Option<&'static str>> for GroupName { fn from(name: Option<&'static str>) -> Self { match name { Some(name) => Self::Specific(name), None => Self::Default, } } } impl From<&'static str> for GroupName { fn from(name: &'static str) -> Self { Self::Specific(name) } } /// An handle for spawning tasks in the service. #[derive(Clone)] pub struct SpawnTaskHandle { on_exit: exit_future::Exit, tokio_handle: Handle, metrics: Option<Metrics>, task_registry: TaskRegistry, } impl SpawnTaskHandle { /// Spawns the given task with the given name and a group name. /// If group is not specified `DEFAULT_GROUP_NAME` will be used. /// /// Note that the `name` is a `&'static str`. The reason for this choice is that /// statistics about this task are getting reported to the Prometheus endpoint (if enabled), and /// that therefore the set of possible task names must be bounded. /// /// In other words, it would be a bad idea for someone to do for example /// `spawn(format!("{:?}", some_public_key))`. pub fn spawn( &self, name: &'static str, group: impl Into<GroupName>, task: impl Future<Output = ()> + Send + 'static, ) { self.spawn_inner(name, group, task, TaskType::Async) } /// Spawns the blocking task with the given name. See also `spawn`. pub fn spawn_blocking( &self, name: &'static str, group: impl Into<GroupName>, task: impl Future<Output = ()> + Send + 'static, ) { self.spawn_inner(name, group, task, TaskType::Blocking) } /// Helper function that implements the spawning logic. See `spawn` and `spawn_blocking`. 
fn spawn_inner( &self, name: &'static str, group: impl Into<GroupName>, task: impl Future<Output = ()> + Send + 'static, task_type: TaskType, ) { let on_exit = self.on_exit.clone(); let metrics = self.metrics.clone(); let registry = self.task_registry.clone(); let group = match group.into() { GroupName::Specific(var) => var, // If no group is specified use default. GroupName::Default => DEFAULT_GROUP_NAME, }; let task_type_label = match task_type { TaskType::Blocking => "blocking", TaskType::Async => "async", }; // Note that we increase the started counter here and not within the future. This way, // we could properly visualize on Prometheus situations where the spawning doesn't work. if let Some(metrics) = &self.metrics { metrics.tasks_spawned.with_label_values(&[name, group, task_type_label]).inc(); // We do a dummy increase in order for the task to show up in metrics. metrics .tasks_ended .with_label_values(&[name, "finished", group, task_type_label]) .inc_by(0); } let future = async move { // Register the task and keep the "token" alive until the task is ended. Then this // "token" will unregister this task. let _registry_token = registry.register_task(name, group); if let Some(metrics) = metrics { // Add some wrappers around `task`. let task = {
prometheus_future::with_poll_durations(poll_duration, poll_start, task); // The logic of `AssertUnwindSafe` here is ok considering that we throw // away the `Future` after it has panicked. panic::AssertUnwindSafe(inner).catch_unwind() }; futures::pin_mut!(task); match select(on_exit, task).await { Either::Right((Err(payload), _)) => { metrics .tasks_ended .with_label_values(&[name, "panic", group, task_type_label]) .inc(); panic::resume_unwind(payload) }, Either::Right((Ok(()), _)) => { metrics .tasks_ended .with_label_values(&[name, "finished", group, task_type_label]) .inc(); }, Either::Left(((), _)) => { // The `on_exit` has triggered. metrics .tasks_ended .with_label_values(&[name, "interrupted", group, task_type_label]) .inc(); }, } } else { futures::pin_mut!(task); let _ = select(on_exit, task).await; } } .in_current_span(); match task_type { TaskType::Async => { self.tokio_handle.spawn(future); }, TaskType::Blocking => { let handle = self.tokio_handle.clone(); self.tokio_handle.spawn_blocking(move || { handle.block_on(future); }); }, } } } impl sp_core::traits::SpawnNamed for SpawnTaskHandle { fn spawn_blocking( &self, name: &'static str, group: Option<&'static str>, future: BoxFuture<'static, ()>, ) { self.spawn_inner(name, group, future, TaskType::Blocking) } fn spawn( &self, name: &'static str, group: Option<&'static str>, future: BoxFuture<'static, ()>, ) { self.spawn_inner(name, group, future, TaskType::Async) } } /// A wrapper over `SpawnTaskHandle` that will notify a receiver whenever any /// task spawned through it fails. The service should be on the receiver side /// and will shut itself down whenever it receives any message, i.e. an /// essential task has failed. #[derive(Clone)] pub struct SpawnEssentialTaskHandle { essential_failed_tx: TracingUnboundedSender<()>, inner: SpawnTaskHandle, } impl SpawnEssentialTaskHandle { /// Creates a new `SpawnEssentialTaskHandle`. pub fn new( essential_failed_tx: TracingUnboundedSender<()>, spawn_task_handle: SpawnTaskHandle, ) -> SpawnEssentialTaskHandle { SpawnEssentialTaskHandle { essential_failed_tx, inner: spawn_task_handle } } /// Spawns the given task with the given name. /// /// See also [`SpawnTaskHandle::spawn`]. pub fn spawn( &self, name: &'static str, group: impl Into<GroupName>, task: impl Future<Output = ()> + Send + 'static, ) { self.spawn_inner(name, group, task, TaskType::Async) } /// Spawns the blocking task with the given name. /// /// See also [`SpawnTaskHandle::spawn_blocking`]. pub fn spawn_blocking( &self, name: &'static str, group: impl Into<GroupName>, task: impl Future<Output = ()> + Send + 'static, ) { self.spawn_inner(name, group, task, TaskType::Blocking) } fn spawn_inner( &self, name: &'static str, group: impl Into<GroupName>, task: impl Future<Output = ()> + Send + 'static, task_type: TaskType, ) { let essential_failed = self.essential_failed_tx.clone(); let essential_task = std::panic::AssertUnwindSafe(task).catch_unwind().map(move |_| { log::error!("Essential task `{}` failed. 
Shutting down service.", name); let _ = essential_failed.close(); }); let _ = self.inner.spawn_inner(name, group, essential_task, task_type); } } impl sp_core::traits::SpawnEssentialNamed for SpawnEssentialTaskHandle { fn spawn_essential_blocking( &self, name: &'static str, group: Option<&'static str>, future: BoxFuture<'static, ()>, ) { self.spawn_blocking(name, group, future); } fn spawn_essential( &self, name: &'static str, group: Option<&'static str>, future: BoxFuture<'static, ()>, ) { self.spawn(name, group, future); } } /// Helper struct to manage background/async tasks in Service. pub struct TaskManager { /// A future that resolves when the service has exited, this is useful to /// make sure any internally spawned futures stop when the service does. on_exit: exit_future::Exit, /// A signal that makes the exit future above resolve, fired on drop. _signal: Signal, /// Tokio runtime handle that is used to spawn futures. tokio_handle: Handle, /// Prometheus metric where to report the polling times. metrics: Option<Metrics>, /// Send a signal when a spawned essential task has concluded. The next time /// the service future is polled it should complete with an error. essential_failed_tx: TracingUnboundedSender<()>, /// A receiver for spawned essential-tasks concluding. essential_failed_rx: TracingUnboundedReceiver<()>, /// Things to keep alive until the task manager is dropped. keep_alive: Box<dyn std::any::Any + Send>, /// A list of other `TaskManager`'s to terminate and gracefully shutdown when the parent /// terminates and gracefully shutdown. Also ends the parent `future()` if a child's essential /// task fails. children: Vec<TaskManager>, /// The registry of all running tasks. task_registry: TaskRegistry, } impl TaskManager { /// If a Prometheus registry is passed, it will be used to report statistics about the /// service tasks. pub fn new( tokio_handle: Handle, prometheus_registry: Option<&Registry>, ) -> Result<Self, PrometheusError> { let (signal, on_exit) = exit_future::signal(); // A side-channel for essential tasks to communicate shutdown. let (essential_failed_tx, essential_failed_rx) = tracing_unbounded("mpsc_essential_tasks", 100); let metrics = prometheus_registry.map(Metrics::register).transpose()?; Ok(Self { on_exit, _signal: signal, tokio_handle, metrics, essential_failed_tx, essential_failed_rx, keep_alive: Box::new(()), children: Vec::new(), task_registry: Default::default(), }) } /// Get a handle for spawning tasks. pub fn spawn_handle(&self) -> SpawnTaskHandle { SpawnTaskHandle { on_exit: self.on_exit.clone(), tokio_handle: self.tokio_handle.clone(), metrics: self.metrics.clone(), task_registry: self.task_registry.clone(), } } /// Get a handle for spawning essential tasks. pub fn spawn_essential_handle(&self) -> SpawnEssentialTaskHandle { SpawnEssentialTaskHandle::new(self.essential_failed_tx.clone(), self.spawn_handle()) } /// Return a future that will end with success if the signal to terminate was sent /// (`self.terminate()`) or with an error if an essential task fails. /// /// # Warning /// /// This function will not wait until the end of the remaining task. 
pub fn future<'a>( &'a mut self, ) -> Pin<Box<dyn Future<Output = Result<(), Error>> + Send + 'a>> { Box::pin(async move { let mut t1 = self.essential_failed_rx.next().fuse(); let mut t2 = self.on_exit.clone().fuse(); let mut t3 = try_join_all( self.children .iter_mut() .map(|x| x.future()) // Never end this future if there is no error because if there is no children, // it must not stop .chain(std::iter::once(pending().boxed())), ) .fuse(); futures::select! { _ = t1 => Err(Error::Other("Essential task failed.".into())), _ = t2 => Ok(()), res = t3 => Err(res.map(|_| ()).expect_err("this future never ends; qed")), } }) } /// Set what the task manager should keep alive, can be called multiple times. pub fn keep_alive<T: 'static + Send>(&mut self, to_keep_alive: T) { // allows this fn to safely called multiple times. use std::mem; let old = mem::replace(&mut self.keep_alive, Box::new(())); self.keep_alive = Box::new((to_keep_alive, old)); } /// Register another TaskManager to terminate and gracefully shutdown when the parent /// terminates and gracefully shutdown. Also ends the parent `future()` if a child's essential /// task fails. (But don't end the parent if a child's normal task fails.) pub fn add_child(&mut self, child: TaskManager) { self.children.push(child); } /// Consume `self` and return the [`TaskRegistry`]. /// /// This [`TaskRegistry`] can be used to check for still running tasks after this task manager /// was dropped. pub fn into_task_registry(self) -> TaskRegistry { self.task_registry } } #[derive(Clone)] struct Metrics { // This list is ordered alphabetically poll_duration: HistogramVec, poll_start: CounterVec<U64>, tasks_spawned: CounterVec<U64>, tasks_ended: CounterVec<U64>, } impl Metrics { fn register(registry: &Registry) -> Result<Self, PrometheusError> { Ok(Self { poll_duration: register(HistogramVec::new( HistogramOpts { common_opts: Opts::new( "substrate_tasks_polling_duration", "Duration in seconds of each invocation of Future::poll" ), buckets: exponential_buckets(0.001, 4.0, 9) .expect("function parameters are constant and always valid; qed"), }, &["task_name", "task_group", "kind"] )?, registry)?, poll_start: register(CounterVec::new( Opts::new( "substrate_tasks_polling_started_total", "Total number of times we started invoking Future::poll" ), &["task_name", "task_group", "kind"] )?, registry)?, tasks_spawned: register(CounterVec::new( Opts::new( "substrate_tasks_spawned_total", "Total number of tasks that have been spawned on the Service" ), &["task_name", "task_group", "kind"] )?, registry)?, tasks_ended: register(CounterVec::new( Opts::new( "substrate_tasks_ended_total", "Total number of tasks for which Future::poll has returned Ready(()) or panicked" ), &["task_name", "reason", "task_group", "kind"] )?, registry)?, }) } } /// Ensures that a [`Task`] is unregistered when this object is dropped. struct UnregisterOnDrop { task: Task, registry: TaskRegistry, } impl Drop for UnregisterOnDrop { fn drop(&mut self) { let mut tasks = self.registry.tasks.lock(); if let Entry::Occupied(mut entry) = (*tasks).entry(self.task.clone()) { *entry.get_mut() -= 1; if *entry.get() == 0 { entry.remove(); } } } } /// Represents a running async task in the [`TaskManager`]. /// /// As a task is identified by a name and a group, it is totally valid that there exists multiple /// tasks with the same name and group. #[derive(Clone, Hash, Eq, PartialEq)] pub struct Task { /// The name of the task. pub name: &'static str, /// The group this task is associated to. 
pub group: &'static str, } impl Task { /// Returns if the `group` is the [`DEFAULT_GROUP_NAME`]. pub fn is_default_group(&self) -> bool { self.group == DEFAULT_GROUP_NAME } } /// Keeps track of all running [`Task`]s in [`TaskManager`]. #[derive(Clone, Default)] pub struct TaskRegistry { tasks: Arc<Mutex<HashMap<Task, usize>>>, } impl TaskRegistry { /// Register a task with the given `name` and `group`. /// /// Returns [`UnregisterOnDrop`] that ensures that the task is unregistered when this value is /// dropped. fn register_task(&self, name: &'static str, group: &'static str) -> UnregisterOnDrop { let task = Task { name, group }; { let mut tasks = self.tasks.lock(); *(*tasks).entry(task.clone()).or_default() += 1; } UnregisterOnDrop { task, registry: self.clone() } } /// Returns the running tasks. /// /// As a task is only identified by its `name` and `group`, there can be duplicate tasks. The /// number per task represents the concurrently running tasks with the same identifier. pub fn running_tasks(&self) -> HashMap<Task, usize> { (*self.tasks.lock()).clone() } }
let poll_duration = metrics.poll_duration.with_label_values(&[name, group, task_type_label]); let poll_start = metrics.poll_start.with_label_values(&[name, group, task_type_label]); let inner =
random_line_split
mod.rs
// This file is part of Substrate. // Copyright (C) Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program. If not, see <https://www.gnu.org/licenses/>. //! Substrate service tasks management module. use crate::{config::TaskType, Error}; use exit_future::Signal; use futures::{ future::{pending, select, try_join_all, BoxFuture, Either}, Future, FutureExt, StreamExt, }; use parking_lot::Mutex; use prometheus_endpoint::{ exponential_buckets, register, CounterVec, HistogramOpts, HistogramVec, Opts, PrometheusError, Registry, U64, }; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use std::{ collections::{hash_map::Entry, HashMap}, panic, pin::Pin, result::Result, sync::Arc, }; use tokio::runtime::Handle; use tracing_futures::Instrument; mod prometheus_future; #[cfg(test)] mod tests; /// Default task group name. pub const DEFAULT_GROUP_NAME: &str = "default"; /// The name of a group a task belongs to. /// /// This name is passed belong-side the task name to the prometheus metrics and can be used /// to group tasks. pub enum GroupName { /// Sets the group name to `default`. Default, /// Use the specifically given name as group name. Specific(&'static str), } impl From<Option<&'static str>> for GroupName { fn from(name: Option<&'static str>) -> Self { match name { Some(name) => Self::Specific(name), None => Self::Default, } } } impl From<&'static str> for GroupName { fn from(name: &'static str) -> Self { Self::Specific(name) } } /// An handle for spawning tasks in the service. #[derive(Clone)] pub struct SpawnTaskHandle { on_exit: exit_future::Exit, tokio_handle: Handle, metrics: Option<Metrics>, task_registry: TaskRegistry, } impl SpawnTaskHandle { /// Spawns the given task with the given name and a group name. /// If group is not specified `DEFAULT_GROUP_NAME` will be used. /// /// Note that the `name` is a `&'static str`. The reason for this choice is that /// statistics about this task are getting reported to the Prometheus endpoint (if enabled), and /// that therefore the set of possible task names must be bounded. /// /// In other words, it would be a bad idea for someone to do for example /// `spawn(format!("{:?}", some_public_key))`. pub fn spawn( &self, name: &'static str, group: impl Into<GroupName>, task: impl Future<Output = ()> + Send + 'static, ) { self.spawn_inner(name, group, task, TaskType::Async) } /// Spawns the blocking task with the given name. See also `spawn`. pub fn spawn_blocking( &self, name: &'static str, group: impl Into<GroupName>, task: impl Future<Output = ()> + Send + 'static, ) { self.spawn_inner(name, group, task, TaskType::Blocking) } /// Helper function that implements the spawning logic. See `spawn` and `spawn_blocking`. 
fn spawn_inner( &self, name: &'static str, group: impl Into<GroupName>, task: impl Future<Output = ()> + Send + 'static, task_type: TaskType, ) { let on_exit = self.on_exit.clone(); let metrics = self.metrics.clone(); let registry = self.task_registry.clone(); let group = match group.into() { GroupName::Specific(var) => var, // If no group is specified use default. GroupName::Default => DEFAULT_GROUP_NAME, }; let task_type_label = match task_type { TaskType::Blocking => "blocking", TaskType::Async => "async", }; // Note that we increase the started counter here and not within the future. This way, // we could properly visualize on Prometheus situations where the spawning doesn't work. if let Some(metrics) = &self.metrics { metrics.tasks_spawned.with_label_values(&[name, group, task_type_label]).inc(); // We do a dummy increase in order for the task to show up in metrics. metrics .tasks_ended .with_label_values(&[name, "finished", group, task_type_label]) .inc_by(0); } let future = async move { // Register the task and keep the "token" alive until the task is ended. Then this // "token" will unregister this task. let _registry_token = registry.register_task(name, group); if let Some(metrics) = metrics { // Add some wrappers around `task`. let task = { let poll_duration = metrics.poll_duration.with_label_values(&[name, group, task_type_label]); let poll_start = metrics.poll_start.with_label_values(&[name, group, task_type_label]); let inner = prometheus_future::with_poll_durations(poll_duration, poll_start, task); // The logic of `AssertUnwindSafe` here is ok considering that we throw // away the `Future` after it has panicked. panic::AssertUnwindSafe(inner).catch_unwind() }; futures::pin_mut!(task); match select(on_exit, task).await { Either::Right((Err(payload), _)) => { metrics .tasks_ended .with_label_values(&[name, "panic", group, task_type_label]) .inc(); panic::resume_unwind(payload) }, Either::Right((Ok(()), _)) => { metrics .tasks_ended .with_label_values(&[name, "finished", group, task_type_label]) .inc(); }, Either::Left(((), _)) => { // The `on_exit` has triggered. metrics .tasks_ended .with_label_values(&[name, "interrupted", group, task_type_label]) .inc(); }, } } else { futures::pin_mut!(task); let _ = select(on_exit, task).await; } } .in_current_span(); match task_type { TaskType::Async =>
, TaskType::Blocking => { let handle = self.tokio_handle.clone(); self.tokio_handle.spawn_blocking(move || { handle.block_on(future); }); }, } } } impl sp_core::traits::SpawnNamed for SpawnTaskHandle { fn spawn_blocking( &self, name: &'static str, group: Option<&'static str>, future: BoxFuture<'static, ()>, ) { self.spawn_inner(name, group, future, TaskType::Blocking) } fn spawn( &self, name: &'static str, group: Option<&'static str>, future: BoxFuture<'static, ()>, ) { self.spawn_inner(name, group, future, TaskType::Async) } } /// A wrapper over `SpawnTaskHandle` that will notify a receiver whenever any /// task spawned through it fails. The service should be on the receiver side /// and will shut itself down whenever it receives any message, i.e. an /// essential task has failed. #[derive(Clone)] pub struct SpawnEssentialTaskHandle { essential_failed_tx: TracingUnboundedSender<()>, inner: SpawnTaskHandle, } impl SpawnEssentialTaskHandle { /// Creates a new `SpawnEssentialTaskHandle`. pub fn new( essential_failed_tx: TracingUnboundedSender<()>, spawn_task_handle: SpawnTaskHandle, ) -> SpawnEssentialTaskHandle { SpawnEssentialTaskHandle { essential_failed_tx, inner: spawn_task_handle } } /// Spawns the given task with the given name. /// /// See also [`SpawnTaskHandle::spawn`]. pub fn spawn( &self, name: &'static str, group: impl Into<GroupName>, task: impl Future<Output = ()> + Send + 'static, ) { self.spawn_inner(name, group, task, TaskType::Async) } /// Spawns the blocking task with the given name. /// /// See also [`SpawnTaskHandle::spawn_blocking`]. pub fn spawn_blocking( &self, name: &'static str, group: impl Into<GroupName>, task: impl Future<Output = ()> + Send + 'static, ) { self.spawn_inner(name, group, task, TaskType::Blocking) } fn spawn_inner( &self, name: &'static str, group: impl Into<GroupName>, task: impl Future<Output = ()> + Send + 'static, task_type: TaskType, ) { let essential_failed = self.essential_failed_tx.clone(); let essential_task = std::panic::AssertUnwindSafe(task).catch_unwind().map(move |_| { log::error!("Essential task `{}` failed. Shutting down service.", name); let _ = essential_failed.close(); }); let _ = self.inner.spawn_inner(name, group, essential_task, task_type); } } impl sp_core::traits::SpawnEssentialNamed for SpawnEssentialTaskHandle { fn spawn_essential_blocking( &self, name: &'static str, group: Option<&'static str>, future: BoxFuture<'static, ()>, ) { self.spawn_blocking(name, group, future); } fn spawn_essential( &self, name: &'static str, group: Option<&'static str>, future: BoxFuture<'static, ()>, ) { self.spawn(name, group, future); } } /// Helper struct to manage background/async tasks in Service. pub struct TaskManager { /// A future that resolves when the service has exited, this is useful to /// make sure any internally spawned futures stop when the service does. on_exit: exit_future::Exit, /// A signal that makes the exit future above resolve, fired on drop. _signal: Signal, /// Tokio runtime handle that is used to spawn futures. tokio_handle: Handle, /// Prometheus metric where to report the polling times. metrics: Option<Metrics>, /// Send a signal when a spawned essential task has concluded. The next time /// the service future is polled it should complete with an error. essential_failed_tx: TracingUnboundedSender<()>, /// A receiver for spawned essential-tasks concluding. essential_failed_rx: TracingUnboundedReceiver<()>, /// Things to keep alive until the task manager is dropped. 
keep_alive: Box<dyn std::any::Any + Send>, /// A list of other `TaskManager`'s to terminate and gracefully shutdown when the parent /// terminates and gracefully shutdown. Also ends the parent `future()` if a child's essential /// task fails. children: Vec<TaskManager>, /// The registry of all running tasks. task_registry: TaskRegistry, } impl TaskManager { /// If a Prometheus registry is passed, it will be used to report statistics about the /// service tasks. pub fn new( tokio_handle: Handle, prometheus_registry: Option<&Registry>, ) -> Result<Self, PrometheusError> { let (signal, on_exit) = exit_future::signal(); // A side-channel for essential tasks to communicate shutdown. let (essential_failed_tx, essential_failed_rx) = tracing_unbounded("mpsc_essential_tasks", 100); let metrics = prometheus_registry.map(Metrics::register).transpose()?; Ok(Self { on_exit, _signal: signal, tokio_handle, metrics, essential_failed_tx, essential_failed_rx, keep_alive: Box::new(()), children: Vec::new(), task_registry: Default::default(), }) } /// Get a handle for spawning tasks. pub fn spawn_handle(&self) -> SpawnTaskHandle { SpawnTaskHandle { on_exit: self.on_exit.clone(), tokio_handle: self.tokio_handle.clone(), metrics: self.metrics.clone(), task_registry: self.task_registry.clone(), } } /// Get a handle for spawning essential tasks. pub fn spawn_essential_handle(&self) -> SpawnEssentialTaskHandle { SpawnEssentialTaskHandle::new(self.essential_failed_tx.clone(), self.spawn_handle()) } /// Return a future that will end with success if the signal to terminate was sent /// (`self.terminate()`) or with an error if an essential task fails. /// /// # Warning /// /// This function will not wait until the end of the remaining task. pub fn future<'a>( &'a mut self, ) -> Pin<Box<dyn Future<Output = Result<(), Error>> + Send + 'a>> { Box::pin(async move { let mut t1 = self.essential_failed_rx.next().fuse(); let mut t2 = self.on_exit.clone().fuse(); let mut t3 = try_join_all( self.children .iter_mut() .map(|x| x.future()) // Never end this future if there is no error because if there is no children, // it must not stop .chain(std::iter::once(pending().boxed())), ) .fuse(); futures::select! { _ = t1 => Err(Error::Other("Essential task failed.".into())), _ = t2 => Ok(()), res = t3 => Err(res.map(|_| ()).expect_err("this future never ends; qed")), } }) } /// Set what the task manager should keep alive, can be called multiple times. pub fn keep_alive<T: 'static + Send>(&mut self, to_keep_alive: T) { // allows this fn to safely called multiple times. use std::mem; let old = mem::replace(&mut self.keep_alive, Box::new(())); self.keep_alive = Box::new((to_keep_alive, old)); } /// Register another TaskManager to terminate and gracefully shutdown when the parent /// terminates and gracefully shutdown. Also ends the parent `future()` if a child's essential /// task fails. (But don't end the parent if a child's normal task fails.) pub fn add_child(&mut self, child: TaskManager) { self.children.push(child); } /// Consume `self` and return the [`TaskRegistry`]. /// /// This [`TaskRegistry`] can be used to check for still running tasks after this task manager /// was dropped. 
pub fn into_task_registry(self) -> TaskRegistry { self.task_registry } } #[derive(Clone)] struct Metrics { // This list is ordered alphabetically poll_duration: HistogramVec, poll_start: CounterVec<U64>, tasks_spawned: CounterVec<U64>, tasks_ended: CounterVec<U64>, } impl Metrics { fn register(registry: &Registry) -> Result<Self, PrometheusError> { Ok(Self { poll_duration: register(HistogramVec::new( HistogramOpts { common_opts: Opts::new( "substrate_tasks_polling_duration", "Duration in seconds of each invocation of Future::poll" ), buckets: exponential_buckets(0.001, 4.0, 9) .expect("function parameters are constant and always valid; qed"), }, &["task_name", "task_group", "kind"] )?, registry)?, poll_start: register(CounterVec::new( Opts::new( "substrate_tasks_polling_started_total", "Total number of times we started invoking Future::poll" ), &["task_name", "task_group", "kind"] )?, registry)?, tasks_spawned: register(CounterVec::new( Opts::new( "substrate_tasks_spawned_total", "Total number of tasks that have been spawned on the Service" ), &["task_name", "task_group", "kind"] )?, registry)?, tasks_ended: register(CounterVec::new( Opts::new( "substrate_tasks_ended_total", "Total number of tasks for which Future::poll has returned Ready(()) or panicked" ), &["task_name", "reason", "task_group", "kind"] )?, registry)?, }) } } /// Ensures that a [`Task`] is unregistered when this object is dropped. struct UnregisterOnDrop { task: Task, registry: TaskRegistry, } impl Drop for UnregisterOnDrop { fn drop(&mut self) { let mut tasks = self.registry.tasks.lock(); if let Entry::Occupied(mut entry) = (*tasks).entry(self.task.clone()) { *entry.get_mut() -= 1; if *entry.get() == 0 { entry.remove(); } } } } /// Represents a running async task in the [`TaskManager`]. /// /// As a task is identified by a name and a group, it is totally valid that there exists multiple /// tasks with the same name and group. #[derive(Clone, Hash, Eq, PartialEq)] pub struct Task { /// The name of the task. pub name: &'static str, /// The group this task is associated to. pub group: &'static str, } impl Task { /// Returns if the `group` is the [`DEFAULT_GROUP_NAME`]. pub fn is_default_group(&self) -> bool { self.group == DEFAULT_GROUP_NAME } } /// Keeps track of all running [`Task`]s in [`TaskManager`]. #[derive(Clone, Default)] pub struct TaskRegistry { tasks: Arc<Mutex<HashMap<Task, usize>>>, } impl TaskRegistry { /// Register a task with the given `name` and `group`. /// /// Returns [`UnregisterOnDrop`] that ensures that the task is unregistered when this value is /// dropped. fn register_task(&self, name: &'static str, group: &'static str) -> UnregisterOnDrop { let task = Task { name, group }; { let mut tasks = self.tasks.lock(); *(*tasks).entry(task.clone()).or_default() += 1; } UnregisterOnDrop { task, registry: self.clone() } } /// Returns the running tasks. /// /// As a task is only identified by its `name` and `group`, there can be duplicate tasks. The /// number per task represents the concurrently running tasks with the same identifier. pub fn running_tasks(&self) -> HashMap<Task, usize> { (*self.tasks.lock()).clone() } }
{ self.tokio_handle.spawn(future); }
conditional_block
mod.rs
// This file is part of Substrate. // Copyright (C) Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program. If not, see <https://www.gnu.org/licenses/>. //! Substrate service tasks management module. use crate::{config::TaskType, Error}; use exit_future::Signal; use futures::{ future::{pending, select, try_join_all, BoxFuture, Either}, Future, FutureExt, StreamExt, }; use parking_lot::Mutex; use prometheus_endpoint::{ exponential_buckets, register, CounterVec, HistogramOpts, HistogramVec, Opts, PrometheusError, Registry, U64, }; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use std::{ collections::{hash_map::Entry, HashMap}, panic, pin::Pin, result::Result, sync::Arc, }; use tokio::runtime::Handle; use tracing_futures::Instrument; mod prometheus_future; #[cfg(test)] mod tests; /// Default task group name. pub const DEFAULT_GROUP_NAME: &str = "default"; /// The name of a group a task belongs to. /// /// This name is passed belong-side the task name to the prometheus metrics and can be used /// to group tasks. pub enum GroupName { /// Sets the group name to `default`. Default, /// Use the specifically given name as group name. Specific(&'static str), } impl From<Option<&'static str>> for GroupName { fn from(name: Option<&'static str>) -> Self { match name { Some(name) => Self::Specific(name), None => Self::Default, } } } impl From<&'static str> for GroupName { fn from(name: &'static str) -> Self { Self::Specific(name) } } /// An handle for spawning tasks in the service. #[derive(Clone)] pub struct SpawnTaskHandle { on_exit: exit_future::Exit, tokio_handle: Handle, metrics: Option<Metrics>, task_registry: TaskRegistry, } impl SpawnTaskHandle { /// Spawns the given task with the given name and a group name. /// If group is not specified `DEFAULT_GROUP_NAME` will be used. /// /// Note that the `name` is a `&'static str`. The reason for this choice is that /// statistics about this task are getting reported to the Prometheus endpoint (if enabled), and /// that therefore the set of possible task names must be bounded. /// /// In other words, it would be a bad idea for someone to do for example /// `spawn(format!("{:?}", some_public_key))`. pub fn spawn( &self, name: &'static str, group: impl Into<GroupName>, task: impl Future<Output = ()> + Send + 'static, ) { self.spawn_inner(name, group, task, TaskType::Async) } /// Spawns the blocking task with the given name. See also `spawn`. pub fn spawn_blocking( &self, name: &'static str, group: impl Into<GroupName>, task: impl Future<Output = ()> + Send + 'static, ) { self.spawn_inner(name, group, task, TaskType::Blocking) } /// Helper function that implements the spawning logic. See `spawn` and `spawn_blocking`. 
fn spawn_inner( &self, name: &'static str, group: impl Into<GroupName>, task: impl Future<Output = ()> + Send + 'static, task_type: TaskType, ) { let on_exit = self.on_exit.clone(); let metrics = self.metrics.clone(); let registry = self.task_registry.clone(); let group = match group.into() { GroupName::Specific(var) => var, // If no group is specified use default. GroupName::Default => DEFAULT_GROUP_NAME, }; let task_type_label = match task_type { TaskType::Blocking => "blocking", TaskType::Async => "async", }; // Note that we increase the started counter here and not within the future. This way, // we could properly visualize on Prometheus situations where the spawning doesn't work. if let Some(metrics) = &self.metrics { metrics.tasks_spawned.with_label_values(&[name, group, task_type_label]).inc(); // We do a dummy increase in order for the task to show up in metrics. metrics .tasks_ended .with_label_values(&[name, "finished", group, task_type_label]) .inc_by(0); } let future = async move { // Register the task and keep the "token" alive until the task is ended. Then this // "token" will unregister this task. let _registry_token = registry.register_task(name, group); if let Some(metrics) = metrics { // Add some wrappers around `task`. let task = { let poll_duration = metrics.poll_duration.with_label_values(&[name, group, task_type_label]); let poll_start = metrics.poll_start.with_label_values(&[name, group, task_type_label]); let inner = prometheus_future::with_poll_durations(poll_duration, poll_start, task); // The logic of `AssertUnwindSafe` here is ok considering that we throw // away the `Future` after it has panicked. panic::AssertUnwindSafe(inner).catch_unwind() }; futures::pin_mut!(task); match select(on_exit, task).await { Either::Right((Err(payload), _)) => { metrics .tasks_ended .with_label_values(&[name, "panic", group, task_type_label]) .inc(); panic::resume_unwind(payload) }, Either::Right((Ok(()), _)) => { metrics .tasks_ended .with_label_values(&[name, "finished", group, task_type_label]) .inc(); }, Either::Left(((), _)) => { // The `on_exit` has triggered. metrics .tasks_ended .with_label_values(&[name, "interrupted", group, task_type_label]) .inc(); }, } } else { futures::pin_mut!(task); let _ = select(on_exit, task).await; } } .in_current_span(); match task_type { TaskType::Async => { self.tokio_handle.spawn(future); }, TaskType::Blocking => { let handle = self.tokio_handle.clone(); self.tokio_handle.spawn_blocking(move || { handle.block_on(future); }); }, } } } impl sp_core::traits::SpawnNamed for SpawnTaskHandle { fn spawn_blocking( &self, name: &'static str, group: Option<&'static str>, future: BoxFuture<'static, ()>, ) { self.spawn_inner(name, group, future, TaskType::Blocking) } fn spawn( &self, name: &'static str, group: Option<&'static str>, future: BoxFuture<'static, ()>, ) { self.spawn_inner(name, group, future, TaskType::Async) } } /// A wrapper over `SpawnTaskHandle` that will notify a receiver whenever any /// task spawned through it fails. The service should be on the receiver side /// and will shut itself down whenever it receives any message, i.e. an /// essential task has failed. #[derive(Clone)] pub struct
{ essential_failed_tx: TracingUnboundedSender<()>, inner: SpawnTaskHandle, } impl SpawnEssentialTaskHandle { /// Creates a new `SpawnEssentialTaskHandle`. pub fn new( essential_failed_tx: TracingUnboundedSender<()>, spawn_task_handle: SpawnTaskHandle, ) -> SpawnEssentialTaskHandle { SpawnEssentialTaskHandle { essential_failed_tx, inner: spawn_task_handle } } /// Spawns the given task with the given name. /// /// See also [`SpawnTaskHandle::spawn`]. pub fn spawn( &self, name: &'static str, group: impl Into<GroupName>, task: impl Future<Output = ()> + Send + 'static, ) { self.spawn_inner(name, group, task, TaskType::Async) } /// Spawns the blocking task with the given name. /// /// See also [`SpawnTaskHandle::spawn_blocking`]. pub fn spawn_blocking( &self, name: &'static str, group: impl Into<GroupName>, task: impl Future<Output = ()> + Send + 'static, ) { self.spawn_inner(name, group, task, TaskType::Blocking) } fn spawn_inner( &self, name: &'static str, group: impl Into<GroupName>, task: impl Future<Output = ()> + Send + 'static, task_type: TaskType, ) { let essential_failed = self.essential_failed_tx.clone(); let essential_task = std::panic::AssertUnwindSafe(task).catch_unwind().map(move |_| { log::error!("Essential task `{}` failed. Shutting down service.", name); let _ = essential_failed.close(); }); let _ = self.inner.spawn_inner(name, group, essential_task, task_type); } } impl sp_core::traits::SpawnEssentialNamed for SpawnEssentialTaskHandle { fn spawn_essential_blocking( &self, name: &'static str, group: Option<&'static str>, future: BoxFuture<'static, ()>, ) { self.spawn_blocking(name, group, future); } fn spawn_essential( &self, name: &'static str, group: Option<&'static str>, future: BoxFuture<'static, ()>, ) { self.spawn(name, group, future); } } /// Helper struct to manage background/async tasks in Service. pub struct TaskManager { /// A future that resolves when the service has exited, this is useful to /// make sure any internally spawned futures stop when the service does. on_exit: exit_future::Exit, /// A signal that makes the exit future above resolve, fired on drop. _signal: Signal, /// Tokio runtime handle that is used to spawn futures. tokio_handle: Handle, /// Prometheus metric where to report the polling times. metrics: Option<Metrics>, /// Send a signal when a spawned essential task has concluded. The next time /// the service future is polled it should complete with an error. essential_failed_tx: TracingUnboundedSender<()>, /// A receiver for spawned essential-tasks concluding. essential_failed_rx: TracingUnboundedReceiver<()>, /// Things to keep alive until the task manager is dropped. keep_alive: Box<dyn std::any::Any + Send>, /// A list of other `TaskManager`'s to terminate and gracefully shutdown when the parent /// terminates and gracefully shutdown. Also ends the parent `future()` if a child's essential /// task fails. children: Vec<TaskManager>, /// The registry of all running tasks. task_registry: TaskRegistry, } impl TaskManager { /// If a Prometheus registry is passed, it will be used to report statistics about the /// service tasks. pub fn new( tokio_handle: Handle, prometheus_registry: Option<&Registry>, ) -> Result<Self, PrometheusError> { let (signal, on_exit) = exit_future::signal(); // A side-channel for essential tasks to communicate shutdown. 
let (essential_failed_tx, essential_failed_rx) = tracing_unbounded("mpsc_essential_tasks", 100); let metrics = prometheus_registry.map(Metrics::register).transpose()?; Ok(Self { on_exit, _signal: signal, tokio_handle, metrics, essential_failed_tx, essential_failed_rx, keep_alive: Box::new(()), children: Vec::new(), task_registry: Default::default(), }) } /// Get a handle for spawning tasks. pub fn spawn_handle(&self) -> SpawnTaskHandle { SpawnTaskHandle { on_exit: self.on_exit.clone(), tokio_handle: self.tokio_handle.clone(), metrics: self.metrics.clone(), task_registry: self.task_registry.clone(), } } /// Get a handle for spawning essential tasks. pub fn spawn_essential_handle(&self) -> SpawnEssentialTaskHandle { SpawnEssentialTaskHandle::new(self.essential_failed_tx.clone(), self.spawn_handle()) } /// Return a future that will end with success if the signal to terminate was sent /// (`self.terminate()`) or with an error if an essential task fails. /// /// # Warning /// /// This function will not wait until the end of the remaining task. pub fn future<'a>( &'a mut self, ) -> Pin<Box<dyn Future<Output = Result<(), Error>> + Send + 'a>> { Box::pin(async move { let mut t1 = self.essential_failed_rx.next().fuse(); let mut t2 = self.on_exit.clone().fuse(); let mut t3 = try_join_all( self.children .iter_mut() .map(|x| x.future()) // Never end this future if there is no error because if there is no children, // it must not stop .chain(std::iter::once(pending().boxed())), ) .fuse(); futures::select! { _ = t1 => Err(Error::Other("Essential task failed.".into())), _ = t2 => Ok(()), res = t3 => Err(res.map(|_| ()).expect_err("this future never ends; qed")), } }) } /// Set what the task manager should keep alive, can be called multiple times. pub fn keep_alive<T: 'static + Send>(&mut self, to_keep_alive: T) { // allows this fn to safely called multiple times. use std::mem; let old = mem::replace(&mut self.keep_alive, Box::new(())); self.keep_alive = Box::new((to_keep_alive, old)); } /// Register another TaskManager to terminate and gracefully shutdown when the parent /// terminates and gracefully shutdown. Also ends the parent `future()` if a child's essential /// task fails. (But don't end the parent if a child's normal task fails.) pub fn add_child(&mut self, child: TaskManager) { self.children.push(child); } /// Consume `self` and return the [`TaskRegistry`]. /// /// This [`TaskRegistry`] can be used to check for still running tasks after this task manager /// was dropped. 
pub fn into_task_registry(self) -> TaskRegistry { self.task_registry } } #[derive(Clone)] struct Metrics { // This list is ordered alphabetically poll_duration: HistogramVec, poll_start: CounterVec<U64>, tasks_spawned: CounterVec<U64>, tasks_ended: CounterVec<U64>, } impl Metrics { fn register(registry: &Registry) -> Result<Self, PrometheusError> { Ok(Self { poll_duration: register(HistogramVec::new( HistogramOpts { common_opts: Opts::new( "substrate_tasks_polling_duration", "Duration in seconds of each invocation of Future::poll" ), buckets: exponential_buckets(0.001, 4.0, 9) .expect("function parameters are constant and always valid; qed"), }, &["task_name", "task_group", "kind"] )?, registry)?, poll_start: register(CounterVec::new( Opts::new( "substrate_tasks_polling_started_total", "Total number of times we started invoking Future::poll" ), &["task_name", "task_group", "kind"] )?, registry)?, tasks_spawned: register(CounterVec::new( Opts::new( "substrate_tasks_spawned_total", "Total number of tasks that have been spawned on the Service" ), &["task_name", "task_group", "kind"] )?, registry)?, tasks_ended: register(CounterVec::new( Opts::new( "substrate_tasks_ended_total", "Total number of tasks for which Future::poll has returned Ready(()) or panicked" ), &["task_name", "reason", "task_group", "kind"] )?, registry)?, }) } } /// Ensures that a [`Task`] is unregistered when this object is dropped. struct UnregisterOnDrop { task: Task, registry: TaskRegistry, } impl Drop for UnregisterOnDrop { fn drop(&mut self) { let mut tasks = self.registry.tasks.lock(); if let Entry::Occupied(mut entry) = (*tasks).entry(self.task.clone()) { *entry.get_mut() -= 1; if *entry.get() == 0 { entry.remove(); } } } } /// Represents a running async task in the [`TaskManager`]. /// /// As a task is identified by a name and a group, it is totally valid that there exists multiple /// tasks with the same name and group. #[derive(Clone, Hash, Eq, PartialEq)] pub struct Task { /// The name of the task. pub name: &'static str, /// The group this task is associated to. pub group: &'static str, } impl Task { /// Returns if the `group` is the [`DEFAULT_GROUP_NAME`]. pub fn is_default_group(&self) -> bool { self.group == DEFAULT_GROUP_NAME } } /// Keeps track of all running [`Task`]s in [`TaskManager`]. #[derive(Clone, Default)] pub struct TaskRegistry { tasks: Arc<Mutex<HashMap<Task, usize>>>, } impl TaskRegistry { /// Register a task with the given `name` and `group`. /// /// Returns [`UnregisterOnDrop`] that ensures that the task is unregistered when this value is /// dropped. fn register_task(&self, name: &'static str, group: &'static str) -> UnregisterOnDrop { let task = Task { name, group }; { let mut tasks = self.tasks.lock(); *(*tasks).entry(task.clone()).or_default() += 1; } UnregisterOnDrop { task, registry: self.clone() } } /// Returns the running tasks. /// /// As a task is only identified by its `name` and `group`, there can be duplicate tasks. The /// number per task represents the concurrently running tasks with the same identifier. pub fn running_tasks(&self) -> HashMap<Task, usize> { (*self.tasks.lock()).clone() } }
SpawnEssentialTaskHandle
identifier_name
adapters.py
# Copyright 2014 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import copy import glob import os from distutils.version import StrictVersion from urlparse import urljoin import six import yaml from nailgun.errors import errors from nailgun.logger import logger from nailgun.objects.plugin import ClusterPlugins from nailgun.objects.plugin import Plugin from nailgun.settings import settings @six.add_metaclass(abc.ABCMeta) class PluginAdapterBase(object): """Implements wrapper for plugin db model configuration files logic 1. Uploading plugin provided cluster attributes 2. Uploading tasks 3. Enabling/Disabling of plugin based on cluster attributes 4. Providing repositories/deployment scripts related info to clients """ environment_config_name = 'environment_config.yaml' plugin_metadata = 'metadata.yaml' task_config_name = 'tasks.yaml' def __init__(self, plugin): self.plugin = plugin self.plugin_path = os.path.join( settings.PLUGINS_PATH, self.path_name) self.tasks = [] @abc.abstractmethod def path_name(self): """A name which is used to create path to plugin scripts and repos""" def sync_metadata_to_db(self): """Sync metadata from config yaml files into DB""" metadata_file_path = os.path.join( self.plugin_path, self.plugin_metadata) metadata = self._load_config(metadata_file_path) or {} Plugin.update(self.plugin, metadata) def _load_config(self, config): if os.access(config, os.R_OK): with open(config, "r") as conf: try: return yaml.safe_load(conf.read()) except yaml.YAMLError as exc: logger.warning(exc) raise errors.ParseError( 'Problem with loading YAML file {0}'.format(config)) else: logger.warning("Config {0} is not readable.".format(config)) def _load_tasks(self, config): data = self._load_config(config) for item in data: # backward compatibility for plugins added in version 6.0, # and it is expected that task with role: [controller] # will be executed on all controllers if (StrictVersion(self.plugin.package_version) == StrictVersion('1.0') and isinstance(item['role'], list) and 'controller' in item['role']): item['role'].append('primary-controller') return data def set_cluster_tasks(self): """Load plugins provided tasks and set them to instance tasks variable Provided tasks are loaded from tasks config file. """ task_yaml = os.path.join( self.plugin_path, self.task_config_name) if os.path.exists(task_yaml): self.tasks = self._load_tasks(task_yaml) def filter_tasks(self, tasks, stage): filtered = [] for task in tasks: if stage and stage == task.get('stage'): filtered.append(task) return filtered @property def plugin_release_versions(self): if not self.plugin.releases: return set() return set([rel['version'] for rel in self.plugin.releases]) @property def
(self): return u'{0}-{1}'.format(self.plugin.name, self.plugin.version) @property def slaves_scripts_path(self): return settings.PLUGINS_SLAVES_SCRIPTS_PATH.format( plugin_name=self.path_name) @property def deployment_tasks(self): deployment_tasks = [] for task in self.plugin.deployment_tasks: if task.get('parameters'): task['parameters'].setdefault('cwd', self.slaves_scripts_path) deployment_tasks.append(task) return deployment_tasks @property def volumes_metadata(self): return self.plugin.volumes_metadata @property def components_metadata(self): return self.plugin.components_metadata @property def releases(self): return self.plugin.releases @property def normalized_roles_metadata(self): """Block plugin disabling if nodes with plugin-provided roles exist""" result = {} for role, meta in six.iteritems(self.plugin.roles_metadata): condition = "settings:{0}.metadata.enabled == false".format( self.plugin.name) meta = copy.copy(meta) meta['restrictions'] = [condition] + meta.get('restrictions', []) result[role] = meta return result def get_release_info(self, release): """Get plugin release information which corresponds to given release""" rel_os = release.operating_system.lower() version = release.version release_info = filter( lambda r: ( r['os'] == rel_os and ClusterPlugins.is_release_version_compatible(version, r['version'])), self.plugin.releases) return release_info[0] def repo_files(self, cluster): release_info = self.get_release_info(cluster.release) repo_path = os.path.join( settings.PLUGINS_PATH, self.path_name, release_info['repository_path'], '*') return glob.glob(repo_path) def repo_url(self, cluster): release_info = self.get_release_info(cluster.release) repo_base = settings.PLUGINS_REPO_URL.format( master_ip=settings.MASTER_IP, plugin_name=self.path_name) return urljoin(repo_base, release_info['repository_path']) def master_scripts_path(self, cluster): release_info = self.get_release_info(cluster.release) # NOTE(eli): we cannot user urljoin here, because it # works wrong, if protocol is rsync base_url = settings.PLUGINS_SLAVES_RSYNC.format( master_ip=settings.MASTER_IP, plugin_name=self.path_name) return '{0}{1}'.format( base_url, release_info['deployment_scripts_path']) class PluginAdapterV1(PluginAdapterBase): """Plugins attributes class for package version 1.0.0""" @property def path_name(self): """Returns a name and full version e.g. if there is a plugin with name "plugin_name" and version is "1.0.0", the method returns "plugin_name-1.0.0" """ return self.full_name class PluginAdapterV2(PluginAdapterBase): """Plugins attributes class for package version 2.0.0""" @property def path_name(self): """Returns a name and major version of the plugin e.g. if there is a plugin with name "plugin_name" and version is "1.0.0", the method returns "plugin_name-1.0". It's different from previous version because in previous version we did not have plugin updates, in 2.0.0 version we should expect different plugin path. See blueprint: https://blueprints.launchpad.net/fuel/+spec /plugins-security-fixes-delivery """ return u'{0}-{1}'.format(self.plugin.name, self._major_version) @property def _major_version(self): """Returns major version of plugin's version e.g. 
if plugin has 1.2.3 version, the method returns 1.2 """ version_tuple = StrictVersion(self.plugin.version).version major = '.'.join(map(str, version_tuple[:2])) return major class PluginAdapterV3(PluginAdapterV2): """Plugin wrapper class for package version 3.0.0""" node_roles_config_name = 'node_roles.yaml' volumes_config_name = 'volumes.yaml' deployment_tasks_config_name = 'deployment_tasks.yaml' network_roles_config_name = 'network_roles.yaml' def sync_metadata_to_db(self): """Sync metadata from all config yaml files to DB""" super(PluginAdapterV3, self).sync_metadata_to_db() db_config_metadata_mapping = { 'attributes_metadata': self.environment_config_name, 'roles_metadata': self.node_roles_config_name, 'volumes_metadata': self.volumes_config_name, 'network_roles_metadata': self.network_roles_config_name, 'deployment_tasks': self.deployment_tasks_config_name, 'tasks': self.task_config_name } self._update_plugin(db_config_metadata_mapping) def _update_plugin(self, mapping): data_to_update = {} for attribute, config in six.iteritems(mapping): config_file_path = os.path.join(self.plugin_path, config) attribute_data = self._load_config(config_file_path) # Plugin columns have constraints for nullable data, so # we need to check it if attribute_data: if attribute == 'attributes_metadata': attribute_data = attribute_data['attributes'] data_to_update[attribute] = attribute_data Plugin.update(self.plugin, data_to_update) class PluginAdapterV4(PluginAdapterV3): """Plugin wrapper class for package version 4.0.0""" components = 'components.yaml' def sync_metadata_to_db(self): super(PluginAdapterV4, self).sync_metadata_to_db() db_config_metadata_mapping = { 'components_metadata': self.components } self._update_plugin(db_config_metadata_mapping) __version_mapping = { '1.0.': PluginAdapterV1, '2.0.': PluginAdapterV2, '3.0.': PluginAdapterV3, '4.0.': PluginAdapterV4 } def wrap_plugin(plugin): """Creates plugin object with specific class version :param plugin: plugin db object :returns: cluster attribute object """ package_version = plugin.package_version attr_class = None # Filter by major version for version, klass in six.iteritems(__version_mapping): if package_version.startswith(version): attr_class = klass break if not attr_class: supported_versions = ', '.join(__version_mapping.keys()) raise errors.PackageVersionIsNotCompatible( 'Plugin id={0} package_version={1} ' 'is not supported by Nailgun, currently ' 'supported versions {2}'.format( plugin.id, package_version, supported_versions)) return attr_class(plugin)
full_name
identifier_name
adapters.py
# Copyright 2014 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import copy import glob import os from distutils.version import StrictVersion from urlparse import urljoin import six import yaml from nailgun.errors import errors from nailgun.logger import logger from nailgun.objects.plugin import ClusterPlugins from nailgun.objects.plugin import Plugin from nailgun.settings import settings @six.add_metaclass(abc.ABCMeta) class PluginAdapterBase(object): """Implements wrapper for plugin db model configuration files logic 1. Uploading plugin provided cluster attributes 2. Uploading tasks 3. Enabling/Disabling of plugin based on cluster attributes 4. Providing repositories/deployment scripts related info to clients """ environment_config_name = 'environment_config.yaml' plugin_metadata = 'metadata.yaml' task_config_name = 'tasks.yaml' def __init__(self, plugin): self.plugin = plugin self.plugin_path = os.path.join( settings.PLUGINS_PATH, self.path_name) self.tasks = [] @abc.abstractmethod def path_name(self): """A name which is used to create path to plugin scripts and repos""" def sync_metadata_to_db(self): """Sync metadata from config yaml files into DB""" metadata_file_path = os.path.join( self.plugin_path, self.plugin_metadata) metadata = self._load_config(metadata_file_path) or {} Plugin.update(self.plugin, metadata) def _load_config(self, config): if os.access(config, os.R_OK): with open(config, "r") as conf: try: return yaml.safe_load(conf.read()) except yaml.YAMLError as exc: logger.warning(exc) raise errors.ParseError( 'Problem with loading YAML file {0}'.format(config)) else: logger.warning("Config {0} is not readable.".format(config)) def _load_tasks(self, config): data = self._load_config(config) for item in data: # backward compatibility for plugins added in version 6.0, # and it is expected that task with role: [controller] # will be executed on all controllers if (StrictVersion(self.plugin.package_version) == StrictVersion('1.0') and isinstance(item['role'], list) and 'controller' in item['role']): item['role'].append('primary-controller') return data def set_cluster_tasks(self): """Load plugins provided tasks and set them to instance tasks variable Provided tasks are loaded from tasks config file. 
""" task_yaml = os.path.join( self.plugin_path, self.task_config_name) if os.path.exists(task_yaml): self.tasks = self._load_tasks(task_yaml) def filter_tasks(self, tasks, stage): filtered = [] for task in tasks: if stage and stage == task.get('stage'): filtered.append(task) return filtered @property def plugin_release_versions(self): if not self.plugin.releases: return set() return set([rel['version'] for rel in self.plugin.releases]) @property def full_name(self): return u'{0}-{1}'.format(self.plugin.name, self.plugin.version) @property def slaves_scripts_path(self): return settings.PLUGINS_SLAVES_SCRIPTS_PATH.format( plugin_name=self.path_name) @property def deployment_tasks(self): deployment_tasks = [] for task in self.plugin.deployment_tasks: if task.get('parameters'): task['parameters'].setdefault('cwd', self.slaves_scripts_path) deployment_tasks.append(task) return deployment_tasks @property def volumes_metadata(self): return self.plugin.volumes_metadata @property def components_metadata(self): return self.plugin.components_metadata @property def releases(self): return self.plugin.releases @property def normalized_roles_metadata(self): """Block plugin disabling if nodes with plugin-provided roles exist""" result = {} for role, meta in six.iteritems(self.plugin.roles_metadata): condition = "settings:{0}.metadata.enabled == false".format( self.plugin.name) meta = copy.copy(meta) meta['restrictions'] = [condition] + meta.get('restrictions', []) result[role] = meta return result def get_release_info(self, release): """Get plugin release information which corresponds to given release""" rel_os = release.operating_system.lower() version = release.version release_info = filter( lambda r: ( r['os'] == rel_os and ClusterPlugins.is_release_version_compatible(version, r['version'])), self.plugin.releases) return release_info[0] def repo_files(self, cluster): release_info = self.get_release_info(cluster.release) repo_path = os.path.join( settings.PLUGINS_PATH, self.path_name, release_info['repository_path'], '*') return glob.glob(repo_path) def repo_url(self, cluster): release_info = self.get_release_info(cluster.release) repo_base = settings.PLUGINS_REPO_URL.format( master_ip=settings.MASTER_IP, plugin_name=self.path_name) return urljoin(repo_base, release_info['repository_path']) def master_scripts_path(self, cluster): release_info = self.get_release_info(cluster.release) # NOTE(eli): we cannot user urljoin here, because it # works wrong, if protocol is rsync base_url = settings.PLUGINS_SLAVES_RSYNC.format( master_ip=settings.MASTER_IP, plugin_name=self.path_name) return '{0}{1}'.format( base_url, release_info['deployment_scripts_path']) class PluginAdapterV1(PluginAdapterBase): """Plugins attributes class for package version 1.0.0""" @property def path_name(self): """Returns a name and full version e.g. if there is a plugin with name "plugin_name" and version is "1.0.0", the method returns "plugin_name-1.0.0" """ return self.full_name class PluginAdapterV2(PluginAdapterBase): """Plugins attributes class for package version 2.0.0""" @property def path_name(self): """Returns a name and major version of the plugin e.g. if there is a plugin with name "plugin_name" and version is "1.0.0", the method returns "plugin_name-1.0". It's different from previous version because in previous version we did not have plugin updates, in 2.0.0 version we should expect different plugin path. 
See blueprint: https://blueprints.launchpad.net/fuel/+spec /plugins-security-fixes-delivery """ return u'{0}-{1}'.format(self.plugin.name, self._major_version) @property def _major_version(self): """Returns major version of plugin's version e.g. if plugin has 1.2.3 version, the method returns 1.2 """ version_tuple = StrictVersion(self.plugin.version).version major = '.'.join(map(str, version_tuple[:2])) return major class PluginAdapterV3(PluginAdapterV2): """Plugin wrapper class for package version 3.0.0""" node_roles_config_name = 'node_roles.yaml' volumes_config_name = 'volumes.yaml' deployment_tasks_config_name = 'deployment_tasks.yaml' network_roles_config_name = 'network_roles.yaml' def sync_metadata_to_db(self): """Sync metadata from all config yaml files to DB""" super(PluginAdapterV3, self).sync_metadata_to_db() db_config_metadata_mapping = { 'attributes_metadata': self.environment_config_name, 'roles_metadata': self.node_roles_config_name, 'volumes_metadata': self.volumes_config_name, 'network_roles_metadata': self.network_roles_config_name, 'deployment_tasks': self.deployment_tasks_config_name, 'tasks': self.task_config_name } self._update_plugin(db_config_metadata_mapping) def _update_plugin(self, mapping): data_to_update = {} for attribute, config in six.iteritems(mapping): config_file_path = os.path.join(self.plugin_path, config) attribute_data = self._load_config(config_file_path) # Plugin columns have constraints for nullable data, so # we need to check it if attribute_data: if attribute == 'attributes_metadata': attribute_data = attribute_data['attributes']
class PluginAdapterV4(PluginAdapterV3): """Plugin wrapper class for package version 4.0.0""" components = 'components.yaml' def sync_metadata_to_db(self): super(PluginAdapterV4, self).sync_metadata_to_db() db_config_metadata_mapping = { 'components_metadata': self.components } self._update_plugin(db_config_metadata_mapping) __version_mapping = { '1.0.': PluginAdapterV1, '2.0.': PluginAdapterV2, '3.0.': PluginAdapterV3, '4.0.': PluginAdapterV4 } def wrap_plugin(plugin): """Creates plugin object with specific class version :param plugin: plugin db object :returns: cluster attribute object """ package_version = plugin.package_version attr_class = None # Filter by major version for version, klass in six.iteritems(__version_mapping): if package_version.startswith(version): attr_class = klass break if not attr_class: supported_versions = ', '.join(__version_mapping.keys()) raise errors.PackageVersionIsNotCompatible( 'Plugin id={0} package_version={1} ' 'is not supported by Nailgun, currently ' 'supported versions {2}'.format( plugin.id, package_version, supported_versions)) return attr_class(plugin)
data_to_update[attribute] = attribute_data Plugin.update(self.plugin, data_to_update)
random_line_split
adapters.py
# Copyright 2014 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import copy import glob import os from distutils.version import StrictVersion from urlparse import urljoin import six import yaml from nailgun.errors import errors from nailgun.logger import logger from nailgun.objects.plugin import ClusterPlugins from nailgun.objects.plugin import Plugin from nailgun.settings import settings @six.add_metaclass(abc.ABCMeta) class PluginAdapterBase(object): """Implements wrapper for plugin db model configuration files logic 1. Uploading plugin provided cluster attributes 2. Uploading tasks 3. Enabling/Disabling of plugin based on cluster attributes 4. Providing repositories/deployment scripts related info to clients """ environment_config_name = 'environment_config.yaml' plugin_metadata = 'metadata.yaml' task_config_name = 'tasks.yaml' def __init__(self, plugin): self.plugin = plugin self.plugin_path = os.path.join( settings.PLUGINS_PATH, self.path_name) self.tasks = [] @abc.abstractmethod def path_name(self): """A name which is used to create path to plugin scripts and repos""" def sync_metadata_to_db(self): """Sync metadata from config yaml files into DB""" metadata_file_path = os.path.join( self.plugin_path, self.plugin_metadata) metadata = self._load_config(metadata_file_path) or {} Plugin.update(self.plugin, metadata) def _load_config(self, config): if os.access(config, os.R_OK):
else: logger.warning("Config {0} is not readable.".format(config)) def _load_tasks(self, config): data = self._load_config(config) for item in data: # backward compatibility for plugins added in version 6.0, # and it is expected that task with role: [controller] # will be executed on all controllers if (StrictVersion(self.plugin.package_version) == StrictVersion('1.0') and isinstance(item['role'], list) and 'controller' in item['role']): item['role'].append('primary-controller') return data def set_cluster_tasks(self): """Load plugins provided tasks and set them to instance tasks variable Provided tasks are loaded from tasks config file. """ task_yaml = os.path.join( self.plugin_path, self.task_config_name) if os.path.exists(task_yaml): self.tasks = self._load_tasks(task_yaml) def filter_tasks(self, tasks, stage): filtered = [] for task in tasks: if stage and stage == task.get('stage'): filtered.append(task) return filtered @property def plugin_release_versions(self): if not self.plugin.releases: return set() return set([rel['version'] for rel in self.plugin.releases]) @property def full_name(self): return u'{0}-{1}'.format(self.plugin.name, self.plugin.version) @property def slaves_scripts_path(self): return settings.PLUGINS_SLAVES_SCRIPTS_PATH.format( plugin_name=self.path_name) @property def deployment_tasks(self): deployment_tasks = [] for task in self.plugin.deployment_tasks: if task.get('parameters'): task['parameters'].setdefault('cwd', self.slaves_scripts_path) deployment_tasks.append(task) return deployment_tasks @property def volumes_metadata(self): return self.plugin.volumes_metadata @property def components_metadata(self): return self.plugin.components_metadata @property def releases(self): return self.plugin.releases @property def normalized_roles_metadata(self): """Block plugin disabling if nodes with plugin-provided roles exist""" result = {} for role, meta in six.iteritems(self.plugin.roles_metadata): condition = "settings:{0}.metadata.enabled == false".format( self.plugin.name) meta = copy.copy(meta) meta['restrictions'] = [condition] + meta.get('restrictions', []) result[role] = meta return result def get_release_info(self, release): """Get plugin release information which corresponds to given release""" rel_os = release.operating_system.lower() version = release.version release_info = filter( lambda r: ( r['os'] == rel_os and ClusterPlugins.is_release_version_compatible(version, r['version'])), self.plugin.releases) return release_info[0] def repo_files(self, cluster): release_info = self.get_release_info(cluster.release) repo_path = os.path.join( settings.PLUGINS_PATH, self.path_name, release_info['repository_path'], '*') return glob.glob(repo_path) def repo_url(self, cluster): release_info = self.get_release_info(cluster.release) repo_base = settings.PLUGINS_REPO_URL.format( master_ip=settings.MASTER_IP, plugin_name=self.path_name) return urljoin(repo_base, release_info['repository_path']) def master_scripts_path(self, cluster): release_info = self.get_release_info(cluster.release) # NOTE(eli): we cannot user urljoin here, because it # works wrong, if protocol is rsync base_url = settings.PLUGINS_SLAVES_RSYNC.format( master_ip=settings.MASTER_IP, plugin_name=self.path_name) return '{0}{1}'.format( base_url, release_info['deployment_scripts_path']) class PluginAdapterV1(PluginAdapterBase): """Plugins attributes class for package version 1.0.0""" @property def path_name(self): """Returns a name and full version e.g. 
if there is a plugin with name "plugin_name" and version is "1.0.0", the method returns "plugin_name-1.0.0" """ return self.full_name class PluginAdapterV2(PluginAdapterBase): """Plugins attributes class for package version 2.0.0""" @property def path_name(self): """Returns a name and major version of the plugin e.g. if there is a plugin with name "plugin_name" and version is "1.0.0", the method returns "plugin_name-1.0". It's different from previous version because in previous version we did not have plugin updates, in 2.0.0 version we should expect different plugin path. See blueprint: https://blueprints.launchpad.net/fuel/+spec /plugins-security-fixes-delivery """ return u'{0}-{1}'.format(self.plugin.name, self._major_version) @property def _major_version(self): """Returns major version of plugin's version e.g. if plugin has 1.2.3 version, the method returns 1.2 """ version_tuple = StrictVersion(self.plugin.version).version major = '.'.join(map(str, version_tuple[:2])) return major class PluginAdapterV3(PluginAdapterV2): """Plugin wrapper class for package version 3.0.0""" node_roles_config_name = 'node_roles.yaml' volumes_config_name = 'volumes.yaml' deployment_tasks_config_name = 'deployment_tasks.yaml' network_roles_config_name = 'network_roles.yaml' def sync_metadata_to_db(self): """Sync metadata from all config yaml files to DB""" super(PluginAdapterV3, self).sync_metadata_to_db() db_config_metadata_mapping = { 'attributes_metadata': self.environment_config_name, 'roles_metadata': self.node_roles_config_name, 'volumes_metadata': self.volumes_config_name, 'network_roles_metadata': self.network_roles_config_name, 'deployment_tasks': self.deployment_tasks_config_name, 'tasks': self.task_config_name } self._update_plugin(db_config_metadata_mapping) def _update_plugin(self, mapping): data_to_update = {} for attribute, config in six.iteritems(mapping): config_file_path = os.path.join(self.plugin_path, config) attribute_data = self._load_config(config_file_path) # Plugin columns have constraints for nullable data, so # we need to check it if attribute_data: if attribute == 'attributes_metadata': attribute_data = attribute_data['attributes'] data_to_update[attribute] = attribute_data Plugin.update(self.plugin, data_to_update) class PluginAdapterV4(PluginAdapterV3): """Plugin wrapper class for package version 4.0.0""" components = 'components.yaml' def sync_metadata_to_db(self): super(PluginAdapterV4, self).sync_metadata_to_db() db_config_metadata_mapping = { 'components_metadata': self.components } self._update_plugin(db_config_metadata_mapping) __version_mapping = { '1.0.': PluginAdapterV1, '2.0.': PluginAdapterV2, '3.0.': PluginAdapterV3, '4.0.': PluginAdapterV4 } def wrap_plugin(plugin): """Creates plugin object with specific class version :param plugin: plugin db object :returns: cluster attribute object """ package_version = plugin.package_version attr_class = None # Filter by major version for version, klass in six.iteritems(__version_mapping): if package_version.startswith(version): attr_class = klass break if not attr_class: supported_versions = ', '.join(__version_mapping.keys()) raise errors.PackageVersionIsNotCompatible( 'Plugin id={0} package_version={1} ' 'is not supported by Nailgun, currently ' 'supported versions {2}'.format( plugin.id, package_version, supported_versions)) return attr_class(plugin)
with open(config, "r") as conf: try: return yaml.safe_load(conf.read()) except yaml.YAMLError as exc: logger.warning(exc) raise errors.ParseError( 'Problem with loading YAML file {0}'.format(config))
conditional_block
adapters.py
# Copyright 2014 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import copy import glob import os from distutils.version import StrictVersion from urlparse import urljoin import six import yaml from nailgun.errors import errors from nailgun.logger import logger from nailgun.objects.plugin import ClusterPlugins from nailgun.objects.plugin import Plugin from nailgun.settings import settings @six.add_metaclass(abc.ABCMeta) class PluginAdapterBase(object): """Implements wrapper for plugin db model configuration files logic 1. Uploading plugin provided cluster attributes 2. Uploading tasks 3. Enabling/Disabling of plugin based on cluster attributes 4. Providing repositories/deployment scripts related info to clients """ environment_config_name = 'environment_config.yaml' plugin_metadata = 'metadata.yaml' task_config_name = 'tasks.yaml' def __init__(self, plugin): self.plugin = plugin self.plugin_path = os.path.join( settings.PLUGINS_PATH, self.path_name) self.tasks = [] @abc.abstractmethod def path_name(self): """A name which is used to create path to plugin scripts and repos""" def sync_metadata_to_db(self): """Sync metadata from config yaml files into DB""" metadata_file_path = os.path.join( self.plugin_path, self.plugin_metadata) metadata = self._load_config(metadata_file_path) or {} Plugin.update(self.plugin, metadata) def _load_config(self, config): if os.access(config, os.R_OK): with open(config, "r") as conf: try: return yaml.safe_load(conf.read()) except yaml.YAMLError as exc: logger.warning(exc) raise errors.ParseError( 'Problem with loading YAML file {0}'.format(config)) else: logger.warning("Config {0} is not readable.".format(config)) def _load_tasks(self, config): data = self._load_config(config) for item in data: # backward compatibility for plugins added in version 6.0, # and it is expected that task with role: [controller] # will be executed on all controllers if (StrictVersion(self.plugin.package_version) == StrictVersion('1.0') and isinstance(item['role'], list) and 'controller' in item['role']): item['role'].append('primary-controller') return data def set_cluster_tasks(self): """Load plugins provided tasks and set them to instance tasks variable Provided tasks are loaded from tasks config file. 
""" task_yaml = os.path.join( self.plugin_path, self.task_config_name) if os.path.exists(task_yaml): self.tasks = self._load_tasks(task_yaml) def filter_tasks(self, tasks, stage): filtered = [] for task in tasks: if stage and stage == task.get('stage'): filtered.append(task) return filtered @property def plugin_release_versions(self): if not self.plugin.releases: return set() return set([rel['version'] for rel in self.plugin.releases]) @property def full_name(self): return u'{0}-{1}'.format(self.plugin.name, self.plugin.version) @property def slaves_scripts_path(self): return settings.PLUGINS_SLAVES_SCRIPTS_PATH.format( plugin_name=self.path_name) @property def deployment_tasks(self): deployment_tasks = [] for task in self.plugin.deployment_tasks: if task.get('parameters'): task['parameters'].setdefault('cwd', self.slaves_scripts_path) deployment_tasks.append(task) return deployment_tasks @property def volumes_metadata(self): return self.plugin.volumes_metadata @property def components_metadata(self): return self.plugin.components_metadata @property def releases(self): return self.plugin.releases @property def normalized_roles_metadata(self): """Block plugin disabling if nodes with plugin-provided roles exist""" result = {} for role, meta in six.iteritems(self.plugin.roles_metadata): condition = "settings:{0}.metadata.enabled == false".format( self.plugin.name) meta = copy.copy(meta) meta['restrictions'] = [condition] + meta.get('restrictions', []) result[role] = meta return result def get_release_info(self, release): """Get plugin release information which corresponds to given release""" rel_os = release.operating_system.lower() version = release.version release_info = filter( lambda r: ( r['os'] == rel_os and ClusterPlugins.is_release_version_compatible(version, r['version'])), self.plugin.releases) return release_info[0] def repo_files(self, cluster): release_info = self.get_release_info(cluster.release) repo_path = os.path.join( settings.PLUGINS_PATH, self.path_name, release_info['repository_path'], '*') return glob.glob(repo_path) def repo_url(self, cluster): release_info = self.get_release_info(cluster.release) repo_base = settings.PLUGINS_REPO_URL.format( master_ip=settings.MASTER_IP, plugin_name=self.path_name) return urljoin(repo_base, release_info['repository_path']) def master_scripts_path(self, cluster): release_info = self.get_release_info(cluster.release) # NOTE(eli): we cannot user urljoin here, because it # works wrong, if protocol is rsync base_url = settings.PLUGINS_SLAVES_RSYNC.format( master_ip=settings.MASTER_IP, plugin_name=self.path_name) return '{0}{1}'.format( base_url, release_info['deployment_scripts_path']) class PluginAdapterV1(PluginAdapterBase): """Plugins attributes class for package version 1.0.0""" @property def path_name(self): """Returns a name and full version e.g. if there is a plugin with name "plugin_name" and version is "1.0.0", the method returns "plugin_name-1.0.0" """ return self.full_name class PluginAdapterV2(PluginAdapterBase): """Plugins attributes class for package version 2.0.0""" @property def path_name(self): """Returns a name and major version of the plugin e.g. if there is a plugin with name "plugin_name" and version is "1.0.0", the method returns "plugin_name-1.0". It's different from previous version because in previous version we did not have plugin updates, in 2.0.0 version we should expect different plugin path. 
See blueprint: https://blueprints.launchpad.net/fuel/+spec /plugins-security-fixes-delivery """ return u'{0}-{1}'.format(self.plugin.name, self._major_version) @property def _major_version(self): """Returns major version of plugin's version e.g. if plugin has 1.2.3 version, the method returns 1.2 """ version_tuple = StrictVersion(self.plugin.version).version major = '.'.join(map(str, version_tuple[:2])) return major class PluginAdapterV3(PluginAdapterV2):
class PluginAdapterV4(PluginAdapterV3): """Plugin wrapper class for package version 4.0.0""" components = 'components.yaml' def sync_metadata_to_db(self): super(PluginAdapterV4, self).sync_metadata_to_db() db_config_metadata_mapping = { 'components_metadata': self.components } self._update_plugin(db_config_metadata_mapping) __version_mapping = { '1.0.': PluginAdapterV1, '2.0.': PluginAdapterV2, '3.0.': PluginAdapterV3, '4.0.': PluginAdapterV4 } def wrap_plugin(plugin): """Creates plugin object with specific class version :param plugin: plugin db object :returns: cluster attribute object """ package_version = plugin.package_version attr_class = None # Filter by major version for version, klass in six.iteritems(__version_mapping): if package_version.startswith(version): attr_class = klass break if not attr_class: supported_versions = ', '.join(__version_mapping.keys()) raise errors.PackageVersionIsNotCompatible( 'Plugin id={0} package_version={1} ' 'is not supported by Nailgun, currently ' 'supported versions {2}'.format( plugin.id, package_version, supported_versions)) return attr_class(plugin)
"""Plugin wrapper class for package version 3.0.0""" node_roles_config_name = 'node_roles.yaml' volumes_config_name = 'volumes.yaml' deployment_tasks_config_name = 'deployment_tasks.yaml' network_roles_config_name = 'network_roles.yaml' def sync_metadata_to_db(self): """Sync metadata from all config yaml files to DB""" super(PluginAdapterV3, self).sync_metadata_to_db() db_config_metadata_mapping = { 'attributes_metadata': self.environment_config_name, 'roles_metadata': self.node_roles_config_name, 'volumes_metadata': self.volumes_config_name, 'network_roles_metadata': self.network_roles_config_name, 'deployment_tasks': self.deployment_tasks_config_name, 'tasks': self.task_config_name } self._update_plugin(db_config_metadata_mapping) def _update_plugin(self, mapping): data_to_update = {} for attribute, config in six.iteritems(mapping): config_file_path = os.path.join(self.plugin_path, config) attribute_data = self._load_config(config_file_path) # Plugin columns have constraints for nullable data, so # we need to check it if attribute_data: if attribute == 'attributes_metadata': attribute_data = attribute_data['attributes'] data_to_update[attribute] = attribute_data Plugin.update(self.plugin, data_to_update)
identifier_body
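The wrap_plugin helper in the record above selects an adapter class by matching the plugin's package_version against a table of version prefixes and raising an error when nothing matches. A minimal, self-contained sketch of that dispatch pattern follows; AdapterV1, AdapterV2, UnsupportedVersion and the plain-dict plugin record are hypothetical stand-ins, not Nailgun objects.

# Minimal sketch of prefix-based version dispatch, mirroring wrap_plugin;
# the adapter classes and the plugin dict are illustrative stand-ins.


class UnsupportedVersion(Exception):
    pass


class AdapterV1(object):
    def __init__(self, plugin):
        self.plugin = plugin


class AdapterV2(AdapterV1):
    pass


_VERSION_MAPPING = {
    '1.0.': AdapterV1,
    '2.0.': AdapterV2,
}


def wrap(plugin):
    """Return an adapter whose prefix matches plugin['package_version']."""
    version = plugin['package_version']
    for prefix, klass in _VERSION_MAPPING.items():
        if version.startswith(prefix):
            return klass(plugin)
    raise UnsupportedVersion(
        'package_version {0} not supported, known prefixes: {1}'.format(
            version, ', '.join(sorted(_VERSION_MAPPING))))


# A 2.0.1 package resolves to AdapterV2.
assert isinstance(wrap({'package_version': '2.0.1'}), AdapterV2)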
food.py
''' Raj Palleti Last revision: 8/16/19 This class instantiates foodProcessor to get each person's foods, allergens, and nutrients from the user's text, which is recognized using the Google Voice Kit. It then uses foodLog to log each person's dietary info into their own sheet in a Google Spreadsheet. ''' # Set the 'JAVAHOME' variable to your own path of jdk/bin/java. import os os.environ['JAVAHOME'] = "/usr/java/jdk1.8.0_202/bin/java" import aiy.assistant.grpc import aiy.audio import aiy.voicehat import sys from nltk.tag import StanfordNERTagger from nltk.tokenize import word_tokenize import gender_guesser.detector as gender import foodProcessor import foodLog from datetime import datetime from pytz import timezone def convert_pronouns_to_names(text, username): ''' This method uses Stanford NER and the gender-guesser package to convert pronouns in the text to their corresponding names. It returns the processed text after replacing all pronouns. ''' # Set your own path for the classification model and Stanford tagged jar file of StanfordNERTagger. st = StanfordNERTagger( '/home/pi/AIY-voice-kit-python/src/examples/voice/app/stanford-ner-2018-10-16/classifiers/english.all.3class.distsim.crf.ser.gz', '/home/pi/AIY-voice-kit-python/src/examples/voice/app/stanford-ner-2018-10-16/stanford-ner.jar', encoding='utf-8') tokenized_text = word_tokenize(text) classified_text = st.tag(tokenized_text) det = gender.Detector() wordCount = len(tokenized_text) # Keep track of the most recent male and female names, which will be used to replace the pronouns "He" and "She". lastMaleName = '' lastFemaleName = '' index = 0 newText = '' for i in range(wordCount): word = classified_text[i][0] partOfSpeech = classified_text[i][1] if word == 'He' or word == 'he': if lastMaleName != '': newText = newText + lastMaleName else: newText = newText + word elif word == 'She' or word == 'she': if lastFemaleName != '': newText = newText + lastFemaleName else: newText = newText + word elif word == 'I': newText = newText + username else: newText = newText + word if partOfSpeech == 'PERSON': if "female" in det.get_gender(word): lastFemaleName = word elif "male" in det.get_gender(word): lastMaleName = word index = index + len(word) if index < len(text) and text[index] == ' ': index = index + 1 newText += ' ' return newText def
(text): ''' This method splits the text into substrings, where each begins with a name and continues until reaching the next name. It will return the list of substrings and a list that contains the name in each substring. ''' # Set your own path for the classification model and Stanford tagged jar file of StanfordNERTagger. st = StanfordNERTagger( '/home/pi/AIY-voice-kit-python/src/examples/voice/app/stanford-ner-2018-10-16/classifiers/english.all.3class.distsim.crf.ser.gz', '/home/pi/AIY-voice-kit-python/src/examples/voice/app/stanford-ner-2018-10-16/stanford-ner.jar', encoding='utf-8') tokenized_text = word_tokenize(text) classified_text = st.tag(tokenized_text) wordCount = len(tokenized_text) # charIndexes stores the starting indexes for each name from the text. charIndexes = [] charCounter = 0 newCharCounter = 0 substrings = [] names = [] for i in range(wordCount): word = classified_text[i][0] partOfSpeech = classified_text[i][1] if partOfSpeech == 'PERSON': newCharCounter = text.find(word, charCounter) charIndexes.append(newCharCounter) charCounter = newCharCounter + 1 names.append(classified_text[i][0]) for i in range(len(charIndexes)): currIndex = charIndexes[i] if i == len(charIndexes) - 1: substrings.append(text[currIndex: ]) else: nextIndex = charIndexes[i + 1] substrings.append(text[currIndex: nextIndex]) return substrings, names def get_diet(substrings, names): ''' This method uses the substrings to determine the foods, nutrients, and allergens consumed by each person. It will return a dictionary containing the dietary information for each person. ''' ''' "id" and "key" are used to make requests to the Edamam Food API, and they are obtained by registering for an account from Edamam. ''' id = '6bb24f34' key = 'bcd38e86ec9f271288974f431e0c94e6' diet = {} for name in names: if name not in diet: diet[name] = {} diet[name]['foods'] = [] diet[name]['quantities'] = [] diet[name]['allergens'] = [] diet[name]['nutrients'] = {"Energy": [], "Fat": [], "Carbs": [], "Fiber": [], "Sugars": [], "Protein": [], "Sodium": [], "Calcium": [], "Magnesium": [], "Potassium": [], "Iron": [], "Vitamin C": [], "Vitamin E": [], "Vitamin K": []} # For each substring, find the person's name and update the person's dietary information using the foods in the substring. for i in range(len(substrings)): substring = substrings[i] name = names[i] # Instantiate foodProcessor. processor = foodProcessor.foodProcessor(key, id) ''' Get list of foods, foodURIs, measureURIs, and quantities for each food. foodURIs and measureURIs are used to get the nutrients for each food. ''' foods, foodIds, measureURIs, quantities = processor.get_food_list(substring) # Get allergens and nutrients from all foods. details = processor.get_food_details(foodIds, measureURIs) allergens = [] nutrients = {"Energy": [], "Fat": [], "Carbs": [], "Fiber": [], "Sugars": [], "Protein": [], "Sodium": [], "Calcium": [], "Magnesium": [], "Potassium": [], "Iron": [], "Vitamin C": [], "Vitamin E": [], "Vitamin K": []} # Add the foods and quantities to the person's diet. diet[name]['foods'].extend(foods) diet[name]['quantities'].extend(quantities) # For each food, add the allergens and nutrients to the person's diet. for i in range(len(details)): food = details[i] diet[name]['allergens'].append(format_allergens(food['allergens'])) for nutrient in nutrients: diet[name]['nutrients'][nutrient].append(food["nutrients"][nutrient]) return diet def format_allergens(allergens): ''' This method concatenates the list of allergens in each food to a string. 
''' if len(allergens) == 1: return allergens[0] algs = '' for i in range(len(allergens)): for j in range(len(allergens[i])): if j == len(allergens[i]) - 1: algs += allergens[i][j] if i != len(allergens) - 1: algs += ', ' else: algs += allergens[i][j] return algs def log_diet(diet, rawText): ''' This method uses the diet dictionary to log the dietary information for each person in the corresponding sheet. It will also update everyone's summary log sheet. ''' # Instantiate foodLog flog = foodLog.foodLog() cupertino = timezone('US/Pacific') now = datetime.now(cupertino) date = now.strftime("%B %d, %Y") time = now.strftime("%I:%M %p") credentials = flog.sheet_oauth() for name in diet: # ip contains the values that will be appended onto the next row of the Google Spreadsheet. ip = [] ip.append(date) ip.append(time) ip.append(rawText) ''' If the person consumed at least one food item, then construct a new row containing dietary information to be logged in the person's sheet. ''' if len(diet[name]['foods']) > 0: ip.append(diet[name]['foods'][0]) ip.append(diet[name]['quantities'][0]) if len(diet[name]['allergens'][0]) == 0: ip.append("NONE") else: ip.append(diet[name]['allergens'][0]) for nutrient in diet[name]['nutrients']: ip.append(diet[name]['nutrients'][nutrient][0]) payload = {"values": [ip]} flog.write_to_sheet(credentials, '1GxFpWhwISzni7DWviFzH500k9eFONpSGQ8uJ0-kBKY4', name, payload) for i in range(1, len(diet[name]['foods'])): ip = ["", "", "", diet[name]['foods'][i], diet[name]['quantities'][i]] if len(diet[name]['allergens'][i]) == 0: ip.append("NONE") else: ip.append(diet[name]['allergens'][i]) for nutrient in diet[name]['nutrients']: ip.append(diet[name]['nutrients'][nutrient][i]) payload = {"values": [ip]} flog.write_to_sheet(credentials, '1GxFpWhwISzni7DWviFzH500k9eFONpSGQ8uJ0-kBKY4', name, payload) # Construct a new row containing nutrient totals to be logged in the person's sheet. ip = ["", "", "", "", "", ""] for nutrient in diet[name]['nutrients']: total = 0 for quantity in diet[name]['nutrients'][nutrient]: total += quantity ip.append("Total: " + str(round(total, 1))) payload = {"values": [ip]} flog.write_to_sheet(credentials, '1GxFpWhwISzni7DWviFzH500k9eFONpSGQ8uJ0-kBKY4', name, payload) # If the person did not consume any foods, then set the nutrient totals to 0 and update the person's sheet. else: ip.append("NONE") ip.append("NONE") ip.append("NONE") for nutrient in diet[name]['nutrients']: ip.append("Total: 0") payload = {"values": [ip]} flog.write_to_sheet(credentials, '1GxFpWhwISzni7DWviFzH500k9eFONpSGQ8uJ0-kBKY4', name, payload) # Read the nutrient values from the person's sheet and update the person's summary log. values = flog.readSheet(credentials, '1GxFpWhwISzni7DWviFzH500k9eFONpSGQ8uJ0-kBKY4', name, "A1:A10000") payload = flog.process_values(credentials, values, date, len(diet[name]['foods']), name) daily_log_name = name + "_Daily_Log" values = flog.readSheet(credentials, '1GxFpWhwISzni7DWviFzH500k9eFONpSGQ8uJ0-kBKY4', daily_log_name, "A1:A10000") index_date = 1 dateExists = False for i, j in enumerate(values): for d in j: if d == date: index_date = i + 1 dateExists = True flog.update_sheet(credentials, '1GxFpWhwISzni7DWviFzH500k9eFONpSGQ8uJ0-kBKY4', daily_log_name, index_date, payload, dateExists) def main(): ''' Prompt the user to enter their name. Create a new sheet for the user if their sheet does not already exist in the spreadsheet. Then log the user's diet in their sheet and update the user's summary log. 
''' username = input("Please enter your name: ") assistant = aiy.assistant.grpc.get_assistant() with aiy.audio.get_recorder(): aiy.audio.say('What food did you or your family members eat today?', lang="en-US") print('Listening...') text, audio = assistant.recognize() if text: # find date and time cupertino = timezone('US/Pacific') now = datetime.now(cupertino) date = now.strftime("%B %d, %Y") time = now.strftime("%I:%M %p") print(text) textToUser = text.replace("I", "You") textToUser = textToUser.replace("me", "you") textToUser = textToUser.replace("my", "your") textToUser = textToUser.replace("My", "your") textToUser = textToUser.replace("mine", "yours") textToUser = textToUser.replace("Mine", "Yours") print(textToUser) aiy.audio.say(textToUser, lang="en-US") flog = foodLog.foodLog() credentials = flog.sheet_oauth() newText = convert_pronouns_to_names(text, username) substrings, names = get_substrings(newText) for name in names: # Create a new sheet for each person who does not already have a sheet in the Google Spreadsheet. if not flog.isInSheet(credentials, '1GxFpWhwISzni7DWviFzH500k9eFONpSGQ8uJ0-kBKY4', name): flog.create_new_sheet(credentials, '1GxFpWhwISzni7DWviFzH500k9eFONpSGQ8uJ0-kBKY4', name) daily_log_name = name + "_Daily_Log" flog.create_new_sheet(credentials, '1GxFpWhwISzni7DWviFzH500k9eFONpSGQ8uJ0-kBKY4', daily_log_name) diet = get_diet(substrings, names) log_diet(diet, text) if __name__ == '__main__': main()
get_substrings
identifier_name
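The get_substrings function reconstructed above walks the NER output, records the character offset of every PERSON token via text.find with a moving start position, and then slices the text between consecutive name offsets. The same slicing logic is sketched below; the split_by_names name and the hand-written (word, tag) list are assumptions that replace the Stanford tagger so the example runs without the NER jars.

# Sketch of the name-based splitting used by get_substrings, with the
# Stanford NER output replaced by a hand-written (word, tag) list so the
# example is self-contained.

def split_by_names(text, tagged_words):
    """Return (substrings, names): one substring starting at each PERSON."""
    char_indexes = []
    names = []
    cursor = 0
    for word, tag in tagged_words:
        if tag == 'PERSON':
            # Search from the current cursor so a repeated name is not
            # matched at its first occurrence again.
            start = text.find(word, cursor)
            char_indexes.append(start)
            cursor = start + 1
            names.append(word)

    substrings = []
    for i, start in enumerate(char_indexes):
        end = char_indexes[i + 1] if i + 1 < len(char_indexes) else len(text)
        substrings.append(text[start:end])
    return substrings, names


text = "Alice ate an apple and Bob had two eggs"
tags = [("Alice", "PERSON"), ("ate", "O"), ("an", "O"), ("apple", "O"),
        ("and", "O"), ("Bob", "PERSON"), ("had", "O"), ("two", "O"),
        ("eggs", "O")]
print(split_by_names(text, tags))
# (['Alice ate an apple and ', 'Bob had two eggs'], ['Alice', 'Bob'])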
food.py
''' Raj Palleti Last revision: 8/16/19 This class instantiates foodProcessor to get each person's foods, allergens, and nutrients from the user's text, which is recognized using the Google Voice Kit. It then uses foodLog to log each person's dietary info into their own sheet in a Google Spreadsheet. ''' # Set the 'JAVAHOME' variable to your own path of jdk/bin/java. import os os.environ['JAVAHOME'] = "/usr/java/jdk1.8.0_202/bin/java" import aiy.assistant.grpc import aiy.audio import aiy.voicehat import sys from nltk.tag import StanfordNERTagger from nltk.tokenize import word_tokenize import gender_guesser.detector as gender import foodProcessor import foodLog from datetime import datetime from pytz import timezone def convert_pronouns_to_names(text, username): ''' This method uses Stanford NER and the gender-guesser package to convert pronouns in the text to their corresponding names. It returns the processed text after replacing all pronouns. ''' # Set your own path for the classification model and Stanford tagged jar file of StanfordNERTagger. st = StanfordNERTagger( '/home/pi/AIY-voice-kit-python/src/examples/voice/app/stanford-ner-2018-10-16/classifiers/english.all.3class.distsim.crf.ser.gz', '/home/pi/AIY-voice-kit-python/src/examples/voice/app/stanford-ner-2018-10-16/stanford-ner.jar', encoding='utf-8') tokenized_text = word_tokenize(text) classified_text = st.tag(tokenized_text) det = gender.Detector() wordCount = len(tokenized_text) # Keep track of the most recent male and female names, which will be used to replace the pronouns "He" and "She". lastMaleName = '' lastFemaleName = '' index = 0 newText = '' for i in range(wordCount): word = classified_text[i][0] partOfSpeech = classified_text[i][1] if word == 'He' or word == 'he': if lastMaleName != '': newText = newText + lastMaleName else: newText = newText + word elif word == 'She' or word == 'she': if lastFemaleName != '': newText = newText + lastFemaleName else: newText = newText + word elif word == 'I': newText = newText + username else: newText = newText + word if partOfSpeech == 'PERSON':
index = index + len(word) if index < len(text) and text[index] == ' ': index = index + 1 newText += ' ' return newText def get_substrings(text): ''' This method splits the text into substrings, where each begins with a name and continues until reaching the next name. It will return the list of substrings and a list that contains the name in each substring. ''' # Set your own path for the classification model and Stanford tagged jar file of StanfordNERTagger. st = StanfordNERTagger( '/home/pi/AIY-voice-kit-python/src/examples/voice/app/stanford-ner-2018-10-16/classifiers/english.all.3class.distsim.crf.ser.gz', '/home/pi/AIY-voice-kit-python/src/examples/voice/app/stanford-ner-2018-10-16/stanford-ner.jar', encoding='utf-8') tokenized_text = word_tokenize(text) classified_text = st.tag(tokenized_text) wordCount = len(tokenized_text) # charIndexes stores the starting indexes for each name from the text. charIndexes = [] charCounter = 0 newCharCounter = 0 substrings = [] names = [] for i in range(wordCount): word = classified_text[i][0] partOfSpeech = classified_text[i][1] if partOfSpeech == 'PERSON': newCharCounter = text.find(word, charCounter) charIndexes.append(newCharCounter) charCounter = newCharCounter + 1 names.append(classified_text[i][0]) for i in range(len(charIndexes)): currIndex = charIndexes[i] if i == len(charIndexes) - 1: substrings.append(text[currIndex: ]) else: nextIndex = charIndexes[i + 1] substrings.append(text[currIndex: nextIndex]) return substrings, names def get_diet(substrings, names): ''' This method uses the substrings to determine the foods, nutrients, and allergens consumed by each person. It will return a dictionary containing the dietary information for each person. ''' ''' "id" and "key" are used to make requests to the Edamam Food API, and they are obtained by registering for an account from Edamam. ''' id = '6bb24f34' key = 'bcd38e86ec9f271288974f431e0c94e6' diet = {} for name in names: if name not in diet: diet[name] = {} diet[name]['foods'] = [] diet[name]['quantities'] = [] diet[name]['allergens'] = [] diet[name]['nutrients'] = {"Energy": [], "Fat": [], "Carbs": [], "Fiber": [], "Sugars": [], "Protein": [], "Sodium": [], "Calcium": [], "Magnesium": [], "Potassium": [], "Iron": [], "Vitamin C": [], "Vitamin E": [], "Vitamin K": []} # For each substring, find the person's name and update the person's dietary information using the foods in the substring. for i in range(len(substrings)): substring = substrings[i] name = names[i] # Instantiate foodProcessor. processor = foodProcessor.foodProcessor(key, id) ''' Get list of foods, foodURIs, measureURIs, and quantities for each food. foodURIs and measureURIs are used to get the nutrients for each food. ''' foods, foodIds, measureURIs, quantities = processor.get_food_list(substring) # Get allergens and nutrients from all foods. details = processor.get_food_details(foodIds, measureURIs) allergens = [] nutrients = {"Energy": [], "Fat": [], "Carbs": [], "Fiber": [], "Sugars": [], "Protein": [], "Sodium": [], "Calcium": [], "Magnesium": [], "Potassium": [], "Iron": [], "Vitamin C": [], "Vitamin E": [], "Vitamin K": []} # Add the foods and quantities to the person's diet. diet[name]['foods'].extend(foods) diet[name]['quantities'].extend(quantities) # For each food, add the allergens and nutrients to the person's diet. 
for i in range(len(details)): food = details[i] diet[name]['allergens'].append(format_allergens(food['allergens'])) for nutrient in nutrients: diet[name]['nutrients'][nutrient].append(food["nutrients"][nutrient]) return diet def format_allergens(allergens): ''' This method concatenates the list of allergens in each food to a string. ''' if len(allergens) == 1: return allergens[0] algs = '' for i in range(len(allergens)): for j in range(len(allergens[i])): if j == len(allergens[i]) - 1: algs += allergens[i][j] if i != len(allergens) - 1: algs += ', ' else: algs += allergens[i][j] return algs def log_diet(diet, rawText): ''' This method uses the diet dictionary to log the dietary information for each person in the corresponding sheet. It will also update everyone's summary log sheet. ''' # Instantiate foodLog flog = foodLog.foodLog() cupertino = timezone('US/Pacific') now = datetime.now(cupertino) date = now.strftime("%B %d, %Y") time = now.strftime("%I:%M %p") credentials = flog.sheet_oauth() for name in diet: # ip contains the values that will be appended onto the next row of the Google Spreadsheet. ip = [] ip.append(date) ip.append(time) ip.append(rawText) ''' If the person consumed at least one food item, then construct a new row containing dietary information to be logged in the person's sheet. ''' if len(diet[name]['foods']) > 0: ip.append(diet[name]['foods'][0]) ip.append(diet[name]['quantities'][0]) if len(diet[name]['allergens'][0]) == 0: ip.append("NONE") else: ip.append(diet[name]['allergens'][0]) for nutrient in diet[name]['nutrients']: ip.append(diet[name]['nutrients'][nutrient][0]) payload = {"values": [ip]} flog.write_to_sheet(credentials, '1GxFpWhwISzni7DWviFzH500k9eFONpSGQ8uJ0-kBKY4', name, payload) for i in range(1, len(diet[name]['foods'])): ip = ["", "", "", diet[name]['foods'][i], diet[name]['quantities'][i]] if len(diet[name]['allergens'][i]) == 0: ip.append("NONE") else: ip.append(diet[name]['allergens'][i]) for nutrient in diet[name]['nutrients']: ip.append(diet[name]['nutrients'][nutrient][i]) payload = {"values": [ip]} flog.write_to_sheet(credentials, '1GxFpWhwISzni7DWviFzH500k9eFONpSGQ8uJ0-kBKY4', name, payload) # Construct a new row containing nutrient totals to be logged in the person's sheet. ip = ["", "", "", "", "", ""] for nutrient in diet[name]['nutrients']: total = 0 for quantity in diet[name]['nutrients'][nutrient]: total += quantity ip.append("Total: " + str(round(total, 1))) payload = {"values": [ip]} flog.write_to_sheet(credentials, '1GxFpWhwISzni7DWviFzH500k9eFONpSGQ8uJ0-kBKY4', name, payload) # If the person did not consume any foods, then set the nutrient totals to 0 and update the person's sheet. else: ip.append("NONE") ip.append("NONE") ip.append("NONE") for nutrient in diet[name]['nutrients']: ip.append("Total: 0") payload = {"values": [ip]} flog.write_to_sheet(credentials, '1GxFpWhwISzni7DWviFzH500k9eFONpSGQ8uJ0-kBKY4', name, payload) # Read the nutrient values from the person's sheet and update the person's summary log. 
values = flog.readSheet(credentials, '1GxFpWhwISzni7DWviFzH500k9eFONpSGQ8uJ0-kBKY4', name, "A1:A10000") payload = flog.process_values(credentials, values, date, len(diet[name]['foods']), name) daily_log_name = name + "_Daily_Log" values = flog.readSheet(credentials, '1GxFpWhwISzni7DWviFzH500k9eFONpSGQ8uJ0-kBKY4', daily_log_name, "A1:A10000") index_date = 1 dateExists = False for i, j in enumerate(values): for d in j: if d == date: index_date = i + 1 dateExists = True flog.update_sheet(credentials, '1GxFpWhwISzni7DWviFzH500k9eFONpSGQ8uJ0-kBKY4', daily_log_name, index_date, payload, dateExists) def main(): ''' Prompt the user to enter their name. Create a new sheet for the user if their sheet does not already exist in the spreadsheet. Then log the user's diet in their sheet and update the user's summary log. ''' username = input("Please enter your name: ") assistant = aiy.assistant.grpc.get_assistant() with aiy.audio.get_recorder(): aiy.audio.say('What food did you or your family members eat today?', lang="en-US") print('Listening...') text, audio = assistant.recognize() if text: # find date and time cupertino = timezone('US/Pacific') now = datetime.now(cupertino) date = now.strftime("%B %d, %Y") time = now.strftime("%I:%M %p") print(text) textToUser = text.replace("I", "You") textToUser = textToUser.replace("me", "you") textToUser = textToUser.replace("my", "your") textToUser = textToUser.replace("My", "your") textToUser = textToUser.replace("mine", "yours") textToUser = textToUser.replace("Mine", "Yours") print(textToUser) aiy.audio.say(textToUser, lang="en-US") flog = foodLog.foodLog() credentials = flog.sheet_oauth() newText = convert_pronouns_to_names(text, username) substrings, names = get_substrings(newText) for name in names: # Create a new sheet for each person who does not already have a sheet in the Google Spreadsheet. if not flog.isInSheet(credentials, '1GxFpWhwISzni7DWviFzH500k9eFONpSGQ8uJ0-kBKY4', name): flog.create_new_sheet(credentials, '1GxFpWhwISzni7DWviFzH500k9eFONpSGQ8uJ0-kBKY4', name) daily_log_name = name + "_Daily_Log" flog.create_new_sheet(credentials, '1GxFpWhwISzni7DWviFzH500k9eFONpSGQ8uJ0-kBKY4', daily_log_name) diet = get_diet(substrings, names) log_diet(diet, text) if __name__ == '__main__': main()
if "female" in det.get_gender(word): lastFemaleName = word elif "male" in det.get_gender(word): lastMaleName = word
conditional_block
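The filled-in conditional above leans on gender_guesser, whose Detector().get_gender() returns labels such as 'male', 'female', 'mostly_male', 'mostly_female', 'andy' or 'unknown', so the substring checks also catch the 'mostly_*' results. A small standalone sketch of that bookkeeping follows; note that 'female' must be tested before 'male', because "male" is a substring of "female". The name list is illustrative.

# Sketch of the gender-based bookkeeping for pronoun replacement; uses the
# same gender_guesser API as the script above (pip install gender-guesser).

import gender_guesser.detector as gender

det = gender.Detector()

last_male_name = ''
last_female_name = ''

for word in ["Alice", "Bob", "Sasha"]:
    label = det.get_gender(word)  # e.g. 'female', 'mostly_male', 'andy', ...
    # 'female' is checked first on purpose: "male" is a substring of
    # "female", so testing "male" first would misclassify female names.
    if "female" in label:
        last_female_name = word
    elif "male" in label:
        last_male_name = word

print(last_female_name, last_male_name)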
food.py
''' Raj Palleti Last revision: 8/16/19 This class instantiates foodProcessor to get each person's foods, allergens, and nutrients from the user's text, which is recognized using the Google Voice Kit. It then uses foodLog to log each person's dietary info into their own sheet in a Google Spreadsheet. ''' # Set the 'JAVAHOME' variable to your own path of jdk/bin/java. import os os.environ['JAVAHOME'] = "/usr/java/jdk1.8.0_202/bin/java" import aiy.assistant.grpc import aiy.audio import aiy.voicehat import sys from nltk.tag import StanfordNERTagger from nltk.tokenize import word_tokenize import gender_guesser.detector as gender import foodProcessor import foodLog from datetime import datetime from pytz import timezone def convert_pronouns_to_names(text, username): ''' This method uses Stanford NER and the gender-guesser package to convert pronouns in the text to their corresponding names. It returns the processed text after replacing all pronouns. ''' # Set your own path for the classification model and Stanford tagged jar file of StanfordNERTagger. st = StanfordNERTagger( '/home/pi/AIY-voice-kit-python/src/examples/voice/app/stanford-ner-2018-10-16/classifiers/english.all.3class.distsim.crf.ser.gz', '/home/pi/AIY-voice-kit-python/src/examples/voice/app/stanford-ner-2018-10-16/stanford-ner.jar', encoding='utf-8') tokenized_text = word_tokenize(text) classified_text = st.tag(tokenized_text) det = gender.Detector() wordCount = len(tokenized_text) # Keep track of the most recent male and female names, which will be used to replace the pronouns "He" and "She". lastMaleName = '' lastFemaleName = '' index = 0 newText = '' for i in range(wordCount): word = classified_text[i][0] partOfSpeech = classified_text[i][1] if word == 'He' or word == 'he': if lastMaleName != '': newText = newText + lastMaleName else: newText = newText + word elif word == 'She' or word == 'she': if lastFemaleName != '': newText = newText + lastFemaleName else: newText = newText + word elif word == 'I': newText = newText + username else: newText = newText + word if partOfSpeech == 'PERSON': if "female" in det.get_gender(word): lastFemaleName = word elif "male" in det.get_gender(word): lastMaleName = word index = index + len(word) if index < len(text) and text[index] == ' ': index = index + 1 newText += ' ' return newText def get_substrings(text): ''' This method splits the text into substrings, where each begins with a name and continues until reaching the next name. It will return the list of substrings and a list that contains the name in each substring. ''' # Set your own path for the classification model and Stanford tagged jar file of StanfordNERTagger. st = StanfordNERTagger( '/home/pi/AIY-voice-kit-python/src/examples/voice/app/stanford-ner-2018-10-16/classifiers/english.all.3class.distsim.crf.ser.gz', '/home/pi/AIY-voice-kit-python/src/examples/voice/app/stanford-ner-2018-10-16/stanford-ner.jar', encoding='utf-8') tokenized_text = word_tokenize(text) classified_text = st.tag(tokenized_text) wordCount = len(tokenized_text) # charIndexes stores the starting indexes for each name from the text. 
charIndexes = [] charCounter = 0 newCharCounter = 0 substrings = [] names = [] for i in range(wordCount): word = classified_text[i][0] partOfSpeech = classified_text[i][1] if partOfSpeech == 'PERSON': newCharCounter = text.find(word, charCounter) charIndexes.append(newCharCounter) charCounter = newCharCounter + 1 names.append(classified_text[i][0]) for i in range(len(charIndexes)): currIndex = charIndexes[i] if i == len(charIndexes) - 1: substrings.append(text[currIndex: ]) else: nextIndex = charIndexes[i + 1] substrings.append(text[currIndex: nextIndex]) return substrings, names def get_diet(substrings, names): ''' This method uses the substrings to determine the foods, nutrients, and allergens consumed by each person. It will return a dictionary containing the dietary information for each person. ''' ''' "id" and "key" are used to make requests to the Edamam Food API, and they are obtained by registering for an account from Edamam. ''' id = '6bb24f34' key = 'bcd38e86ec9f271288974f431e0c94e6' diet = {} for name in names: if name not in diet: diet[name] = {} diet[name]['foods'] = [] diet[name]['quantities'] = [] diet[name]['allergens'] = [] diet[name]['nutrients'] = {"Energy": [], "Fat": [], "Carbs": [], "Fiber": [], "Sugars": [], "Protein": [], "Sodium": [], "Calcium": [], "Magnesium": [], "Potassium": [], "Iron": [], "Vitamin C": [], "Vitamin E": [], "Vitamin K": []} # For each substring, find the person's name and update the person's dietary information using the foods in the substring. for i in range(len(substrings)): substring = substrings[i] name = names[i] # Instantiate foodProcessor. processor = foodProcessor.foodProcessor(key, id) ''' Get list of foods, foodURIs, measureURIs, and quantities for each food. foodURIs and measureURIs are used to get the nutrients for each food. ''' foods, foodIds, measureURIs, quantities = processor.get_food_list(substring) # Get allergens and nutrients from all foods. details = processor.get_food_details(foodIds, measureURIs) allergens = [] nutrients = {"Energy": [], "Fat": [], "Carbs": [], "Fiber": [], "Sugars": [], "Protein": [], "Sodium": [], "Calcium": [], "Magnesium": [], "Potassium": [], "Iron": [], "Vitamin C": [], "Vitamin E": [], "Vitamin K": []} # Add the foods and quantities to the person's diet. diet[name]['foods'].extend(foods) diet[name]['quantities'].extend(quantities) # For each food, add the allergens and nutrients to the person's diet. for i in range(len(details)): food = details[i] diet[name]['allergens'].append(format_allergens(food['allergens'])) for nutrient in nutrients: diet[name]['nutrients'][nutrient].append(food["nutrients"][nutrient]) return diet def format_allergens(allergens): ''' This method concatenates the list of allergens in each food to a string. ''' if len(allergens) == 1: return allergens[0] algs = '' for i in range(len(allergens)): for j in range(len(allergens[i])): if j == len(allergens[i]) - 1: algs += allergens[i][j] if i != len(allergens) - 1: algs += ', ' else: algs += allergens[i][j] return algs def log_diet(diet, rawText): ''' This method uses the diet dictionary to log the dietary information for each person in the corresponding sheet. It will also update everyone's summary log sheet. ''' # Instantiate foodLog flog = foodLog.foodLog() cupertino = timezone('US/Pacific') now = datetime.now(cupertino) date = now.strftime("%B %d, %Y") time = now.strftime("%I:%M %p") credentials = flog.sheet_oauth() for name in diet: # ip contains the values that will be appended onto the next row of the Google Spreadsheet. 
ip = [] ip.append(date) ip.append(time) ip.append(rawText) ''' If the person consumed at least one food item, then construct a new row containing dietary information to be logged in the person's sheet. ''' if len(diet[name]['foods']) > 0: ip.append(diet[name]['foods'][0]) ip.append(diet[name]['quantities'][0]) if len(diet[name]['allergens'][0]) == 0: ip.append("NONE") else: ip.append(diet[name]['allergens'][0]) for nutrient in diet[name]['nutrients']: ip.append(diet[name]['nutrients'][nutrient][0]) payload = {"values": [ip]} flog.write_to_sheet(credentials, '1GxFpWhwISzni7DWviFzH500k9eFONpSGQ8uJ0-kBKY4', name, payload) for i in range(1, len(diet[name]['foods'])): ip = ["", "", "", diet[name]['foods'][i], diet[name]['quantities'][i]] if len(diet[name]['allergens'][i]) == 0: ip.append("NONE")
for nutrient in diet[name]['nutrients']: ip.append(diet[name]['nutrients'][nutrient][i]) payload = {"values": [ip]} flog.write_to_sheet(credentials, '1GxFpWhwISzni7DWviFzH500k9eFONpSGQ8uJ0-kBKY4', name, payload) # Construct a new row containing nutrient totals to be logged in the person's sheet. ip = ["", "", "", "", "", ""] for nutrient in diet[name]['nutrients']: total = 0 for quantity in diet[name]['nutrients'][nutrient]: total += quantity ip.append("Total: " + str(round(total, 1))) payload = {"values": [ip]} flog.write_to_sheet(credentials, '1GxFpWhwISzni7DWviFzH500k9eFONpSGQ8uJ0-kBKY4', name, payload) # If the person did not consume any foods, then set the nutrient totals to 0 and update the person's sheet. else: ip.append("NONE") ip.append("NONE") ip.append("NONE") for nutrient in diet[name]['nutrients']: ip.append("Total: 0") payload = {"values": [ip]} flog.write_to_sheet(credentials, '1GxFpWhwISzni7DWviFzH500k9eFONpSGQ8uJ0-kBKY4', name, payload) # Read the nutrient values from the person's sheet and update the person's summary log. values = flog.readSheet(credentials, '1GxFpWhwISzni7DWviFzH500k9eFONpSGQ8uJ0-kBKY4', name, "A1:A10000") payload = flog.process_values(credentials, values, date, len(diet[name]['foods']), name) daily_log_name = name + "_Daily_Log" values = flog.readSheet(credentials, '1GxFpWhwISzni7DWviFzH500k9eFONpSGQ8uJ0-kBKY4', daily_log_name, "A1:A10000") index_date = 1 dateExists = False for i, j in enumerate(values): for d in j: if d == date: index_date = i + 1 dateExists = True flog.update_sheet(credentials, '1GxFpWhwISzni7DWviFzH500k9eFONpSGQ8uJ0-kBKY4', daily_log_name, index_date, payload, dateExists) def main(): ''' Prompt the user to enter their name. Create a new sheet for the user if their sheet does not already exist in the spreadsheet. Then log the user's diet in their sheet and update the user's summary log. ''' username = input("Please enter your name: ") assistant = aiy.assistant.grpc.get_assistant() with aiy.audio.get_recorder(): aiy.audio.say('What food did you or your family members eat today?', lang="en-US") print('Listening...') text, audio = assistant.recognize() if text: # find date and time cupertino = timezone('US/Pacific') now = datetime.now(cupertino) date = now.strftime("%B %d, %Y") time = now.strftime("%I:%M %p") print(text) textToUser = text.replace("I", "You") textToUser = textToUser.replace("me", "you") textToUser = textToUser.replace("my", "your") textToUser = textToUser.replace("My", "your") textToUser = textToUser.replace("mine", "yours") textToUser = textToUser.replace("Mine", "Yours") print(textToUser) aiy.audio.say(textToUser, lang="en-US") flog = foodLog.foodLog() credentials = flog.sheet_oauth() newText = convert_pronouns_to_names(text, username) substrings, names = get_substrings(newText) for name in names: # Create a new sheet for each person who does not already have a sheet in the Google Spreadsheet. if not flog.isInSheet(credentials, '1GxFpWhwISzni7DWviFzH500k9eFONpSGQ8uJ0-kBKY4', name): flog.create_new_sheet(credentials, '1GxFpWhwISzni7DWviFzH500k9eFONpSGQ8uJ0-kBKY4', name) daily_log_name = name + "_Daily_Log" flog.create_new_sheet(credentials, '1GxFpWhwISzni7DWviFzH500k9eFONpSGQ8uJ0-kBKY4', daily_log_name) diet = get_diet(substrings, names) log_diet(diet, text) if __name__ == '__main__': main()
else: ip.append(diet[name]['allergens'][i])
random_line_split
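log_diet in the record above appends one spreadsheet row per food and a final row of per-nutrient totals, each wrapped as {"values": [row]} before being handed to the sheet writer. The totalling step is sketched below in isolation; the diet structure mirrors the dictionaries built in get_diet, and the nutrient numbers are made up for the example.

# Sketch of the per-nutrient totalling used for the summary row; the diet
# dict shape matches get_diet's output, values here are illustrative only.

diet = {
    "Alice": {
        "foods": ["apple", "egg"],
        "nutrients": {
            "Energy": [95.0, 78.0],
            "Protein": [0.5, 6.3],
        },
    },
}

for name, info in diet.items():
    # Padding for the date/time/text/food/quantity/allergen columns.
    totals_row = ["", "", "", "", "", ""]
    for nutrient, amounts in info["nutrients"].items():
        totals_row.append("Total: " + str(round(sum(amounts), 1)))
    payload = {"values": [totals_row]}  # same wrapping as in log_diet
    print(name, payload)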
food.py
''' Raj Palleti Last revision: 8/16/19 This class instantiates foodProcessor to get each person's foods, allergens, and nutrients from the user's text, which is recognized using the Google Voice Kit. It then uses foodLog to log each person's dietary info into their own sheet in a Google Spreadsheet. ''' # Set the 'JAVAHOME' variable to your own path of jdk/bin/java. import os os.environ['JAVAHOME'] = "/usr/java/jdk1.8.0_202/bin/java" import aiy.assistant.grpc import aiy.audio import aiy.voicehat import sys from nltk.tag import StanfordNERTagger from nltk.tokenize import word_tokenize import gender_guesser.detector as gender import foodProcessor import foodLog from datetime import datetime from pytz import timezone def convert_pronouns_to_names(text, username): ''' This method uses Stanford NER and the gender-guesser package to convert pronouns in the text to their corresponding names. It returns the processed text after replacing all pronouns. ''' # Set your own path for the classification model and Stanford tagged jar file of StanfordNERTagger. st = StanfordNERTagger( '/home/pi/AIY-voice-kit-python/src/examples/voice/app/stanford-ner-2018-10-16/classifiers/english.all.3class.distsim.crf.ser.gz', '/home/pi/AIY-voice-kit-python/src/examples/voice/app/stanford-ner-2018-10-16/stanford-ner.jar', encoding='utf-8') tokenized_text = word_tokenize(text) classified_text = st.tag(tokenized_text) det = gender.Detector() wordCount = len(tokenized_text) # Keep track of the most recent male and female names, which will be used to replace the pronouns "He" and "She". lastMaleName = '' lastFemaleName = '' index = 0 newText = '' for i in range(wordCount): word = classified_text[i][0] partOfSpeech = classified_text[i][1] if word == 'He' or word == 'he': if lastMaleName != '': newText = newText + lastMaleName else: newText = newText + word elif word == 'She' or word == 'she': if lastFemaleName != '': newText = newText + lastFemaleName else: newText = newText + word elif word == 'I': newText = newText + username else: newText = newText + word if partOfSpeech == 'PERSON': if "female" in det.get_gender(word): lastFemaleName = word elif "male" in det.get_gender(word): lastMaleName = word index = index + len(word) if index < len(text) and text[index] == ' ': index = index + 1 newText += ' ' return newText def get_substrings(text): ''' This method splits the text into substrings, where each begins with a name and continues until reaching the next name. It will return the list of substrings and a list that contains the name in each substring. ''' # Set your own path for the classification model and Stanford tagged jar file of StanfordNERTagger. st = StanfordNERTagger( '/home/pi/AIY-voice-kit-python/src/examples/voice/app/stanford-ner-2018-10-16/classifiers/english.all.3class.distsim.crf.ser.gz', '/home/pi/AIY-voice-kit-python/src/examples/voice/app/stanford-ner-2018-10-16/stanford-ner.jar', encoding='utf-8') tokenized_text = word_tokenize(text) classified_text = st.tag(tokenized_text) wordCount = len(tokenized_text) # charIndexes stores the starting indexes for each name from the text. 
charIndexes = [] charCounter = 0 newCharCounter = 0 substrings = [] names = [] for i in range(wordCount): word = classified_text[i][0] partOfSpeech = classified_text[i][1] if partOfSpeech == 'PERSON': newCharCounter = text.find(word, charCounter) charIndexes.append(newCharCounter) charCounter = newCharCounter + 1 names.append(classified_text[i][0]) for i in range(len(charIndexes)): currIndex = charIndexes[i] if i == len(charIndexes) - 1: substrings.append(text[currIndex: ]) else: nextIndex = charIndexes[i + 1] substrings.append(text[currIndex: nextIndex]) return substrings, names def get_diet(substrings, names): ''' This method uses the substrings to determine the foods, nutrients, and allergens consumed by each person. It will return a dictionary containing the dietary information for each person. ''' ''' "id" and "key" are used to make requests to the Edamam Food API, and they are obtained by registering for an account from Edamam. ''' id = '6bb24f34' key = 'bcd38e86ec9f271288974f431e0c94e6' diet = {} for name in names: if name not in diet: diet[name] = {} diet[name]['foods'] = [] diet[name]['quantities'] = [] diet[name]['allergens'] = [] diet[name]['nutrients'] = {"Energy": [], "Fat": [], "Carbs": [], "Fiber": [], "Sugars": [], "Protein": [], "Sodium": [], "Calcium": [], "Magnesium": [], "Potassium": [], "Iron": [], "Vitamin C": [], "Vitamin E": [], "Vitamin K": []} # For each substring, find the person's name and update the person's dietary information using the foods in the substring. for i in range(len(substrings)): substring = substrings[i] name = names[i] # Instantiate foodProcessor. processor = foodProcessor.foodProcessor(key, id) ''' Get list of foods, foodURIs, measureURIs, and quantities for each food. foodURIs and measureURIs are used to get the nutrients for each food. ''' foods, foodIds, measureURIs, quantities = processor.get_food_list(substring) # Get allergens and nutrients from all foods. details = processor.get_food_details(foodIds, measureURIs) allergens = [] nutrients = {"Energy": [], "Fat": [], "Carbs": [], "Fiber": [], "Sugars": [], "Protein": [], "Sodium": [], "Calcium": [], "Magnesium": [], "Potassium": [], "Iron": [], "Vitamin C": [], "Vitamin E": [], "Vitamin K": []} # Add the foods and quantities to the person's diet. diet[name]['foods'].extend(foods) diet[name]['quantities'].extend(quantities) # For each food, add the allergens and nutrients to the person's diet. for i in range(len(details)): food = details[i] diet[name]['allergens'].append(format_allergens(food['allergens'])) for nutrient in nutrients: diet[name]['nutrients'][nutrient].append(food["nutrients"][nutrient]) return diet def format_allergens(allergens):
def log_diet(diet, rawText): ''' This method uses the diet dictionary to log the dietary information for each person in the corresponding sheet. It will also update everyone's summary log sheet. ''' # Instantiate foodLog flog = foodLog.foodLog() cupertino = timezone('US/Pacific') now = datetime.now(cupertino) date = now.strftime("%B %d, %Y") time = now.strftime("%I:%M %p") credentials = flog.sheet_oauth() for name in diet: # ip contains the values that will be appended onto the next row of the Google Spreadsheet. ip = [] ip.append(date) ip.append(time) ip.append(rawText) ''' If the person consumed at least one food item, then construct a new row containing dietary information to be logged in the person's sheet. ''' if len(diet[name]['foods']) > 0: ip.append(diet[name]['foods'][0]) ip.append(diet[name]['quantities'][0]) if len(diet[name]['allergens'][0]) == 0: ip.append("NONE") else: ip.append(diet[name]['allergens'][0]) for nutrient in diet[name]['nutrients']: ip.append(diet[name]['nutrients'][nutrient][0]) payload = {"values": [ip]} flog.write_to_sheet(credentials, '1GxFpWhwISzni7DWviFzH500k9eFONpSGQ8uJ0-kBKY4', name, payload) for i in range(1, len(diet[name]['foods'])): ip = ["", "", "", diet[name]['foods'][i], diet[name]['quantities'][i]] if len(diet[name]['allergens'][i]) == 0: ip.append("NONE") else: ip.append(diet[name]['allergens'][i]) for nutrient in diet[name]['nutrients']: ip.append(diet[name]['nutrients'][nutrient][i]) payload = {"values": [ip]} flog.write_to_sheet(credentials, '1GxFpWhwISzni7DWviFzH500k9eFONpSGQ8uJ0-kBKY4', name, payload) # Construct a new row containing nutrient totals to be logged in the person's sheet. ip = ["", "", "", "", "", ""] for nutrient in diet[name]['nutrients']: total = 0 for quantity in diet[name]['nutrients'][nutrient]: total += quantity ip.append("Total: " + str(round(total, 1))) payload = {"values": [ip]} flog.write_to_sheet(credentials, '1GxFpWhwISzni7DWviFzH500k9eFONpSGQ8uJ0-kBKY4', name, payload) # If the person did not consume any foods, then set the nutrient totals to 0 and update the person's sheet. else: ip.append("NONE") ip.append("NONE") ip.append("NONE") for nutrient in diet[name]['nutrients']: ip.append("Total: 0") payload = {"values": [ip]} flog.write_to_sheet(credentials, '1GxFpWhwISzni7DWviFzH500k9eFONpSGQ8uJ0-kBKY4', name, payload) # Read the nutrient values from the person's sheet and update the person's summary log. values = flog.readSheet(credentials, '1GxFpWhwISzni7DWviFzH500k9eFONpSGQ8uJ0-kBKY4', name, "A1:A10000") payload = flog.process_values(credentials, values, date, len(diet[name]['foods']), name) daily_log_name = name + "_Daily_Log" values = flog.readSheet(credentials, '1GxFpWhwISzni7DWviFzH500k9eFONpSGQ8uJ0-kBKY4', daily_log_name, "A1:A10000") index_date = 1 dateExists = False for i, j in enumerate(values): for d in j: if d == date: index_date = i + 1 dateExists = True flog.update_sheet(credentials, '1GxFpWhwISzni7DWviFzH500k9eFONpSGQ8uJ0-kBKY4', daily_log_name, index_date, payload, dateExists) def main(): ''' Prompt the user to enter their name. Create a new sheet for the user if their sheet does not already exist in the spreadsheet. Then log the user's diet in their sheet and update the user's summary log. 
''' username = input("Please enter your name: ") assistant = aiy.assistant.grpc.get_assistant() with aiy.audio.get_recorder(): aiy.audio.say('What food did you or your family members eat today?', lang="en-US") print('Listening...') text, audio = assistant.recognize() if text: # find date and time cupertino = timezone('US/Pacific') now = datetime.now(cupertino) date = now.strftime("%B %d, %Y") time = now.strftime("%I:%M %p") print(text) textToUser = text.replace("I", "You") textToUser = textToUser.replace("me", "you") textToUser = textToUser.replace("my", "your") textToUser = textToUser.replace("My", "your") textToUser = textToUser.replace("mine", "yours") textToUser = textToUser.replace("Mine", "Yours") print(textToUser) aiy.audio.say(textToUser, lang="en-US") flog = foodLog.foodLog() credentials = flog.sheet_oauth() newText = convert_pronouns_to_names(text, username) substrings, names = get_substrings(newText) for name in names: # Create a new sheet for each person who does not already have a sheet in the Google Spreadsheet. if not flog.isInSheet(credentials, '1GxFpWhwISzni7DWviFzH500k9eFONpSGQ8uJ0-kBKY4', name): flog.create_new_sheet(credentials, '1GxFpWhwISzni7DWviFzH500k9eFONpSGQ8uJ0-kBKY4', name) daily_log_name = name + "_Daily_Log" flog.create_new_sheet(credentials, '1GxFpWhwISzni7DWviFzH500k9eFONpSGQ8uJ0-kBKY4', daily_log_name) diet = get_diet(substrings, names) log_diet(diet, text) if __name__ == '__main__': main()
''' This method concatenates the list of allergens in each food to a string. ''' if len(allergens) == 1: return allergens[0] algs = '' for i in range(len(allergens)): for j in range(len(allergens[i])): if j == len(allergens[i]) - 1: algs += allergens[i][j] if i != len(allergens) - 1: algs += ', ' else: algs += allergens[i][j] return algs
identifier_body
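The reconstructed format_allergens body above builds the comma-separated string one piece at a time, special-casing the last element of each entry. If the input is simply a flat list of allergen names (an assumption about the data shape), the same result can be sketched with str.join; the join_allergens helper below is an alternative formulation, not the script's own code.

# Sketch: joining allergen names with str.join, assuming the input is a
# flat list of name strings (an assumption about the data shape).

def join_allergens(allergen_names):
    """['Milk', 'Eggs', 'Soy'] -> 'Milk, Eggs, Soy'."""
    return ', '.join(allergen_names)


print(join_allergens(['Milk', 'Eggs', 'Soy']))  # Milk, Eggs, Soy
print(join_allergens([]))                       # prints an empty line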
parser.go
// Package parser implements a parser for HCL (HashiCorp Configuration // Language) package parser import ( "bytes" "errors" "fmt" "strings" "github.com/hashicorp/hcl/hcl/ast" "github.com/hashicorp/hcl/hcl/scanner" "github.com/hashicorp/hcl/hcl/token" ) type Parser struct { sc *scanner.Scanner // Last read token tok token.Token commaPrev token.Token comments []*ast.CommentGroup leadComment *ast.CommentGroup // last lead comment lineComment *ast.CommentGroup // last line comment enableTrace bool indent int n int // buffer size (max = 1) } func newParser(src []byte) *Parser { return &Parser{ sc: scanner.New(src), } } // Parse returns the fully parsed source and returns the abstract syntax tree. func Parse(src []byte) (*ast.File, error) { // normalize all line endings // since the scanner and output only work with "\n" line endings, we may // end up with dangling "\r" characters in the parsed data. src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1) p := newParser(src) return p.Parse() } var errEofToken = errors.New("EOF token found") // Parse returns the fully parsed source and returns the abstract syntax tree. func (p *Parser) Parse() (*ast.File, error) { f := &ast.File{} var err, scerr error p.sc.Error = func(pos token.Pos, msg string) { scerr = &PosError{Pos: pos, Err: errors.New(msg)} } f.Node, err = p.objectList(false) if scerr != nil { return nil, scerr } if err != nil { return nil, err } f.Comments = p.comments return f, nil } // objectList parses a list of items within an object (generally k/v pairs). // The parameter" obj" tells this whether to we are within an object (braces: // '{', '}') or just at the top level. If we're within an object, we end // at an RBRACE. func (p *Parser) objectList(obj bool) (*ast.ObjectList, error) { defer un(trace(p, "ParseObjectList")) node := &ast.ObjectList{} for { if obj { tok := p.scan() p.unscan() if tok.Type == token.RBRACE { break } } n, err := p.objectItem() if err == errEofToken
// we don't return a nil node, because might want to use already // collected items. if err != nil { return node, err } node.Add(n) // object lists can be optionally comma-delimited e.g. when a list of maps // is being expressed, so a comma is allowed here - it's simply consumed tok := p.scan() if tok.Type != token.COMMA { p.unscan() } } return node, nil } func (p *Parser) consumeComment() (comment *ast.Comment, endline int) { endline = p.tok.Pos.Line // count the endline if it's multiline comment, ie starting with /* if len(p.tok.Text) > 1 && p.tok.Text[1] == '*' { // don't use range here - no need to decode Unicode code points for i := 0; i < len(p.tok.Text); i++ { if p.tok.Text[i] == '\n' { endline++ } } } comment = &ast.Comment{Start: p.tok.Pos, Text: p.tok.Text} p.tok = p.sc.Scan() return } func (p *Parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) { var list []*ast.Comment endline = p.tok.Pos.Line for p.tok.Type == token.COMMENT && p.tok.Pos.Line <= endline+n { var comment *ast.Comment comment, endline = p.consumeComment() list = append(list, comment) } // add comment group to the comments list comments = &ast.CommentGroup{List: list} p.comments = append(p.comments, comments) return } // objectItem parses a single object item func (p *Parser) objectItem() (*ast.ObjectItem, error) { defer un(trace(p, "ParseObjectItem")) keys, err := p.objectKey() if len(keys) > 0 && err == errEofToken { // We ignore eof token here since it is an error if we didn't // receive a value (but we did receive a key) for the item. err = nil } if len(keys) > 0 && err != nil && p.tok.Type == token.RBRACE { // This is a strange boolean statement, but what it means is: // We have keys with no value, and we're likely in an object // (since RBrace ends an object). For this, we set err to nil so // we continue and get the error below of having the wrong value // type. err = nil // Reset the token type so we don't think it completed fine. See // objectType which uses p.tok.Type to check if we're done with // the object. p.tok.Type = token.EOF } if err != nil { return nil, err } o := &ast.ObjectItem{ Keys: keys, } if p.leadComment != nil { o.LeadComment = p.leadComment p.leadComment = nil } switch p.tok.Type { case token.ASSIGN: o.Assign = p.tok.Pos o.Val, err = p.object() if err != nil { return nil, err } case token.LBRACE: o.Val, err = p.objectType() if err != nil { return nil, err } default: keyStr := make([]string, 0, len(keys)) for _, k := range keys { keyStr = append(keyStr, k.Token.Text) } return nil, &PosError{ Pos: p.tok.Pos, Err: fmt.Errorf( "key '%s' expected start of object ('{') or assignment ('=')", strings.Join(keyStr, " ")), } } // key=#comment // val if p.lineComment != nil { o.LineComment, p.lineComment = p.lineComment, nil } // do a look-ahead for line comment p.scan() if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil { o.LineComment = p.lineComment p.lineComment = nil } p.unscan() return o, nil } // objectKey parses an object key and returns a ObjectKey AST func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { keyCount := 0 keys := make([]*ast.ObjectKey, 0) for { tok := p.scan() switch tok.Type { case token.EOF: // It is very important to also return the keys here as well as // the error. This is because we need to be able to tell if we // did parse keys prior to finding the EOF, or if we just found // a bare EOF. return keys, errEofToken case token.ASSIGN: // assignment or object only, but not nested objects. 
this is not // allowed: `foo bar = {}` if keyCount > 1 { return nil, &PosError{ Pos: p.tok.Pos, Err: fmt.Errorf("nested object expected: LBRACE got: %s", p.tok.Type), } } if keyCount == 0 { return nil, &PosError{ Pos: p.tok.Pos, Err: errors.New("no object keys found!"), } } return keys, nil case token.LBRACE: var err error // If we have no keys, then it is a syntax error. i.e. {{}} is not // allowed. if len(keys) == 0 { err = &PosError{ Pos: p.tok.Pos, Err: fmt.Errorf("expected: IDENT | STRING got: %s", p.tok.Type), } } // object return keys, err case token.IDENT, token.STRING: keyCount++ keys = append(keys, &ast.ObjectKey{Token: p.tok}) case token.ILLEGAL: return keys, &PosError{ Pos: p.tok.Pos, Err: fmt.Errorf("illegal character"), } default: return keys, &PosError{ Pos: p.tok.Pos, Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type), } } } } // object parses any type of object, such as number, bool, string, object or // list. func (p *Parser) object() (ast.Node, error) { defer un(trace(p, "ParseType")) tok := p.scan() switch tok.Type { case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC: return p.literalType() case token.LBRACE: return p.objectType() case token.LBRACK: return p.listType() case token.COMMENT: // implement comment case token.EOF: return nil, errEofToken } return nil, &PosError{ Pos: tok.Pos, Err: fmt.Errorf("Unknown token: %+v", tok), } } // objectType parses an object type and returns a ObjectType AST func (p *Parser) objectType() (*ast.ObjectType, error) { defer un(trace(p, "ParseObjectType")) // we assume that the currently scanned token is a LBRACE o := &ast.ObjectType{ Lbrace: p.tok.Pos, } l, err := p.objectList(true) // if we hit RBRACE, we are good to go (means we parsed all Items), if it's // not a RBRACE, it's an syntax error and we just return it. 
if err != nil && p.tok.Type != token.RBRACE { return nil, err } // No error, scan and expect the ending to be a brace if tok := p.scan(); tok.Type != token.RBRACE { return nil, &PosError{ Pos: tok.Pos, Err: fmt.Errorf("object expected closing RBRACE got: %s", tok.Type), } } o.List = l o.Rbrace = p.tok.Pos // advanced via parseObjectList return o, nil } // listType parses a list type and returns a ListType AST func (p *Parser) listType() (*ast.ListType, error) { defer un(trace(p, "ParseListType")) // we assume that the currently scanned token is a LBRACK l := &ast.ListType{ Lbrack: p.tok.Pos, } needComma := false for { tok := p.scan() if needComma { switch tok.Type { case token.COMMA, token.RBRACK: default: return nil, &PosError{ Pos: tok.Pos, Err: fmt.Errorf( "error parsing list, expected comma or list end, got: %s", tok.Type), } } } switch tok.Type { case token.BOOL, token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC: node, err := p.literalType() if err != nil { return nil, err } // If there is a lead comment, apply it if p.leadComment != nil { node.LeadComment = p.leadComment p.leadComment = nil } l.Add(node) needComma = true case token.COMMA: // get next list item or we are at the end // do a look-ahead for line comment p.scan() if p.lineComment != nil && len(l.List) > 0 { lit, ok := l.List[len(l.List)-1].(*ast.LiteralType) if ok { lit.LineComment = p.lineComment l.List[len(l.List)-1] = lit p.lineComment = nil } } p.unscan() needComma = false continue case token.LBRACE: // Looks like a nested object, so parse it out node, err := p.objectType() if err != nil { return nil, &PosError{ Pos: tok.Pos, Err: fmt.Errorf( "error while trying to parse object within list: %s", err), } } l.Add(node) needComma = true case token.LBRACK: node, err := p.listType() if err != nil { return nil, &PosError{ Pos: tok.Pos, Err: fmt.Errorf( "error while trying to parse list within list: %s", err), } } l.Add(node) case token.RBRACK: // finished l.Rbrack = p.tok.Pos return l, nil default: return nil, &PosError{ Pos: tok.Pos, Err: fmt.Errorf("unexpected token while parsing list: %s", tok.Type), } } } } // literalType parses a literal type and returns a LiteralType AST func (p *Parser) literalType() (*ast.LiteralType, error) { defer un(trace(p, "ParseLiteral")) return &ast.LiteralType{ Token: p.tok, }, nil } // scan returns the next token from the underlying scanner. If a token has // been unscanned then read that instead. In the process, it collects any // comment groups encountered, and remembers the last lead and line comments. func (p *Parser) scan() token.Token { // If we have a token on the buffer, then return it. if p.n != 0 { p.n = 0 return p.tok } // Otherwise read the next token from the scanner and Save it to the buffer // in case we unscan later. prev := p.tok p.tok = p.sc.Scan() if p.tok.Type == token.COMMENT { var comment *ast.CommentGroup var endline int // fmt.Printf("p.tok.Pos.Line = %+v prev: %d endline %d \n", // p.tok.Pos.Line, prev.Pos.Line, endline) if p.tok.Pos.Line == prev.Pos.Line { // The comment is on same line as the previous token; it // cannot be a lead comment but may be a line comment. comment, endline = p.consumeCommentGroup(0) if p.tok.Pos.Line != endline { // The next token is on a different line, thus // the last comment group is a line comment. 
p.lineComment = comment } } // consume successor comments, if any endline = -1 for p.tok.Type == token.COMMENT { comment, endline = p.consumeCommentGroup(1) } if endline+1 == p.tok.Pos.Line && p.tok.Type != token.RBRACE { switch p.tok.Type { case token.RBRACE, token.RBRACK: // Do not count for these cases default: // The next token is following on the line immediately after the // comment group, thus the last comment group is a lead comment. p.leadComment = comment } } } return p.tok } // unscan pushes the previously read token back onto the buffer. func (p *Parser) unscan() { p.n = 1 } // ---------------------------------------------------------------------------- // Parsing support func (p *Parser) printTrace(a ...interface{}) { if !p.enableTrace { return } const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " const n = len(dots) fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) i := 2 * p.indent for i > n { fmt.Print(dots) i -= n } // i <= n fmt.Print(dots[0:i]) fmt.Println(a...) } func trace(p *Parser, msg string) *Parser { p.printTrace(msg, "(") p.indent++ return p } // Usage pattern: defer un(trace(p, "...")) func un(p *Parser) { p.indent-- p.printTrace(")") }
{ break // we are finished }
conditional_block
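For context on how this parser is typically driven, here is a minimal usage sketch. It assumes the import paths that appear in the source above and the `Items` field of `ast.ObjectList`, which is not shown in this excerpt; treat it as an illustration rather than canonical API documentation.

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl/hcl/ast"
	"github.com/hashicorp/hcl/hcl/parser"
)

func main() {
	// A small HCL document: one assignment and one nested object.
	src := []byte(`name = "teapot"
server "web" {
  port = 8080
}`)

	f, err := parser.Parse(src)
	if err != nil {
		log.Fatal(err)
	}

	// At the top level the root node is an *ast.ObjectList; each item carries
	// its keys plus either an assigned value or a nested object type.
	list, ok := f.Node.(*ast.ObjectList)
	if !ok {
		log.Fatal("unexpected root node type")
	}
	for _, item := range list.Items {
		for _, k := range item.Keys {
			fmt.Printf("%s ", k.Token.Text)
		}
		fmt.Printf("-> %T\n", item.Val)
	}
}
```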
parser.go
// Package parser implements a parser for HCL (HashiCorp Configuration // Language) package parser import ( "bytes" "errors" "fmt" "strings" "github.com/hashicorp/hcl/hcl/ast" "github.com/hashicorp/hcl/hcl/scanner" "github.com/hashicorp/hcl/hcl/token" ) type Parser struct { sc *scanner.Scanner // Last read token tok token.Token commaPrev token.Token comments []*ast.CommentGroup leadComment *ast.CommentGroup // last lead comment lineComment *ast.CommentGroup // last line comment enableTrace bool indent int n int // buffer size (max = 1) } func newParser(src []byte) *Parser { return &Parser{ sc: scanner.New(src), } } // Parse returns the fully parsed source and returns the abstract syntax tree. func Parse(src []byte) (*ast.File, error) { // normalize all line endings // since the scanner and output only work with "\n" line endings, we may // end up with dangling "\r" characters in the parsed data. src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1) p := newParser(src) return p.Parse() } var errEofToken = errors.New("EOF token found") // Parse returns the fully parsed source and returns the abstract syntax tree. func (p *Parser) Parse() (*ast.File, error) { f := &ast.File{} var err, scerr error p.sc.Error = func(pos token.Pos, msg string) { scerr = &PosError{Pos: pos, Err: errors.New(msg)} } f.Node, err = p.objectList(false) if scerr != nil { return nil, scerr } if err != nil { return nil, err } f.Comments = p.comments return f, nil } // objectList parses a list of items within an object (generally k/v pairs). // The parameter" obj" tells this whether to we are within an object (braces: // '{', '}') or just at the top level. If we're within an object, we end // at an RBRACE. func (p *Parser) objectList(obj bool) (*ast.ObjectList, error) { defer un(trace(p, "ParseObjectList")) node := &ast.ObjectList{} for { if obj { tok := p.scan() p.unscan() if tok.Type == token.RBRACE { break } } n, err := p.objectItem() if err == errEofToken { break // we are finished } // we don't return a nil node, because might want to use already // collected items. if err != nil { return node, err } node.Add(n) // object lists can be optionally comma-delimited e.g. 
when a list of maps // is being expressed, so a comma is allowed here - it's simply consumed tok := p.scan() if tok.Type != token.COMMA { p.unscan() } } return node, nil } func (p *Parser) consumeComment() (comment *ast.Comment, endline int) { endline = p.tok.Pos.Line // count the endline if it's multiline comment, ie starting with /* if len(p.tok.Text) > 1 && p.tok.Text[1] == '*' { // don't use range here - no need to decode Unicode code points for i := 0; i < len(p.tok.Text); i++ { if p.tok.Text[i] == '\n' { endline++ } } } comment = &ast.Comment{Start: p.tok.Pos, Text: p.tok.Text} p.tok = p.sc.Scan() return } func (p *Parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) { var list []*ast.Comment endline = p.tok.Pos.Line for p.tok.Type == token.COMMENT && p.tok.Pos.Line <= endline+n { var comment *ast.Comment comment, endline = p.consumeComment() list = append(list, comment) } // add comment group to the comments list comments = &ast.CommentGroup{List: list} p.comments = append(p.comments, comments) return } // objectItem parses a single object item func (p *Parser) objectItem() (*ast.ObjectItem, error) { defer un(trace(p, "ParseObjectItem")) keys, err := p.objectKey() if len(keys) > 0 && err == errEofToken { // We ignore eof token here since it is an error if we didn't // receive a value (but we did receive a key) for the item. err = nil } if len(keys) > 0 && err != nil && p.tok.Type == token.RBRACE { // This is a strange boolean statement, but what it means is: // We have keys with no value, and we're likely in an object // (since RBrace ends an object). For this, we set err to nil so // we continue and get the error below of having the wrong value // type. err = nil // Reset the token type so we don't think it completed fine. See // objectType which uses p.tok.Type to check if we're done with // the object. p.tok.Type = token.EOF } if err != nil { return nil, err } o := &ast.ObjectItem{ Keys: keys, } if p.leadComment != nil { o.LeadComment = p.leadComment p.leadComment = nil } switch p.tok.Type { case token.ASSIGN: o.Assign = p.tok.Pos o.Val, err = p.object() if err != nil { return nil, err } case token.LBRACE: o.Val, err = p.objectType() if err != nil { return nil, err } default: keyStr := make([]string, 0, len(keys)) for _, k := range keys { keyStr = append(keyStr, k.Token.Text) } return nil, &PosError{ Pos: p.tok.Pos, Err: fmt.Errorf( "key '%s' expected start of object ('{') or assignment ('=')", strings.Join(keyStr, " ")), } } // key=#comment // val if p.lineComment != nil { o.LineComment, p.lineComment = p.lineComment, nil } // do a look-ahead for line comment p.scan() if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil { o.LineComment = p.lineComment p.lineComment = nil } p.unscan() return o, nil } // objectKey parses an object key and returns a ObjectKey AST func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { keyCount := 0 keys := make([]*ast.ObjectKey, 0) for { tok := p.scan() switch tok.Type { case token.EOF: // It is very important to also return the keys here as well as // the error. This is because we need to be able to tell if we // did parse keys prior to finding the EOF, or if we just found // a bare EOF. return keys, errEofToken case token.ASSIGN: // assignment or object only, but not nested objects. 
this is not // allowed: `foo bar = {}` if keyCount > 1 { return nil, &PosError{ Pos: p.tok.Pos, Err: fmt.Errorf("nested object expected: LBRACE got: %s", p.tok.Type), } } if keyCount == 0 { return nil, &PosError{ Pos: p.tok.Pos, Err: errors.New("no object keys found!"), } } return keys, nil case token.LBRACE: var err error // If we have no keys, then it is a syntax error. i.e. {{}} is not // allowed. if len(keys) == 0 { err = &PosError{ Pos: p.tok.Pos, Err: fmt.Errorf("expected: IDENT | STRING got: %s", p.tok.Type), } } // object return keys, err case token.IDENT, token.STRING: keyCount++ keys = append(keys, &ast.ObjectKey{Token: p.tok}) case token.ILLEGAL: return keys, &PosError{ Pos: p.tok.Pos, Err: fmt.Errorf("illegal character"), } default: return keys, &PosError{ Pos: p.tok.Pos, Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type), } } } } // object parses any type of object, such as number, bool, string, object or // list. func (p *Parser) object() (ast.Node, error) { defer un(trace(p, "ParseType")) tok := p.scan() switch tok.Type { case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC: return p.literalType() case token.LBRACE: return p.objectType() case token.LBRACK: return p.listType() case token.COMMENT: // implement comment case token.EOF: return nil, errEofToken } return nil, &PosError{ Pos: tok.Pos, Err: fmt.Errorf("Unknown token: %+v", tok), } } // objectType parses an object type and returns a ObjectType AST func (p *Parser) objectType() (*ast.ObjectType, error) { defer un(trace(p, "ParseObjectType")) // we assume that the currently scanned token is a LBRACE o := &ast.ObjectType{ Lbrace: p.tok.Pos, } l, err := p.objectList(true) // if we hit RBRACE, we are good to go (means we parsed all Items), if it's // not a RBRACE, it's an syntax error and we just return it. 
if err != nil && p.tok.Type != token.RBRACE { return nil, err } // No error, scan and expect the ending to be a brace if tok := p.scan(); tok.Type != token.RBRACE { return nil, &PosError{ Pos: tok.Pos, Err: fmt.Errorf("object expected closing RBRACE got: %s", tok.Type), } } o.List = l o.Rbrace = p.tok.Pos // advanced via parseObjectList return o, nil } // listType parses a list type and returns a ListType AST func (p *Parser) listType() (*ast.ListType, error) { defer un(trace(p, "ParseListType")) // we assume that the currently scanned token is a LBRACK l := &ast.ListType{ Lbrack: p.tok.Pos, } needComma := false for { tok := p.scan() if needComma { switch tok.Type { case token.COMMA, token.RBRACK: default: return nil, &PosError{ Pos: tok.Pos, Err: fmt.Errorf( "error parsing list, expected comma or list end, got: %s", tok.Type), } } } switch tok.Type { case token.BOOL, token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC: node, err := p.literalType() if err != nil { return nil, err } // If there is a lead comment, apply it if p.leadComment != nil { node.LeadComment = p.leadComment p.leadComment = nil } l.Add(node) needComma = true case token.COMMA: // get next list item or we are at the end // do a look-ahead for line comment p.scan() if p.lineComment != nil && len(l.List) > 0 { lit, ok := l.List[len(l.List)-1].(*ast.LiteralType) if ok { lit.LineComment = p.lineComment l.List[len(l.List)-1] = lit p.lineComment = nil } } p.unscan() needComma = false continue case token.LBRACE: // Looks like a nested object, so parse it out node, err := p.objectType() if err != nil { return nil, &PosError{ Pos: tok.Pos, Err: fmt.Errorf( "error while trying to parse object within list: %s", err), } } l.Add(node) needComma = true case token.LBRACK: node, err := p.listType() if err != nil { return nil, &PosError{ Pos: tok.Pos, Err: fmt.Errorf( "error while trying to parse list within list: %s", err), } } l.Add(node) case token.RBRACK: // finished l.Rbrack = p.tok.Pos return l, nil default: return nil, &PosError{ Pos: tok.Pos, Err: fmt.Errorf("unexpected token while parsing list: %s", tok.Type), } } } } // literalType parses a literal type and returns a LiteralType AST func (p *Parser) literalType() (*ast.LiteralType, error) { defer un(trace(p, "ParseLiteral")) return &ast.LiteralType{ Token: p.tok, }, nil } // scan returns the next token from the underlying scanner. If a token has // been unscanned then read that instead. In the process, it collects any // comment groups encountered, and remembers the last lead and line comments. func (p *Parser) scan() token.Token { // If we have a token on the buffer, then return it. if p.n != 0 { p.n = 0 return p.tok } // Otherwise read the next token from the scanner and Save it to the buffer // in case we unscan later. prev := p.tok p.tok = p.sc.Scan() if p.tok.Type == token.COMMENT { var comment *ast.CommentGroup var endline int // fmt.Printf("p.tok.Pos.Line = %+v prev: %d endline %d \n", // p.tok.Pos.Line, prev.Pos.Line, endline) if p.tok.Pos.Line == prev.Pos.Line { // The comment is on same line as the previous token; it // cannot be a lead comment but may be a line comment. comment, endline = p.consumeCommentGroup(0) if p.tok.Pos.Line != endline { // The next token is on a different line, thus // the last comment group is a line comment. 
p.lineComment = comment } } // consume successor comments, if any endline = -1 for p.tok.Type == token.COMMENT { comment, endline = p.consumeCommentGroup(1) } if endline+1 == p.tok.Pos.Line && p.tok.Type != token.RBRACE { switch p.tok.Type { case token.RBRACE, token.RBRACK: // Do not count for these cases default: // The next token is following on the line immediately after the // comment group, thus the last comment group is a lead comment. p.leadComment = comment } } } return p.tok } // unscan pushes the previously read token back onto the buffer. func (p *Parser) unscan() { p.n = 1 } // ---------------------------------------------------------------------------- // Parsing support func (p *Parser) printTrace(a ...interface{}) { if !p.enableTrace { return } const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " const n = len(dots) fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) i := 2 * p.indent for i > n { fmt.Print(dots) i -= n } // i <= n fmt.Print(dots[0:i]) fmt.Println(a...) } func
(p *Parser, msg string) *Parser { p.printTrace(msg, "(") p.indent++ return p } // Usage pattern: defer un(trace(p, "...")) func un(p *Parser) { p.indent-- p.printTrace(")") }
trace
identifier_name
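The identifier elided in this example is the `trace` helper, which pairs with `un` via `defer un(trace(p, "..."))`. The following self-contained sketch shows the same idiom in isolation; the `tracer` type is hypothetical and only mirrors the `enableTrace` and `indent` fields the parser uses.

```go
package main

import (
	"fmt"
	"strings"
)

// tracer is a hypothetical stand-in for the parser fields used by
// printTrace/trace/un: it only tracks whether tracing is on and how deep
// the current call nesting is.
type tracer struct {
	enabled bool
	indent  int
}

func (t *tracer) printTrace(a ...interface{}) {
	if !t.enabled {
		return
	}
	fmt.Print(strings.Repeat(". ", t.indent))
	fmt.Println(a...)
}

// trace prints the opening marker and increases the indent. It returns the
// tracer so it can be combined with defer in a single statement.
func trace(t *tracer, msg string) *tracer {
	t.printTrace(msg, "(")
	t.indent++
	return t
}

// un undoes one level of indentation and prints the closing marker.
func un(t *tracer) {
	t.indent--
	t.printTrace(")")
}

func parseSomething(t *tracer) {
	// Usage pattern mirrored from the parser: trace runs now, un runs on return.
	defer un(trace(t, "ParseSomething"))
	t.printTrace("doing work")
}

func main() {
	parseSomething(&tracer{enabled: true})
}
```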
parser.go
// Package parser implements a parser for HCL (HashiCorp Configuration // Language) package parser import ( "bytes" "errors" "fmt" "strings" "github.com/hashicorp/hcl/hcl/ast" "github.com/hashicorp/hcl/hcl/scanner" "github.com/hashicorp/hcl/hcl/token" ) type Parser struct { sc *scanner.Scanner // Last read token tok token.Token commaPrev token.Token comments []*ast.CommentGroup leadComment *ast.CommentGroup // last lead comment lineComment *ast.CommentGroup // last line comment enableTrace bool indent int n int // buffer size (max = 1) } func newParser(src []byte) *Parser { return &Parser{ sc: scanner.New(src), } } // Parse returns the fully parsed source and returns the abstract syntax tree. func Parse(src []byte) (*ast.File, error) { // normalize all line endings // since the scanner and output only work with "\n" line endings, we may // end up with dangling "\r" characters in the parsed data. src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1) p := newParser(src) return p.Parse() } var errEofToken = errors.New("EOF token found") // Parse returns the fully parsed source and returns the abstract syntax tree. func (p *Parser) Parse() (*ast.File, error) { f := &ast.File{} var err, scerr error p.sc.Error = func(pos token.Pos, msg string) { scerr = &PosError{Pos: pos, Err: errors.New(msg)} } f.Node, err = p.objectList(false) if scerr != nil { return nil, scerr } if err != nil { return nil, err } f.Comments = p.comments return f, nil } // objectList parses a list of items within an object (generally k/v pairs).
node := &ast.ObjectList{} for { if obj { tok := p.scan() p.unscan() if tok.Type == token.RBRACE { break } } n, err := p.objectItem() if err == errEofToken { break // we are finished } // we don't return a nil node, because might want to use already // collected items. if err != nil { return node, err } node.Add(n) // object lists can be optionally comma-delimited e.g. when a list of maps // is being expressed, so a comma is allowed here - it's simply consumed tok := p.scan() if tok.Type != token.COMMA { p.unscan() } } return node, nil } func (p *Parser) consumeComment() (comment *ast.Comment, endline int) { endline = p.tok.Pos.Line // count the endline if it's multiline comment, ie starting with /* if len(p.tok.Text) > 1 && p.tok.Text[1] == '*' { // don't use range here - no need to decode Unicode code points for i := 0; i < len(p.tok.Text); i++ { if p.tok.Text[i] == '\n' { endline++ } } } comment = &ast.Comment{Start: p.tok.Pos, Text: p.tok.Text} p.tok = p.sc.Scan() return } func (p *Parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) { var list []*ast.Comment endline = p.tok.Pos.Line for p.tok.Type == token.COMMENT && p.tok.Pos.Line <= endline+n { var comment *ast.Comment comment, endline = p.consumeComment() list = append(list, comment) } // add comment group to the comments list comments = &ast.CommentGroup{List: list} p.comments = append(p.comments, comments) return } // objectItem parses a single object item func (p *Parser) objectItem() (*ast.ObjectItem, error) { defer un(trace(p, "ParseObjectItem")) keys, err := p.objectKey() if len(keys) > 0 && err == errEofToken { // We ignore eof token here since it is an error if we didn't // receive a value (but we did receive a key) for the item. err = nil } if len(keys) > 0 && err != nil && p.tok.Type == token.RBRACE { // This is a strange boolean statement, but what it means is: // We have keys with no value, and we're likely in an object // (since RBrace ends an object). For this, we set err to nil so // we continue and get the error below of having the wrong value // type. err = nil // Reset the token type so we don't think it completed fine. See // objectType which uses p.tok.Type to check if we're done with // the object. p.tok.Type = token.EOF } if err != nil { return nil, err } o := &ast.ObjectItem{ Keys: keys, } if p.leadComment != nil { o.LeadComment = p.leadComment p.leadComment = nil } switch p.tok.Type { case token.ASSIGN: o.Assign = p.tok.Pos o.Val, err = p.object() if err != nil { return nil, err } case token.LBRACE: o.Val, err = p.objectType() if err != nil { return nil, err } default: keyStr := make([]string, 0, len(keys)) for _, k := range keys { keyStr = append(keyStr, k.Token.Text) } return nil, &PosError{ Pos: p.tok.Pos, Err: fmt.Errorf( "key '%s' expected start of object ('{') or assignment ('=')", strings.Join(keyStr, " ")), } } // key=#comment // val if p.lineComment != nil { o.LineComment, p.lineComment = p.lineComment, nil } // do a look-ahead for line comment p.scan() if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil { o.LineComment = p.lineComment p.lineComment = nil } p.unscan() return o, nil } // objectKey parses an object key and returns a ObjectKey AST func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { keyCount := 0 keys := make([]*ast.ObjectKey, 0) for { tok := p.scan() switch tok.Type { case token.EOF: // It is very important to also return the keys here as well as // the error. 
This is because we need to be able to tell if we // did parse keys prior to finding the EOF, or if we just found // a bare EOF. return keys, errEofToken case token.ASSIGN: // assignment or object only, but not nested objects. this is not // allowed: `foo bar = {}` if keyCount > 1 { return nil, &PosError{ Pos: p.tok.Pos, Err: fmt.Errorf("nested object expected: LBRACE got: %s", p.tok.Type), } } if keyCount == 0 { return nil, &PosError{ Pos: p.tok.Pos, Err: errors.New("no object keys found!"), } } return keys, nil case token.LBRACE: var err error // If we have no keys, then it is a syntax error. i.e. {{}} is not // allowed. if len(keys) == 0 { err = &PosError{ Pos: p.tok.Pos, Err: fmt.Errorf("expected: IDENT | STRING got: %s", p.tok.Type), } } // object return keys, err case token.IDENT, token.STRING: keyCount++ keys = append(keys, &ast.ObjectKey{Token: p.tok}) case token.ILLEGAL: return keys, &PosError{ Pos: p.tok.Pos, Err: fmt.Errorf("illegal character"), } default: return keys, &PosError{ Pos: p.tok.Pos, Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type), } } } } // object parses any type of object, such as number, bool, string, object or // list. func (p *Parser) object() (ast.Node, error) { defer un(trace(p, "ParseType")) tok := p.scan() switch tok.Type { case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC: return p.literalType() case token.LBRACE: return p.objectType() case token.LBRACK: return p.listType() case token.COMMENT: // implement comment case token.EOF: return nil, errEofToken } return nil, &PosError{ Pos: tok.Pos, Err: fmt.Errorf("Unknown token: %+v", tok), } } // objectType parses an object type and returns a ObjectType AST func (p *Parser) objectType() (*ast.ObjectType, error) { defer un(trace(p, "ParseObjectType")) // we assume that the currently scanned token is a LBRACE o := &ast.ObjectType{ Lbrace: p.tok.Pos, } l, err := p.objectList(true) // if we hit RBRACE, we are good to go (means we parsed all Items), if it's // not a RBRACE, it's an syntax error and we just return it. 
if err != nil && p.tok.Type != token.RBRACE { return nil, err } // No error, scan and expect the ending to be a brace if tok := p.scan(); tok.Type != token.RBRACE { return nil, &PosError{ Pos: tok.Pos, Err: fmt.Errorf("object expected closing RBRACE got: %s", tok.Type), } } o.List = l o.Rbrace = p.tok.Pos // advanced via parseObjectList return o, nil } // listType parses a list type and returns a ListType AST func (p *Parser) listType() (*ast.ListType, error) { defer un(trace(p, "ParseListType")) // we assume that the currently scanned token is a LBRACK l := &ast.ListType{ Lbrack: p.tok.Pos, } needComma := false for { tok := p.scan() if needComma { switch tok.Type { case token.COMMA, token.RBRACK: default: return nil, &PosError{ Pos: tok.Pos, Err: fmt.Errorf( "error parsing list, expected comma or list end, got: %s", tok.Type), } } } switch tok.Type { case token.BOOL, token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC: node, err := p.literalType() if err != nil { return nil, err } // If there is a lead comment, apply it if p.leadComment != nil { node.LeadComment = p.leadComment p.leadComment = nil } l.Add(node) needComma = true case token.COMMA: // get next list item or we are at the end // do a look-ahead for line comment p.scan() if p.lineComment != nil && len(l.List) > 0 { lit, ok := l.List[len(l.List)-1].(*ast.LiteralType) if ok { lit.LineComment = p.lineComment l.List[len(l.List)-1] = lit p.lineComment = nil } } p.unscan() needComma = false continue case token.LBRACE: // Looks like a nested object, so parse it out node, err := p.objectType() if err != nil { return nil, &PosError{ Pos: tok.Pos, Err: fmt.Errorf( "error while trying to parse object within list: %s", err), } } l.Add(node) needComma = true case token.LBRACK: node, err := p.listType() if err != nil { return nil, &PosError{ Pos: tok.Pos, Err: fmt.Errorf( "error while trying to parse list within list: %s", err), } } l.Add(node) case token.RBRACK: // finished l.Rbrack = p.tok.Pos return l, nil default: return nil, &PosError{ Pos: tok.Pos, Err: fmt.Errorf("unexpected token while parsing list: %s", tok.Type), } } } } // literalType parses a literal type and returns a LiteralType AST func (p *Parser) literalType() (*ast.LiteralType, error) { defer un(trace(p, "ParseLiteral")) return &ast.LiteralType{ Token: p.tok, }, nil } // scan returns the next token from the underlying scanner. If a token has // been unscanned then read that instead. In the process, it collects any // comment groups encountered, and remembers the last lead and line comments. func (p *Parser) scan() token.Token { // If we have a token on the buffer, then return it. if p.n != 0 { p.n = 0 return p.tok } // Otherwise read the next token from the scanner and Save it to the buffer // in case we unscan later. prev := p.tok p.tok = p.sc.Scan() if p.tok.Type == token.COMMENT { var comment *ast.CommentGroup var endline int // fmt.Printf("p.tok.Pos.Line = %+v prev: %d endline %d \n", // p.tok.Pos.Line, prev.Pos.Line, endline) if p.tok.Pos.Line == prev.Pos.Line { // The comment is on same line as the previous token; it // cannot be a lead comment but may be a line comment. comment, endline = p.consumeCommentGroup(0) if p.tok.Pos.Line != endline { // The next token is on a different line, thus // the last comment group is a line comment. 
p.lineComment = comment } } // consume successor comments, if any endline = -1 for p.tok.Type == token.COMMENT { comment, endline = p.consumeCommentGroup(1) } if endline+1 == p.tok.Pos.Line && p.tok.Type != token.RBRACE { switch p.tok.Type { case token.RBRACE, token.RBRACK: // Do not count for these cases default: // The next token is following on the line immediately after the // comment group, thus the last comment group is a lead comment. p.leadComment = comment } } } return p.tok } // unscan pushes the previously read token back onto the buffer. func (p *Parser) unscan() { p.n = 1 } // ---------------------------------------------------------------------------- // Parsing support func (p *Parser) printTrace(a ...interface{}) { if !p.enableTrace { return } const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " const n = len(dots) fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) i := 2 * p.indent for i > n { fmt.Print(dots) i -= n } // i <= n fmt.Print(dots[0:i]) fmt.Println(a...) } func trace(p *Parser, msg string) *Parser { p.printTrace(msg, "(") p.indent++ return p } // Usage pattern: defer un(trace(p, "...")) func un(p *Parser) { p.indent-- p.printTrace(")") }
// The parameter" obj" tells this whether to we are within an object (braces: // '{', '}') or just at the top level. If we're within an object, we end // at an RBRACE. func (p *Parser) objectList(obj bool) (*ast.ObjectList, error) { defer un(trace(p, "ParseObjectList"))
random_line_split
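The parser's one-token look-ahead is built on the `scan`/`unscan` pair and the `n` buffer flag. This self-contained sketch illustrates the same pushback pattern with a hypothetical token source standing in for the real scanner.

```go
package main

import "fmt"

// bufferedScanner is a hypothetical illustration of the one-token pushback
// buffer used by the parser: scan normally pulls from the underlying source,
// but after unscan the last token is delivered once more.
type bufferedScanner struct {
	src []string // stand-in for the real scanner's token stream
	pos int
	tok string
	n   int // buffer size (max = 1), as in the parser
}

func (b *bufferedScanner) scan() string {
	if b.n != 0 {
		b.n = 0
		return b.tok // return the buffered token
	}
	if b.pos < len(b.src) {
		b.tok = b.src[b.pos]
		b.pos++
	} else {
		b.tok = "EOF"
	}
	return b.tok
}

func (b *bufferedScanner) unscan() {
	b.n = 1 // the next scan re-delivers the current token
}

func main() {
	s := &bufferedScanner{src: []string{"IDENT", "ASSIGN", "STRING"}}
	fmt.Println(s.scan()) // IDENT
	s.unscan()            // push it back
	fmt.Println(s.scan()) // IDENT again: the look-ahead was undone
	fmt.Println(s.scan()) // ASSIGN
}
```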
parser.go
// Package parser implements a parser for HCL (HashiCorp Configuration // Language) package parser import ( "bytes" "errors" "fmt" "strings" "github.com/hashicorp/hcl/hcl/ast" "github.com/hashicorp/hcl/hcl/scanner" "github.com/hashicorp/hcl/hcl/token" ) type Parser struct { sc *scanner.Scanner // Last read token tok token.Token commaPrev token.Token comments []*ast.CommentGroup leadComment *ast.CommentGroup // last lead comment lineComment *ast.CommentGroup // last line comment enableTrace bool indent int n int // buffer size (max = 1) } func newParser(src []byte) *Parser { return &Parser{ sc: scanner.New(src), } } // Parse returns the fully parsed source and returns the abstract syntax tree. func Parse(src []byte) (*ast.File, error) { // normalize all line endings // since the scanner and output only work with "\n" line endings, we may // end up with dangling "\r" characters in the parsed data. src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1) p := newParser(src) return p.Parse() } var errEofToken = errors.New("EOF token found") // Parse returns the fully parsed source and returns the abstract syntax tree. func (p *Parser) Parse() (*ast.File, error) { f := &ast.File{} var err, scerr error p.sc.Error = func(pos token.Pos, msg string) { scerr = &PosError{Pos: pos, Err: errors.New(msg)} } f.Node, err = p.objectList(false) if scerr != nil { return nil, scerr } if err != nil { return nil, err } f.Comments = p.comments return f, nil } // objectList parses a list of items within an object (generally k/v pairs). // The parameter" obj" tells this whether to we are within an object (braces: // '{', '}') or just at the top level. If we're within an object, we end // at an RBRACE. func (p *Parser) objectList(obj bool) (*ast.ObjectList, error) { defer un(trace(p, "ParseObjectList")) node := &ast.ObjectList{} for { if obj { tok := p.scan() p.unscan() if tok.Type == token.RBRACE { break } } n, err := p.objectItem() if err == errEofToken { break // we are finished } // we don't return a nil node, because might want to use already // collected items. if err != nil { return node, err } node.Add(n) // object lists can be optionally comma-delimited e.g. 
when a list of maps // is being expressed, so a comma is allowed here - it's simply consumed tok := p.scan() if tok.Type != token.COMMA { p.unscan() } } return node, nil } func (p *Parser) consumeComment() (comment *ast.Comment, endline int) { endline = p.tok.Pos.Line // count the endline if it's multiline comment, ie starting with /* if len(p.tok.Text) > 1 && p.tok.Text[1] == '*' { // don't use range here - no need to decode Unicode code points for i := 0; i < len(p.tok.Text); i++ { if p.tok.Text[i] == '\n' { endline++ } } } comment = &ast.Comment{Start: p.tok.Pos, Text: p.tok.Text} p.tok = p.sc.Scan() return } func (p *Parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) { var list []*ast.Comment endline = p.tok.Pos.Line for p.tok.Type == token.COMMENT && p.tok.Pos.Line <= endline+n { var comment *ast.Comment comment, endline = p.consumeComment() list = append(list, comment) } // add comment group to the comments list comments = &ast.CommentGroup{List: list} p.comments = append(p.comments, comments) return } // objectItem parses a single object item func (p *Parser) objectItem() (*ast.ObjectItem, error) { defer un(trace(p, "ParseObjectItem")) keys, err := p.objectKey() if len(keys) > 0 && err == errEofToken { // We ignore eof token here since it is an error if we didn't // receive a value (but we did receive a key) for the item. err = nil } if len(keys) > 0 && err != nil && p.tok.Type == token.RBRACE { // This is a strange boolean statement, but what it means is: // We have keys with no value, and we're likely in an object // (since RBrace ends an object). For this, we set err to nil so // we continue and get the error below of having the wrong value // type. err = nil // Reset the token type so we don't think it completed fine. See // objectType which uses p.tok.Type to check if we're done with // the object. p.tok.Type = token.EOF } if err != nil { return nil, err } o := &ast.ObjectItem{ Keys: keys, } if p.leadComment != nil { o.LeadComment = p.leadComment p.leadComment = nil } switch p.tok.Type { case token.ASSIGN: o.Assign = p.tok.Pos o.Val, err = p.object() if err != nil { return nil, err } case token.LBRACE: o.Val, err = p.objectType() if err != nil { return nil, err } default: keyStr := make([]string, 0, len(keys)) for _, k := range keys { keyStr = append(keyStr, k.Token.Text) } return nil, &PosError{ Pos: p.tok.Pos, Err: fmt.Errorf( "key '%s' expected start of object ('{') or assignment ('=')", strings.Join(keyStr, " ")), } } // key=#comment // val if p.lineComment != nil { o.LineComment, p.lineComment = p.lineComment, nil } // do a look-ahead for line comment p.scan() if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil { o.LineComment = p.lineComment p.lineComment = nil } p.unscan() return o, nil } // objectKey parses an object key and returns a ObjectKey AST func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { keyCount := 0 keys := make([]*ast.ObjectKey, 0) for { tok := p.scan() switch tok.Type { case token.EOF: // It is very important to also return the keys here as well as // the error. This is because we need to be able to tell if we // did parse keys prior to finding the EOF, or if we just found // a bare EOF. return keys, errEofToken case token.ASSIGN: // assignment or object only, but not nested objects. 
this is not // allowed: `foo bar = {}` if keyCount > 1 { return nil, &PosError{ Pos: p.tok.Pos, Err: fmt.Errorf("nested object expected: LBRACE got: %s", p.tok.Type), } } if keyCount == 0 { return nil, &PosError{ Pos: p.tok.Pos, Err: errors.New("no object keys found!"), } } return keys, nil case token.LBRACE: var err error // If we have no keys, then it is a syntax error. i.e. {{}} is not // allowed. if len(keys) == 0 { err = &PosError{ Pos: p.tok.Pos, Err: fmt.Errorf("expected: IDENT | STRING got: %s", p.tok.Type), } } // object return keys, err case token.IDENT, token.STRING: keyCount++ keys = append(keys, &ast.ObjectKey{Token: p.tok}) case token.ILLEGAL: return keys, &PosError{ Pos: p.tok.Pos, Err: fmt.Errorf("illegal character"), } default: return keys, &PosError{ Pos: p.tok.Pos, Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type), } } } } // object parses any type of object, such as number, bool, string, object or // list. func (p *Parser) object() (ast.Node, error) { defer un(trace(p, "ParseType")) tok := p.scan() switch tok.Type { case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC: return p.literalType() case token.LBRACE: return p.objectType() case token.LBRACK: return p.listType() case token.COMMENT: // implement comment case token.EOF: return nil, errEofToken } return nil, &PosError{ Pos: tok.Pos, Err: fmt.Errorf("Unknown token: %+v", tok), } } // objectType parses an object type and returns a ObjectType AST func (p *Parser) objectType() (*ast.ObjectType, error) { defer un(trace(p, "ParseObjectType")) // we assume that the currently scanned token is a LBRACE o := &ast.ObjectType{ Lbrace: p.tok.Pos, } l, err := p.objectList(true) // if we hit RBRACE, we are good to go (means we parsed all Items), if it's // not a RBRACE, it's an syntax error and we just return it. 
if err != nil && p.tok.Type != token.RBRACE { return nil, err } // No error, scan and expect the ending to be a brace if tok := p.scan(); tok.Type != token.RBRACE { return nil, &PosError{ Pos: tok.Pos, Err: fmt.Errorf("object expected closing RBRACE got: %s", tok.Type), } } o.List = l o.Rbrace = p.tok.Pos // advanced via parseObjectList return o, nil } // listType parses a list type and returns a ListType AST func (p *Parser) listType() (*ast.ListType, error) { defer un(trace(p, "ParseListType")) // we assume that the currently scanned token is a LBRACK l := &ast.ListType{ Lbrack: p.tok.Pos, } needComma := false for { tok := p.scan() if needComma { switch tok.Type { case token.COMMA, token.RBRACK: default: return nil, &PosError{ Pos: tok.Pos, Err: fmt.Errorf( "error parsing list, expected comma or list end, got: %s", tok.Type), } } } switch tok.Type { case token.BOOL, token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC: node, err := p.literalType() if err != nil { return nil, err } // If there is a lead comment, apply it if p.leadComment != nil { node.LeadComment = p.leadComment p.leadComment = nil } l.Add(node) needComma = true case token.COMMA: // get next list item or we are at the end // do a look-ahead for line comment p.scan() if p.lineComment != nil && len(l.List) > 0 { lit, ok := l.List[len(l.List)-1].(*ast.LiteralType) if ok { lit.LineComment = p.lineComment l.List[len(l.List)-1] = lit p.lineComment = nil } } p.unscan() needComma = false continue case token.LBRACE: // Looks like a nested object, so parse it out node, err := p.objectType() if err != nil { return nil, &PosError{ Pos: tok.Pos, Err: fmt.Errorf( "error while trying to parse object within list: %s", err), } } l.Add(node) needComma = true case token.LBRACK: node, err := p.listType() if err != nil { return nil, &PosError{ Pos: tok.Pos, Err: fmt.Errorf( "error while trying to parse list within list: %s", err), } } l.Add(node) case token.RBRACK: // finished l.Rbrack = p.tok.Pos return l, nil default: return nil, &PosError{ Pos: tok.Pos, Err: fmt.Errorf("unexpected token while parsing list: %s", tok.Type), } } } } // literalType parses a literal type and returns a LiteralType AST func (p *Parser) literalType() (*ast.LiteralType, error) { defer un(trace(p, "ParseLiteral")) return &ast.LiteralType{ Token: p.tok, }, nil } // scan returns the next token from the underlying scanner. If a token has // been unscanned then read that instead. In the process, it collects any // comment groups encountered, and remembers the last lead and line comments. func (p *Parser) scan() token.Token { // If we have a token on the buffer, then return it. if p.n != 0 { p.n = 0 return p.tok } // Otherwise read the next token from the scanner and Save it to the buffer // in case we unscan later. prev := p.tok p.tok = p.sc.Scan() if p.tok.Type == token.COMMENT { var comment *ast.CommentGroup var endline int // fmt.Printf("p.tok.Pos.Line = %+v prev: %d endline %d \n", // p.tok.Pos.Line, prev.Pos.Line, endline) if p.tok.Pos.Line == prev.Pos.Line { // The comment is on same line as the previous token; it // cannot be a lead comment but may be a line comment. comment, endline = p.consumeCommentGroup(0) if p.tok.Pos.Line != endline { // The next token is on a different line, thus // the last comment group is a line comment. 
p.lineComment = comment } } // consume successor comments, if any endline = -1 for p.tok.Type == token.COMMENT { comment, endline = p.consumeCommentGroup(1) } if endline+1 == p.tok.Pos.Line && p.tok.Type != token.RBRACE { switch p.tok.Type { case token.RBRACE, token.RBRACK: // Do not count for these cases default: // The next token is following on the line immediately after the // comment group, thus the last comment group is a lead comment. p.leadComment = comment } } } return p.tok } // unscan pushes the previously read token back onto the buffer. func (p *Parser) unscan() { p.n = 1 } // ---------------------------------------------------------------------------- // Parsing support func (p *Parser) printTrace(a ...interface{})
func trace(p *Parser, msg string) *Parser { p.printTrace(msg, "(") p.indent++ return p } // Usage pattern: defer un(trace(p, "...")) func un(p *Parser) { p.indent-- p.printTrace(")") }
{ if !p.enableTrace { return } const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " const n = len(dots) fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) i := 2 * p.indent for i > n { fmt.Print(dots) i -= n } // i <= n fmt.Print(dots[0:i]) fmt.Println(a...) }
identifier_body
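The lead-comment and line-comment bookkeeping in `scan` and `objectItem` decides where comment groups attach. The hedged sketch below (assuming HCL's `//` and `#` comment syntax and the same import paths as above) shows where they should end up on a parsed item.

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl/hcl/ast"
	"github.com/hashicorp/hcl/hcl/parser"
)

func main() {
	// A lead comment sits on the line(s) before an item; a line comment sits
	// on the same line, after the value.
	src := []byte(`// lead comment for name
name = "teapot" # line comment for name
count = 2
`)

	f, err := parser.Parse(src)
	if err != nil {
		log.Fatal(err)
	}

	list := f.Node.(*ast.ObjectList)
	for _, item := range list.Items {
		if item.LeadComment != nil {
			fmt.Println("lead:", item.LeadComment.List[0].Text)
		}
		if item.LineComment != nil {
			fmt.Println("line:", item.LineComment.List[0].Text)
		}
	}
}
```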
dom-movement.ts
/** * This is required to work around a problem when extending built-in classes * like ``Error``. Some of the constructors for these classes return a value * from the constructor, which is then picked up by the constructors generated * by TypeScript (same with ES6 code transpiled through Babel), and this messes * up the inheritance chain. * * See https://github.com/Microsoft/TypeScript/issues/12123. */ // tslint:disable-next-line:ban-types export function fixPrototype(obj: any, parent: Function): void { // getPrototypeOf is supported as far back as IE9 const oldProto = Object.getPrototypeOf(obj); if (oldProto !== parent) { // setPrototypeOf is supported as far back as IE11 if (Object.setPrototypeOf !== undefined) { Object.setPrototypeOf(obj, parent.prototype); } else { obj.__proto__ = parent.prototype; } } } /** * This error is raised when a location is passed to a [[DOMSpace]] instance and * the location is not within the space. */ export class DOMSpaceScopeError extends Error { constructor() { super("location is not within the space"); fixPrototype(this, DOMSpaceScopeError); } } /** * This error is raised when a location into an irrelevant node cannot be * escaped. */ export class CannotEscapeIrrelevantNode extends Error { constructor() { super("location is irrelevant and cannot be escaped"); fixPrototype(this, CannotEscapeIrrelevantNode); } } /** * This error is raised when trying to specify a range with a minimum end point * which is past the maximum end point. */ export class ReversedRangeError extends Error { constructor() { super("tried to use a reversed range"); fixPrototype(this, ReversedRangeError); } } /** * This error is raised when trying to specify a range with a minimum end point * which is past the maximum end point. */ export class ComparingDisconnectedNodes extends Error { constructor() { super("cannot compare disconnected nodes"); fixPrototype(this, ComparingDisconnectedNodes); } } /** * Compare a ``[node, offset]`` location with another node which is known to be * a child of ``node``. * * @param node The node of the location. * * @param offset The offset of the location. * * @param child Another node to compare with ``[node, offset]`` which we already * know is a child of ``node``. * * @returns -1 if ``[node, offset]`` is before ``child``, 1 otherwise. */ function pointedCompare(node: Node, offset: number, child: Node): 1 | 0 | -1 { const pointed = node.childNodes[offset]; if (pointed === undefined) { // Undefined means we are after all other elements. (A negative offset, // before all nodes, is not possible here.) return 1; } // We return -1 when pointed === child because the actual position we care // about is *inside* child. Since it is inside child, ``[node, offset]`` // necessarily precedes that location. return pointed === child || // tslint:disable-next-line:no-bitwise (pointed.compareDocumentPosition(child) & Node.DOCUMENT_POSITION_FOLLOWING) !== 0 ? -1 : // child follows pointed 1; // child is before pointed } /** * Models a DOM location. A DOM location is a pair of node and offset. * * In theory it would be possible to support nodes of any type, but this library * currently only supports only ``Element``, ``Document``, ``DocumentFragment``, * and ``Text`` for the node. * * Consider the following example: * * <p>I am a <b>little</b> teapot.</p> * * A location of ``(p, 0)`` points to the first text node of the top * level ``p`` element. * * A location of ``(p.childNodes[0], 0)`` points to the letter "I" in first text * node inside ``p``. 
* * A location of ``(p.childNodes[0], 7)`` points to the end of the first text * node inside ``p``. This is a location after all the text in the node. * * A location of ``(p, 1)`` points to the ``b`` element inside ``p``. */ export class DOMLoc { constructor(readonly node: Node, readonly offset: number) { if (offset < 0) { throw new Error("offset cannot be negative"); } } static makePointingTo(node: Node): DOMLoc { const parent = node.parentNode; if (parent === null) { throw new Error("cannot point a node without a parent"); } return new DOMLoc(parent, indexOf(parent.childNodes, node)); } /** * @returns A new [[Location]], if the ``node``, ``offset`` pair are not equal * to those of this location. Otherwise, return ``this``. */ newIfDifferent(node: Node, offset: number): DOMLoc { return (this.node === node && this.offset === offset) ? this : new DOMLoc(node, offset); } /** * This is the node to which this location points. When the location points to * a text node, the pointed node is the text node. When the location points to * anything else, the pointed node is the child node at the offset of the * location. This may be undefined when the location points beyond the last * child. */ get pointedNode(): Node | null { const { node } = this; if (node.nodeType === Node.TEXT_NODE) { return node; } const pointed = node.childNodes[this.offset]; return pointed === undefined ? null : pointed; } /** * The offset contained by this location, but normalized. An offset pointing * beyond the end of the node's data will be normalized to point at the end of * the node. */ get normalizedOffset(): number { const { offset, node } = this; switch (node.nodeType) { case Node.DOCUMENT_NODE: case Node.DOCUMENT_FRAGMENT_NODE: case Node.ELEMENT_NODE: { const { childNodes: { length } } = node; return offset > length ? length : offset; } case Node.TEXT_NODE: { const { length } = node as Text; return offset > length ? length : offset; } default: throw new Error(`cannot normalize offset in a node of type: \ ${node.nodeType}`); } } /** * ``true`` if the location is already normalized. ``false`` if not. */ get isNormalized(): boolean { return this.offset === this.normalizedOffset; } /** * Convert a location with an offset which is out of bounds, to a location * with an offset within bounds. * * An offset less than 0 will be normalized to 0. An offset pointing beyond * the end of the node's data will be normalized to point at the end of the * node. * * @returns A new [[Location]], if the offset was adjusted. Otherwise, it * returns ``this``. */ normalizeOffset(): DOMLoc { const normalized = this.normalizedOffset; const { offset, node } = this; return normalized === offset ? this : new DOMLoc(node, normalized); } /** * Determine whether this location and another location are equal. * * @returns Whether ``this`` and ``other`` are equal. */ equals(other: DOMLoc | undefined | null): boolean { return other != null && (this === other || (this.node === other.node && this.offset === other.offset)); } /** * Compare this location with another in document order. * * @param other The other location to compare. * * @returns -1 if ``this`` is earlier than ``other``, ``0`` if the two * locations are equal, 1 if ``this`` is later than ``other``. * * @throws {ComparingDisconnectedNodes} If the two nodes are "disconnected" * (i.e. do not belong to the same document). 
*/ compare(other: DOMLoc): -1 | 0 | 1 { if (this.equals(other)) { return 0; } const { node, offset } = this; const { node: otherNode, offset: otherOffset } = other; if (node === otherNode) { // The case where offset === otherOffset cannot happen here because it is // covered above. return offset - otherOffset < 0 ? -1 : 1; } const result = node.compareDocumentPosition(otherNode); // tslint:disable:no-bitwise if ((result & Node.DOCUMENT_POSITION_DISCONNECTED) !== 0) { throw new ComparingDisconnectedNodes(); } if ((result & Node.DOCUMENT_POSITION_FOLLOWING) !== 0) { // otherNode follows node. return (result & Node.DOCUMENT_POSITION_CONTAINED_BY) !== 0 ? // otherNode is contained by node but we still need to figure out the // relative positions of the node pointed by [node, offset] and // otherNode. pointedCompare(node, offset, otherNode) : // otherNode just follows node, no parent child relation -1; } if ((result & Node.DOCUMENT_POSITION_PRECEDING) === 0) { /* istanbul ignore next: there's no means to generate this error */ throw new Error("neither preceding nor following: this should not \ happen"); } // otherNode precedes node. return ((result & Node.DOCUMENT_POSITION_CONTAINS) !== 0 && // otherNode contains node but we still need to figure out the // relative positions of the node pointed by [otherNode, // otherOffset] and node. pointedCompare(otherNode, otherOffset, node) > 0) ? -1 : 1; // tslint:enable:no-bitwise } } type NodeTest = (node: Node) => boolean; // tslint:disable-next-line:no-any function indexOf(arrayLike: any, el: any): number { return Array.prototype.indexOf.call(arrayLike, el); } /** * A space delimits a part of a DOM tree in which one can obtain locations. */ export class DOMSpace implements Iterable<DOMLoc> { /** * @param min The minimum location included in this space. * * @param max The maximum location included in this space. * * @param relevanceTest A test to determine whether a node is relevant. This * space does not produce locations into irrelevant nodes. * * @throws {CannotEscapeIrrelevantNode} If the container is irrelevant. * * @throw {ReversedRangeError} If ``max`` is less than ``min``. */ constructor(readonly min: DOMLoc, readonly max: DOMLoc, readonly relevanceTest: NodeTest = () => true) { if (!(this.isRelevant(min.node) && this.isRelevant(max.node))) { throw new CannotEscapeIrrelevantNode(); } // Man could be equal to min but it cannot be less than min. if (max.compare(min) < 0) { throw new ReversedRangeError(); } } static makeSpanningNode(node: Node, relevanceTest?: NodeTest): DOMSpace { return new DOMSpace(new DOMLoc(node, 0), new DOMLoc(node, node.childNodes.length), relevanceTest); } /** * Test whether a node is contextually relevant. This method runs some stock * tests and if necessary calls [[Space.relevanceTest]]. * * @param node The node to test. * * @returns ``true`` if the node is contextually relevant, ``false`` if not. */ isRelevant(node: Node): boolean { const { nodeType } = node; return (nodeType === Node.ELEMENT_NODE || nodeType === Node.TEXT_NODE || nodeType === Node.DOCUMENT_NODE || nodeType === Node.DOCUMENT_FRAGMENT_NODE) && this.relevanceTest(node); } /** * Determine whether this space contains a location. * * @param loc The location to test. * * @returns Whether the location is inside the space. 
*/ contains(loc: DOMLoc): boolean { try { return this.min.compare(loc) <= 0 && this.max.compare(loc) >= 0; } catch (ex) { if (ex instanceof ComparingDisconnectedNodes) { return false; } /* istanbul ignore next: there's currently no way to get here */ throw ex; } } /** * Determine whether this space contains a node. * * @param node The node to test. * * @returns Whether the node is inside the space. */ containsNode(node: Node): boolean { return node.parentNode !== null && this.contains(DOMLoc.makePointingTo(node)); } /** * If the current location is irrelevant node, then produce a new relevant * location pointing to the contextually irrelevant node. This is "escaping" * the node in the sense that the location provided by this method is pointing * at the irrelevant node *from outside*. * * This method also normalizes the location. * * @param location The location to escape. * * @returns If ``location`` was already relevant, and already normalized, then * return ``location``. Otherwise, the new relevant location. * * @throws {DOMSpaceScopeError} If ``location`` is not within the space. */ escapeIrrelevantNode(location: DOMLoc): DOMLoc { if (!this.contains(location)) { throw new DOMSpaceScopeError(); } const normalized = location.normalizeOffset(); let node: Node | null = normalized.node; const ancestorsAndSelf: Node[] = []; while (node !== null && this.containsNode(node)) { ancestorsAndSelf.push(node); node = node.parentNode; } // We reverse the nodes to scan them form topmost node down to the original // location. const reversed = ancestorsAndSelf.reverse(); const first = reversed[0]; for (const candidate of reversed) { if (!this.isRelevant(candidate)) { // parentNode cannot be null, unless we are the first in the array. // tslint:disable-next-line:no-non-null-assertion const parentNode = candidate.parentNode!; // If this is the first candidate, then the parent is outside the // container, and we cannot use it. We don't have a good location to // return. This should never happen because the container is required to // be relevant. if (candidate === first) { /* istanbul ignore next: there's no means to generate this error */ throw new Error("internal error: we should always be able to escape \ a location which is inside the space"); } return new DOMLoc(parentNode, indexOf(parentNode.childNodes, candidate)); } } // None of the ancestors or the node itself were irrelevant, so the original // location was fine. return normalized; } /** * Compute the next relevant location from a starting point. * * @param start The location from which to start. * * @returns The next relevant location. Or ``null`` if there is no next * relevant location within the space. Remember: the *location* is relevant, * but can point to an irrelevant node. * * @throws {DOMSpaceScopeError} If ``start`` is not within the space. * * @throws {CannotEscapeIrrelevantNode} If ``start`` is irrelevant and cannot * be escaped. */ next(start: DOMLoc): DOMLoc | null { // tslint:disable-next-line:prefer-const let { node, offset } = this.escapeIrrelevantNode(start); let loc: DOMLoc | undefined; switch (node.nodeType) { case Node.DOCUMENT_FRAGMENT_NODE: case Node.DOCUMENT_NODE: case Node.ELEMENT_NODE: const pointedNode = node.childNodes[offset++]; if (pointedNode !== undefined) { loc = this.isRelevant(pointedNode) ? 
new DOMLoc(pointedNode, 0) : new DOMLoc(node, offset); } break; case Node.TEXT_NODE: if (++offset <= (node as Text).length) { loc = new DOMLoc(node, offset); } break; /* istanbul ignore next: we cannot get there */ default: // Due to escapeIrrelevantNode, we should never get here. throw new Error(`internal error: unexpected type ${node.nodeType}`); } if (loc === undefined) { // If we get here, we have to move to the sibling after our starting node. // Note that because of the escapeIrrelevantNode at the beginning of this // function, the parent we encounter is necessarily relevant. const { parentNode } = node; if (parentNode === null) { return null; } loc = new DOMLoc(parentNode, indexOf(parentNode.childNodes, node) + 1); } return this.contains(loc) ? loc : null; } /** * Compute the previous relevant location from a starting point. * * @param start The location from which to start. * * @returns The previous relevant location. Or ``null`` if there is no * previous relevant location inside the space. Remember: the *location* is * relevant, but can point to an irrelevant node. * * @throws {DOMSpaceScopeError} If ``start`` is not within the space. * * @throws {CannotEscapeIrrelevantNode} If ``start`` is irrelevant and cannot * be escaped. */ previous(start: DOMLoc): DOMLoc | null { // tslint:disable-next-line:prefer-const let { node, offset } = this.escapeIrrelevantNode(start); let loc: DOMLoc | undefined; switch (node.nodeType) { case Node.DOCUMENT_FRAGMENT_NODE: case Node.DOCUMENT_NODE: case Node.ELEMENT_NODE: const pointedNode = node.childNodes[--offset]; if (pointedNode !== undefined) { loc = this.isRelevant(pointedNode) ? new DOMLoc(pointedNode, pointedNode.nodeType === Node.TEXT_NODE ? (pointedNode as Text).length : pointedNode.childNodes.length) : new DOMLoc(node, offset); } break; case Node.TEXT_NODE: if (--offset >= 0) { loc = new DOMLoc(node, offset); } break; /* istanbul ignore next: we cannot get there */ default: // Due to escapeIrrelevantNode, we should never get here. throw new Error(`internal error: unexpected type ${node.nodeType}`); } if (loc === undefined) { // If we get here, we have to move to the sibling before our starting // node. // Note that because of the escapeIrrelevantNode at the beginning of this // function, the parents we encounter are necessarily relevant. const { parentNode } = node; if (parentNode === null) { return null; } loc = new DOMLoc(parentNode, indexOf(parentNode.childNodes, node)); } return this.contains(loc) ? loc : null; } /** * Produce an iterable iterator that iterates in document order. */ *[Symbol.iterator](): IterableIterator<DOMLoc> { let current: DOMLoc | null = this.min; do { yield current; current = this.next(current); } while (current !== null); } /** * Produce an iterable iterator that iterates in reverse document order. */ *reversed(): IterableIterator<DOMLoc> { let current: DOMLoc | null = this.max; do
while (current !== null); } }
{ yield current; current = this.previous(current); }
conditional_block
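To make the iteration over a `DOMSpace` concrete, here is a short TypeScript sketch. It assumes the classes above are exported from a local `./dom-movement` module and that a DOM implementation (a browser or jsdom) provides `document`; both are assumptions, not part of the original source.

```ts
import { DOMSpace } from "./dom-movement";

const p = document.createElement("p");
p.innerHTML = `I am a <b>little</b> teapot.`;

// Span the whole paragraph, but treat <b> elements as irrelevant: the space
// still yields a location pointing *at* the <b>, never one inside it.
const space = DOMSpace.makeSpanningNode(
  p,
  node => !(node instanceof Element && node.tagName === "B"),
);

// Walk every relevant location in document order...
for (const loc of space) {
  console.log(loc.node.nodeName, loc.offset);
}

// ...or in reverse document order, starting from the maximum location.
for (const loc of space.reversed()) {
  console.log(loc.node.nodeName, loc.offset);
}
```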
dom-movement.ts
/** * This is required to work around a problem when extending built-in classes * like ``Error``. Some of the constructors for these classes return a value * from the constructor, which is then picked up by the constructors generated * by TypeScript (same with ES6 code transpiled through Babel), and this messes * up the inheritance chain. * * See https://github.com/Microsoft/TypeScript/issues/12123. */ // tslint:disable-next-line:ban-types export function fixPrototype(obj: any, parent: Function): void { // getPrototypeOf is supported as far back as IE9 const oldProto = Object.getPrototypeOf(obj); if (oldProto !== parent) { // setPrototypeOf is supported as far back as IE11 if (Object.setPrototypeOf !== undefined) { Object.setPrototypeOf(obj, parent.prototype); } else { obj.__proto__ = parent.prototype; } } } /** * This error is raised when a location is passed to a [[DOMSpace]] instance and * the location is not within the space. */ export class DOMSpaceScopeError extends Error { constructor() { super("location is not within the space"); fixPrototype(this, DOMSpaceScopeError); } } /** * This error is raised when a location into an irrelevant node cannot be * escaped. */ export class CannotEscapeIrrelevantNode extends Error { constructor() { super("location is irrelevant and cannot be escaped"); fixPrototype(this, CannotEscapeIrrelevantNode); } } /** * This error is raised when trying to specify a range with a minimum end point * which is past the maximum end point. */ export class ReversedRangeError extends Error { constructor() { super("tried to use a reversed range"); fixPrototype(this, ReversedRangeError); } } /** * This error is raised when trying to specify a range with a minimum end point * which is past the maximum end point. */ export class ComparingDisconnectedNodes extends Error { constructor() { super("cannot compare disconnected nodes"); fixPrototype(this, ComparingDisconnectedNodes); } } /** * Compare a ``[node, offset]`` location with another node which is known to be * a child of ``node``. * * @param node The node of the location. * * @param offset The offset of the location. * * @param child Another node to compare with ``[node, offset]`` which we already * know is a child of ``node``. * * @returns -1 if ``[node, offset]`` is before ``child``, 1 otherwise. */ function
(node: Node, offset: number, child: Node): 1 | 0 | -1 { const pointed = node.childNodes[offset]; if (pointed === undefined) { // Undefined means we are after all other elements. (A negative offset, // before all nodes, is not possible here.) return 1; } // We return -1 when pointed === child because the actual position we care // about is *inside* child. Since it is inside child, ``[node, offset]`` // necessarily precedes that location. return pointed === child || // tslint:disable-next-line:no-bitwise (pointed.compareDocumentPosition(child) & Node.DOCUMENT_POSITION_FOLLOWING) !== 0 ? -1 : // child follows pointed 1; // child is before pointed } /** * Models a DOM location. A DOM location is a pair of node and offset. * * In theory it would be possible to support nodes of any type, but this library * currently only supports only ``Element``, ``Document``, ``DocumentFragment``, * and ``Text`` for the node. * * Consider the following example: * * <p>I am a <b>little</b> teapot.</p> * * A location of ``(p, 0)`` points to the first text node of the top * level ``p`` element. * * A location of ``(p.childNodes[0], 0)`` points to the letter "I" in first text * node inside ``p``. * * A location of ``(p.childNodes[0], 7)`` points to the end of the first text * node inside ``p``. This is a location after all the text in the node. * * A location of ``(p, 1)`` points to the ``b`` element inside ``p``. */ export class DOMLoc { constructor(readonly node: Node, readonly offset: number) { if (offset < 0) { throw new Error("offset cannot be negative"); } } static makePointingTo(node: Node): DOMLoc { const parent = node.parentNode; if (parent === null) { throw new Error("cannot point a node without a parent"); } return new DOMLoc(parent, indexOf(parent.childNodes, node)); } /** * @returns A new [[Location]], if the ``node``, ``offset`` pair are not equal * to those of this location. Otherwise, return ``this``. */ newIfDifferent(node: Node, offset: number): DOMLoc { return (this.node === node && this.offset === offset) ? this : new DOMLoc(node, offset); } /** * This is the node to which this location points. When the location points to * a text node, the pointed node is the text node. When the location points to * anything else, the pointed node is the child node at the offset of the * location. This may be undefined when the location points beyond the last * child. */ get pointedNode(): Node | null { const { node } = this; if (node.nodeType === Node.TEXT_NODE) { return node; } const pointed = node.childNodes[this.offset]; return pointed === undefined ? null : pointed; } /** * The offset contained by this location, but normalized. An offset pointing * beyond the end of the node's data will be normalized to point at the end of * the node. */ get normalizedOffset(): number { const { offset, node } = this; switch (node.nodeType) { case Node.DOCUMENT_NODE: case Node.DOCUMENT_FRAGMENT_NODE: case Node.ELEMENT_NODE: { const { childNodes: { length } } = node; return offset > length ? length : offset; } case Node.TEXT_NODE: { const { length } = node as Text; return offset > length ? length : offset; } default: throw new Error(`cannot normalize offset in a node of type: \ ${node.nodeType}`); } } /** * ``true`` if the location is already normalized. ``false`` if not. */ get isNormalized(): boolean { return this.offset === this.normalizedOffset; } /** * Convert a location with an offset which is out of bounds, to a location * with an offset within bounds. * * An offset less than 0 will be normalized to 0. 
An offset pointing beyond * the end of the node's data will be normalized to point at the end of the * node. * * @returns A new [[Location]], if the offset was adjusted. Otherwise, it * returns ``this``. */ normalizeOffset(): DOMLoc { const normalized = this.normalizedOffset; const { offset, node } = this; return normalized === offset ? this : new DOMLoc(node, normalized); } /** * Determine whether this location and another location are equal. * * @returns Whether ``this`` and ``other`` are equal. */ equals(other: DOMLoc | undefined | null): boolean { return other != null && (this === other || (this.node === other.node && this.offset === other.offset)); } /** * Compare this location with another in document order. * * @param other The other location to compare. * * @returns -1 if ``this`` is earlier than ``other``, ``0`` if the two * locations are equal, 1 if ``this`` is later than ``other``. * * @throws {ComparingDisconnectedNodes} If the two nodes are "disconnected" * (i.e. do not belong to the same document). */ compare(other: DOMLoc): -1 | 0 | 1 { if (this.equals(other)) { return 0; } const { node, offset } = this; const { node: otherNode, offset: otherOffset } = other; if (node === otherNode) { // The case where offset === otherOffset cannot happen here because it is // covered above. return offset - otherOffset < 0 ? -1 : 1; } const result = node.compareDocumentPosition(otherNode); // tslint:disable:no-bitwise if ((result & Node.DOCUMENT_POSITION_DISCONNECTED) !== 0) { throw new ComparingDisconnectedNodes(); } if ((result & Node.DOCUMENT_POSITION_FOLLOWING) !== 0) { // otherNode follows node. return (result & Node.DOCUMENT_POSITION_CONTAINED_BY) !== 0 ? // otherNode is contained by node but we still need to figure out the // relative positions of the node pointed by [node, offset] and // otherNode. pointedCompare(node, offset, otherNode) : // otherNode just follows node, no parent child relation -1; } if ((result & Node.DOCUMENT_POSITION_PRECEDING) === 0) { /* istanbul ignore next: there's no means to generate this error */ throw new Error("neither preceding nor following: this should not \ happen"); } // otherNode precedes node. return ((result & Node.DOCUMENT_POSITION_CONTAINS) !== 0 && // otherNode contains node but we still need to figure out the // relative positions of the node pointed by [otherNode, // otherOffset] and node. pointedCompare(otherNode, otherOffset, node) > 0) ? -1 : 1; // tslint:enable:no-bitwise } } type NodeTest = (node: Node) => boolean; // tslint:disable-next-line:no-any function indexOf(arrayLike: any, el: any): number { return Array.prototype.indexOf.call(arrayLike, el); } /** * A space delimits a part of a DOM tree in which one can obtain locations. */ export class DOMSpace implements Iterable<DOMLoc> { /** * @param min The minimum location included in this space. * * @param max The maximum location included in this space. * * @param relevanceTest A test to determine whether a node is relevant. This * space does not produce locations into irrelevant nodes. * * @throws {CannotEscapeIrrelevantNode} If the container is irrelevant. * * @throw {ReversedRangeError} If ``max`` is less than ``min``. */ constructor(readonly min: DOMLoc, readonly max: DOMLoc, readonly relevanceTest: NodeTest = () => true) { if (!(this.isRelevant(min.node) && this.isRelevant(max.node))) { throw new CannotEscapeIrrelevantNode(); } // Man could be equal to min but it cannot be less than min. 
if (max.compare(min) < 0) { throw new ReversedRangeError(); } } static makeSpanningNode(node: Node, relevanceTest?: NodeTest): DOMSpace { return new DOMSpace(new DOMLoc(node, 0), new DOMLoc(node, node.childNodes.length), relevanceTest); } /** * Test whether a node is contextually relevant. This method runs some stock * tests and if necessary calls [[Space.relevanceTest]]. * * @param node The node to test. * * @returns ``true`` if the node is contextually relevant, ``false`` if not. */ isRelevant(node: Node): boolean { const { nodeType } = node; return (nodeType === Node.ELEMENT_NODE || nodeType === Node.TEXT_NODE || nodeType === Node.DOCUMENT_NODE || nodeType === Node.DOCUMENT_FRAGMENT_NODE) && this.relevanceTest(node); } /** * Determine whether this space contains a location. * * @param loc The location to test. * * @returns Whether the location is inside the space. */ contains(loc: DOMLoc): boolean { try { return this.min.compare(loc) <= 0 && this.max.compare(loc) >= 0; } catch (ex) { if (ex instanceof ComparingDisconnectedNodes) { return false; } /* istanbul ignore next: there's currently no way to get here */ throw ex; } } /** * Determine whether this space contains a node. * * @param node The node to test. * * @returns Whether the node is inside the space. */ containsNode(node: Node): boolean { return node.parentNode !== null && this.contains(DOMLoc.makePointingTo(node)); } /** * If the current location is irrelevant node, then produce a new relevant * location pointing to the contextually irrelevant node. This is "escaping" * the node in the sense that the location provided by this method is pointing * at the irrelevant node *from outside*. * * This method also normalizes the location. * * @param location The location to escape. * * @returns If ``location`` was already relevant, and already normalized, then * return ``location``. Otherwise, the new relevant location. * * @throws {DOMSpaceScopeError} If ``location`` is not within the space. */ escapeIrrelevantNode(location: DOMLoc): DOMLoc { if (!this.contains(location)) { throw new DOMSpaceScopeError(); } const normalized = location.normalizeOffset(); let node: Node | null = normalized.node; const ancestorsAndSelf: Node[] = []; while (node !== null && this.containsNode(node)) { ancestorsAndSelf.push(node); node = node.parentNode; } // We reverse the nodes to scan them form topmost node down to the original // location. const reversed = ancestorsAndSelf.reverse(); const first = reversed[0]; for (const candidate of reversed) { if (!this.isRelevant(candidate)) { // parentNode cannot be null, unless we are the first in the array. // tslint:disable-next-line:no-non-null-assertion const parentNode = candidate.parentNode!; // If this is the first candidate, then the parent is outside the // container, and we cannot use it. We don't have a good location to // return. This should never happen because the container is required to // be relevant. if (candidate === first) { /* istanbul ignore next: there's no means to generate this error */ throw new Error("internal error: we should always be able to escape \ a location which is inside the space"); } return new DOMLoc(parentNode, indexOf(parentNode.childNodes, candidate)); } } // None of the ancestors or the node itself were irrelevant, so the original // location was fine. return normalized; } /** * Compute the next relevant location from a starting point. * * @param start The location from which to start. * * @returns The next relevant location. 
Or ``null`` if there is no next * relevant location within the space. Remember: the *location* is relevant, * but can point to an irrelevant node. * * @throws {DOMSpaceScopeError} If ``start`` is not within the space. * * @throws {CannotEscapeIrrelevantNode} If ``start`` is irrelevant and cannot * be escaped. */ next(start: DOMLoc): DOMLoc | null { // tslint:disable-next-line:prefer-const let { node, offset } = this.escapeIrrelevantNode(start); let loc: DOMLoc | undefined; switch (node.nodeType) { case Node.DOCUMENT_FRAGMENT_NODE: case Node.DOCUMENT_NODE: case Node.ELEMENT_NODE: const pointedNode = node.childNodes[offset++]; if (pointedNode !== undefined) { loc = this.isRelevant(pointedNode) ? new DOMLoc(pointedNode, 0) : new DOMLoc(node, offset); } break; case Node.TEXT_NODE: if (++offset <= (node as Text).length) { loc = new DOMLoc(node, offset); } break; /* istanbul ignore next: we cannot get there */ default: // Due to escapeIrrelevantNode, we should never get here. throw new Error(`internal error: unexpected type ${node.nodeType}`); } if (loc === undefined) { // If we get here, we have to move to the sibling after our starting node. // Note that because of the escapeIrrelevantNode at the beginning of this // function, the parent we encounter is necessarily relevant. const { parentNode } = node; if (parentNode === null) { return null; } loc = new DOMLoc(parentNode, indexOf(parentNode.childNodes, node) + 1); } return this.contains(loc) ? loc : null; } /** * Compute the previous relevant location from a starting point. * * @param start The location from which to start. * * @returns The previous relevant location. Or ``null`` if there is no * previous relevant location inside the space. Remember: the *location* is * relevant, but can point to an irrelevant node. * * @throws {DOMSpaceScopeError} If ``start`` is not within the space. * * @throws {CannotEscapeIrrelevantNode} If ``start`` is irrelevant and cannot * be escaped. */ previous(start: DOMLoc): DOMLoc | null { // tslint:disable-next-line:prefer-const let { node, offset } = this.escapeIrrelevantNode(start); let loc: DOMLoc | undefined; switch (node.nodeType) { case Node.DOCUMENT_FRAGMENT_NODE: case Node.DOCUMENT_NODE: case Node.ELEMENT_NODE: const pointedNode = node.childNodes[--offset]; if (pointedNode !== undefined) { loc = this.isRelevant(pointedNode) ? new DOMLoc(pointedNode, pointedNode.nodeType === Node.TEXT_NODE ? (pointedNode as Text).length : pointedNode.childNodes.length) : new DOMLoc(node, offset); } break; case Node.TEXT_NODE: if (--offset >= 0) { loc = new DOMLoc(node, offset); } break; /* istanbul ignore next: we cannot get there */ default: // Due to escapeIrrelevantNode, we should never get here. throw new Error(`internal error: unexpected type ${node.nodeType}`); } if (loc === undefined) { // If we get here, we have to move to the sibling before our starting // node. // Note that because of the escapeIrrelevantNode at the beginning of this // function, the parents we encounter are necessarily relevant. const { parentNode } = node; if (parentNode === null) { return null; } loc = new DOMLoc(parentNode, indexOf(parentNode.childNodes, node)); } return this.contains(loc) ? loc : null; } /** * Produce an iterable iterator that iterates in document order. */ *[Symbol.iterator](): IterableIterator<DOMLoc> { let current: DOMLoc | null = this.min; do { yield current; current = this.next(current); } while (current !== null); } /** * Produce an iterable iterator that iterates in reverse document order. 
*/ *reversed(): IterableIterator<DOMLoc> { let current: DOMLoc | null = this.max; do { yield current; current = this.previous(current); } while (current !== null); } }
pointedCompare
identifier_name
dom-movement.ts
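To tie the DOMLoc/DOMSpace pieces above together, here is a short usage sketch. It assumes a browser-like DOM (or jsdom) and that the module above is importable from a local "./dom-movement" path; the import path and the sample markup are illustrative assumptions, not part of the source.

import { DOMSpace } from "./dom-movement";

// Sample markup taken from the DOMLoc docstring: <p>I am a <b>little</b> teapot.</p>
const p = document.createElement("p");
p.innerHTML = "I am a <b>little</b> teapot.";

// A space spanning the whole paragraph, from (p, 0) to (p, p.childNodes.length).
const space = DOMSpace.makeSpanningNode(p);

// Walk every location in document order; each yielded value is a DOMLoc.
for (const loc of space) {
  console.log(loc.node.nodeName, loc.offset);
}

// Supplying a relevance test prevents the space from producing locations
// inside <b> elements (the <b> node itself can still be pointed at).
const noBold = DOMSpace.makeSpanningNode(
  p, node => !(node instanceof Element && node.tagName === "B"));
console.log([...noBold].length < [...space].length); // true: nothing inside <b> is produced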
/** * This is required to work around a problem when extending built-in classes * like ``Error``. Some of the constructors for these classes return a value * from the constructor, which is then picked up by the constructors generated * by TypeScript (same with ES6 code transpiled through Babel), and this messes * up the inheritance chain. * * See https://github.com/Microsoft/TypeScript/issues/12123. */ // tslint:disable-next-line:ban-types export function fixPrototype(obj: any, parent: Function): void { // getPrototypeOf is supported as far back as IE9 const oldProto = Object.getPrototypeOf(obj); if (oldProto !== parent) { // setPrototypeOf is supported as far back as IE11 if (Object.setPrototypeOf !== undefined) { Object.setPrototypeOf(obj, parent.prototype); } else { obj.__proto__ = parent.prototype; } } } /** * This error is raised when a location is passed to a [[DOMSpace]] instance and * the location is not within the space. */ export class DOMSpaceScopeError extends Error { constructor() { super("location is not within the space"); fixPrototype(this, DOMSpaceScopeError); } } /** * This error is raised when a location into an irrelevant node cannot be * escaped. */ export class CannotEscapeIrrelevantNode extends Error { constructor() { super("location is irrelevant and cannot be escaped"); fixPrototype(this, CannotEscapeIrrelevantNode); } } /** * This error is raised when trying to specify a range with a minimum end point * which is past the maximum end point. */ export class ReversedRangeError extends Error { constructor() { super("tried to use a reversed range"); fixPrototype(this, ReversedRangeError); } } /** * This error is raised when trying to specify a range with a minimum end point * which is past the maximum end point. */ export class ComparingDisconnectedNodes extends Error { constructor() { super("cannot compare disconnected nodes"); fixPrototype(this, ComparingDisconnectedNodes); } } /** * Compare a ``[node, offset]`` location with another node which is known to be * a child of ``node``. * * @param node The node of the location. * * @param offset The offset of the location. * * @param child Another node to compare with ``[node, offset]`` which we already * know is a child of ``node``. * * @returns -1 if ``[node, offset]`` is before ``child``, 1 otherwise. */ function pointedCompare(node: Node, offset: number, child: Node): 1 | 0 | -1 { const pointed = node.childNodes[offset]; if (pointed === undefined) { // Undefined means we are after all other elements. (A negative offset, // before all nodes, is not possible here.) return 1; } // We return -1 when pointed === child because the actual position we care // about is *inside* child. Since it is inside child, ``[node, offset]`` // necessarily precedes that location. return pointed === child || // tslint:disable-next-line:no-bitwise (pointed.compareDocumentPosition(child) & Node.DOCUMENT_POSITION_FOLLOWING) !== 0 ? -1 : // child follows pointed 1; // child is before pointed } /** * Models a DOM location. A DOM location is a pair of node and offset. * * In theory it would be possible to support nodes of any type, but this library * currently only supports only ``Element``, ``Document``, ``DocumentFragment``, * and ``Text`` for the node. * * Consider the following example: * * <p>I am a <b>little</b> teapot.</p> * * A location of ``(p, 0)`` points to the first text node of the top * level ``p`` element. * * A location of ``(p.childNodes[0], 0)`` points to the letter "I" in first text * node inside ``p``. 
* * A location of ``(p.childNodes[0], 7)`` points to the end of the first text * node inside ``p``. This is a location after all the text in the node. * * A location of ``(p, 1)`` points to the ``b`` element inside ``p``. */ export class DOMLoc { constructor(readonly node: Node, readonly offset: number) { if (offset < 0) { throw new Error("offset cannot be negative"); } } static makePointingTo(node: Node): DOMLoc { const parent = node.parentNode; if (parent === null) { throw new Error("cannot point a node without a parent"); } return new DOMLoc(parent, indexOf(parent.childNodes, node)); } /** * @returns A new [[Location]], if the ``node``, ``offset`` pair are not equal * to those of this location. Otherwise, return ``this``. */ newIfDifferent(node: Node, offset: number): DOMLoc { return (this.node === node && this.offset === offset) ? this : new DOMLoc(node, offset); } /** * This is the node to which this location points. When the location points to * a text node, the pointed node is the text node. When the location points to * anything else, the pointed node is the child node at the offset of the * location. This may be undefined when the location points beyond the last * child. */ get pointedNode(): Node | null { const { node } = this; if (node.nodeType === Node.TEXT_NODE) { return node; } const pointed = node.childNodes[this.offset]; return pointed === undefined ? null : pointed; } /** * The offset contained by this location, but normalized. An offset pointing * beyond the end of the node's data will be normalized to point at the end of * the node. */ get normalizedOffset(): number { const { offset, node } = this; switch (node.nodeType) { case Node.DOCUMENT_NODE: case Node.DOCUMENT_FRAGMENT_NODE: case Node.ELEMENT_NODE: { const { childNodes: { length } } = node; return offset > length ? length : offset; } case Node.TEXT_NODE: { const { length } = node as Text; return offset > length ? length : offset; } default: throw new Error(`cannot normalize offset in a node of type: \ ${node.nodeType}`); } } /** * ``true`` if the location is already normalized. ``false`` if not. */ get isNormalized(): boolean { return this.offset === this.normalizedOffset; } /** * Convert a location with an offset which is out of bounds, to a location * with an offset within bounds. * * An offset less than 0 will be normalized to 0. An offset pointing beyond * the end of the node's data will be normalized to point at the end of the * node. * * @returns A new [[Location]], if the offset was adjusted. Otherwise, it * returns ``this``. */ normalizeOffset(): DOMLoc { const normalized = this.normalizedOffset; const { offset, node } = this; return normalized === offset ? this : new DOMLoc(node, normalized); } /** * Determine whether this location and another location are equal. * * @returns Whether ``this`` and ``other`` are equal. */ equals(other: DOMLoc | undefined | null): boolean { return other != null && (this === other || (this.node === other.node && this.offset === other.offset)); } /** * Compare this location with another in document order. * * @param other The other location to compare. * * @returns -1 if ``this`` is earlier than ``other``, ``0`` if the two * locations are equal, 1 if ``this`` is later than ``other``. * * @throws {ComparingDisconnectedNodes} If the two nodes are "disconnected" * (i.e. do not belong to the same document). */ compare(other: DOMLoc): -1 | 0 | 1
} type NodeTest = (node: Node) => boolean; // tslint:disable-next-line:no-any function indexOf(arrayLike: any, el: any): number { return Array.prototype.indexOf.call(arrayLike, el); } /** * A space delimits a part of a DOM tree in which one can obtain locations. */ export class DOMSpace implements Iterable<DOMLoc> { /** * @param min The minimum location included in this space. * * @param max The maximum location included in this space. * * @param relevanceTest A test to determine whether a node is relevant. This * space does not produce locations into irrelevant nodes. * * @throws {CannotEscapeIrrelevantNode} If the container is irrelevant. * * @throw {ReversedRangeError} If ``max`` is less than ``min``. */ constructor(readonly min: DOMLoc, readonly max: DOMLoc, readonly relevanceTest: NodeTest = () => true) { if (!(this.isRelevant(min.node) && this.isRelevant(max.node))) { throw new CannotEscapeIrrelevantNode(); } // Man could be equal to min but it cannot be less than min. if (max.compare(min) < 0) { throw new ReversedRangeError(); } } static makeSpanningNode(node: Node, relevanceTest?: NodeTest): DOMSpace { return new DOMSpace(new DOMLoc(node, 0), new DOMLoc(node, node.childNodes.length), relevanceTest); } /** * Test whether a node is contextually relevant. This method runs some stock * tests and if necessary calls [[Space.relevanceTest]]. * * @param node The node to test. * * @returns ``true`` if the node is contextually relevant, ``false`` if not. */ isRelevant(node: Node): boolean { const { nodeType } = node; return (nodeType === Node.ELEMENT_NODE || nodeType === Node.TEXT_NODE || nodeType === Node.DOCUMENT_NODE || nodeType === Node.DOCUMENT_FRAGMENT_NODE) && this.relevanceTest(node); } /** * Determine whether this space contains a location. * * @param loc The location to test. * * @returns Whether the location is inside the space. */ contains(loc: DOMLoc): boolean { try { return this.min.compare(loc) <= 0 && this.max.compare(loc) >= 0; } catch (ex) { if (ex instanceof ComparingDisconnectedNodes) { return false; } /* istanbul ignore next: there's currently no way to get here */ throw ex; } } /** * Determine whether this space contains a node. * * @param node The node to test. * * @returns Whether the node is inside the space. */ containsNode(node: Node): boolean { return node.parentNode !== null && this.contains(DOMLoc.makePointingTo(node)); } /** * If the current location is irrelevant node, then produce a new relevant * location pointing to the contextually irrelevant node. This is "escaping" * the node in the sense that the location provided by this method is pointing * at the irrelevant node *from outside*. * * This method also normalizes the location. * * @param location The location to escape. * * @returns If ``location`` was already relevant, and already normalized, then * return ``location``. Otherwise, the new relevant location. * * @throws {DOMSpaceScopeError} If ``location`` is not within the space. */ escapeIrrelevantNode(location: DOMLoc): DOMLoc { if (!this.contains(location)) { throw new DOMSpaceScopeError(); } const normalized = location.normalizeOffset(); let node: Node | null = normalized.node; const ancestorsAndSelf: Node[] = []; while (node !== null && this.containsNode(node)) { ancestorsAndSelf.push(node); node = node.parentNode; } // We reverse the nodes to scan them form topmost node down to the original // location. 
const reversed = ancestorsAndSelf.reverse(); const first = reversed[0]; for (const candidate of reversed) { if (!this.isRelevant(candidate)) { // parentNode cannot be null, unless we are the first in the array. // tslint:disable-next-line:no-non-null-assertion const parentNode = candidate.parentNode!; // If this is the first candidate, then the parent is outside the // container, and we cannot use it. We don't have a good location to // return. This should never happen because the container is required to // be relevant. if (candidate === first) { /* istanbul ignore next: there's no means to generate this error */ throw new Error("internal error: we should always be able to escape \ a location which is inside the space"); } return new DOMLoc(parentNode, indexOf(parentNode.childNodes, candidate)); } } // None of the ancestors or the node itself were irrelevant, so the original // location was fine. return normalized; } /** * Compute the next relevant location from a starting point. * * @param start The location from which to start. * * @returns The next relevant location. Or ``null`` if there is no next * relevant location within the space. Remember: the *location* is relevant, * but can point to an irrelevant node. * * @throws {DOMSpaceScopeError} If ``start`` is not within the space. * * @throws {CannotEscapeIrrelevantNode} If ``start`` is irrelevant and cannot * be escaped. */ next(start: DOMLoc): DOMLoc | null { // tslint:disable-next-line:prefer-const let { node, offset } = this.escapeIrrelevantNode(start); let loc: DOMLoc | undefined; switch (node.nodeType) { case Node.DOCUMENT_FRAGMENT_NODE: case Node.DOCUMENT_NODE: case Node.ELEMENT_NODE: const pointedNode = node.childNodes[offset++]; if (pointedNode !== undefined) { loc = this.isRelevant(pointedNode) ? new DOMLoc(pointedNode, 0) : new DOMLoc(node, offset); } break; case Node.TEXT_NODE: if (++offset <= (node as Text).length) { loc = new DOMLoc(node, offset); } break; /* istanbul ignore next: we cannot get there */ default: // Due to escapeIrrelevantNode, we should never get here. throw new Error(`internal error: unexpected type ${node.nodeType}`); } if (loc === undefined) { // If we get here, we have to move to the sibling after our starting node. // Note that because of the escapeIrrelevantNode at the beginning of this // function, the parent we encounter is necessarily relevant. const { parentNode } = node; if (parentNode === null) { return null; } loc = new DOMLoc(parentNode, indexOf(parentNode.childNodes, node) + 1); } return this.contains(loc) ? loc : null; } /** * Compute the previous relevant location from a starting point. * * @param start The location from which to start. * * @returns The previous relevant location. Or ``null`` if there is no * previous relevant location inside the space. Remember: the *location* is * relevant, but can point to an irrelevant node. * * @throws {DOMSpaceScopeError} If ``start`` is not within the space. * * @throws {CannotEscapeIrrelevantNode} If ``start`` is irrelevant and cannot * be escaped. */ previous(start: DOMLoc): DOMLoc | null { // tslint:disable-next-line:prefer-const let { node, offset } = this.escapeIrrelevantNode(start); let loc: DOMLoc | undefined; switch (node.nodeType) { case Node.DOCUMENT_FRAGMENT_NODE: case Node.DOCUMENT_NODE: case Node.ELEMENT_NODE: const pointedNode = node.childNodes[--offset]; if (pointedNode !== undefined) { loc = this.isRelevant(pointedNode) ? new DOMLoc(pointedNode, pointedNode.nodeType === Node.TEXT_NODE ? 
(pointedNode as Text).length : pointedNode.childNodes.length) : new DOMLoc(node, offset); } break; case Node.TEXT_NODE: if (--offset >= 0) { loc = new DOMLoc(node, offset); } break; /* istanbul ignore next: we cannot get there */ default: // Due to escapeIrrelevantNode, we should never get here. throw new Error(`internal error: unexpected type ${node.nodeType}`); } if (loc === undefined) { // If we get here, we have to move to the sibling before our starting // node. // Note that because of the escapeIrrelevantNode at the beginning of this // function, the parents we encounter are necessarily relevant. const { parentNode } = node; if (parentNode === null) { return null; } loc = new DOMLoc(parentNode, indexOf(parentNode.childNodes, node)); } return this.contains(loc) ? loc : null; } /** * Produce an iterable iterator that iterates in document order. */ *[Symbol.iterator](): IterableIterator<DOMLoc> { let current: DOMLoc | null = this.min; do { yield current; current = this.next(current); } while (current !== null); } /** * Produce an iterable iterator that iterates in reverse document order. */ *reversed(): IterableIterator<DOMLoc> { let current: DOMLoc | null = this.max; do { yield current; current = this.previous(current); } while (current !== null); } }
{ if (this.equals(other)) { return 0; } const { node, offset } = this; const { node: otherNode, offset: otherOffset } = other; if (node === otherNode) { // The case where offset === otherOffset cannot happen here because it is // covered above. return offset - otherOffset < 0 ? -1 : 1; } const result = node.compareDocumentPosition(otherNode); // tslint:disable:no-bitwise if ((result & Node.DOCUMENT_POSITION_DISCONNECTED) !== 0) { throw new ComparingDisconnectedNodes(); } if ((result & Node.DOCUMENT_POSITION_FOLLOWING) !== 0) { // otherNode follows node. return (result & Node.DOCUMENT_POSITION_CONTAINED_BY) !== 0 ? // otherNode is contained by node but we still need to figure out the // relative positions of the node pointed by [node, offset] and // otherNode. pointedCompare(node, offset, otherNode) : // otherNode just follows node, no parent child relation -1; } if ((result & Node.DOCUMENT_POSITION_PRECEDING) === 0) { /* istanbul ignore next: there's no means to generate this error */ throw new Error("neither preceding nor following: this should not \ happen"); } // otherNode precedes node. return ((result & Node.DOCUMENT_POSITION_CONTAINS) !== 0 && // otherNode contains node but we still need to figure out the // relative positions of the node pointed by [otherNode, // otherOffset] and node. pointedCompare(otherNode, otherOffset, node) > 0) ? -1 : 1; // tslint:enable:no-bitwise }
identifier_body
dom-movement.ts
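The ordering rules implemented by ``compare`` and ``pointedCompare`` above can be made concrete with a small sketch. As before, this is a hedged example assuming a DOM environment and an illustrative "./dom-movement" import path; the specific nodes and offsets are made up for demonstration.

import { DOMLoc, ComparingDisconnectedNodes } from "./dom-movement";

const p = document.createElement("p");
p.innerHTML = "I am a <b>little</b> teapot.";

const atStart = new DOMLoc(p, 0);               // before the first text node
const inText = new DOMLoc(p.childNodes[0], 2);  // inside "I am a "
const atBold = new DOMLoc(p, 1);                // pointing at the <b> element

console.log(atStart.compare(inText)); // -1: (p, 0) precedes a location inside its first child
console.log(inText.compare(atBold));  // -1: text in the first child precedes (p, 1)
console.log(atBold.compare(atBold));  //  0: equal locations compare as equal

// Out-of-range offsets are clamped by normalizeOffset rather than rejected.
const past = new DOMLoc(p.childNodes[0], 999);
console.log(past.normalizeOffset().offset); // 7, the length of "I am a "

// Locations in disconnected trees cannot be ordered.
try {
  atStart.compare(new DOMLoc(document.createElement("div"), 0));
} catch (e) {
  console.log(e instanceof ComparingDisconnectedNodes); // true
}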
/** * This is required to work around a problem when extending built-in classes * like ``Error``. Some of the constructors for these classes return a value * from the constructor, which is then picked up by the constructors generated * by TypeScript (same with ES6 code transpiled through Babel), and this messes * up the inheritance chain. * * See https://github.com/Microsoft/TypeScript/issues/12123. */ // tslint:disable-next-line:ban-types export function fixPrototype(obj: any, parent: Function): void { // getPrototypeOf is supported as far back as IE9 const oldProto = Object.getPrototypeOf(obj); if (oldProto !== parent) { // setPrototypeOf is supported as far back as IE11 if (Object.setPrototypeOf !== undefined) { Object.setPrototypeOf(obj, parent.prototype); } else { obj.__proto__ = parent.prototype; } } } /** * This error is raised when a location is passed to a [[DOMSpace]] instance and * the location is not within the space. */ export class DOMSpaceScopeError extends Error { constructor() { super("location is not within the space"); fixPrototype(this, DOMSpaceScopeError); } } /** * This error is raised when a location into an irrelevant node cannot be * escaped. */ export class CannotEscapeIrrelevantNode extends Error { constructor() { super("location is irrelevant and cannot be escaped"); fixPrototype(this, CannotEscapeIrrelevantNode); } } /** * This error is raised when trying to specify a range with a minimum end point * which is past the maximum end point. */ export class ReversedRangeError extends Error { constructor() { super("tried to use a reversed range"); fixPrototype(this, ReversedRangeError); } } /** * This error is raised when trying to specify a range with a minimum end point * which is past the maximum end point. */ export class ComparingDisconnectedNodes extends Error { constructor() { super("cannot compare disconnected nodes"); fixPrototype(this, ComparingDisconnectedNodes); } } /** * Compare a ``[node, offset]`` location with another node which is known to be * a child of ``node``. * * @param node The node of the location. * * @param offset The offset of the location. * * @param child Another node to compare with ``[node, offset]`` which we already * know is a child of ``node``. * * @returns -1 if ``[node, offset]`` is before ``child``, 1 otherwise. */ function pointedCompare(node: Node, offset: number, child: Node): 1 | 0 | -1 { const pointed = node.childNodes[offset]; if (pointed === undefined) { // Undefined means we are after all other elements. (A negative offset, // before all nodes, is not possible here.) return 1; } // We return -1 when pointed === child because the actual position we care // about is *inside* child. Since it is inside child, ``[node, offset]`` // necessarily precedes that location. return pointed === child || // tslint:disable-next-line:no-bitwise (pointed.compareDocumentPosition(child) & Node.DOCUMENT_POSITION_FOLLOWING) !== 0 ? -1 : // child follows pointed 1; // child is before pointed } /** * Models a DOM location. A DOM location is a pair of node and offset. * * In theory it would be possible to support nodes of any type, but this library * currently only supports only ``Element``, ``Document``, ``DocumentFragment``, * and ``Text`` for the node. * * Consider the following example: * * <p>I am a <b>little</b> teapot.</p> * * A location of ``(p, 0)`` points to the first text node of the top * level ``p`` element. * * A location of ``(p.childNodes[0], 0)`` points to the letter "I" in first text * node inside ``p``. 
* * A location of ``(p.childNodes[0], 7)`` points to the end of the first text * node inside ``p``. This is a location after all the text in the node. * * A location of ``(p, 1)`` points to the ``b`` element inside ``p``. */ export class DOMLoc { constructor(readonly node: Node, readonly offset: number) { if (offset < 0) { throw new Error("offset cannot be negative"); } } static makePointingTo(node: Node): DOMLoc { const parent = node.parentNode; if (parent === null) { throw new Error("cannot point a node without a parent"); } return new DOMLoc(parent, indexOf(parent.childNodes, node)); } /** * @returns A new [[Location]], if the ``node``, ``offset`` pair are not equal * to those of this location. Otherwise, return ``this``. */ newIfDifferent(node: Node, offset: number): DOMLoc { return (this.node === node && this.offset === offset) ? this : new DOMLoc(node, offset); } /** * This is the node to which this location points. When the location points to * a text node, the pointed node is the text node. When the location points to * anything else, the pointed node is the child node at the offset of the * location. This may be undefined when the location points beyond the last * child. */ get pointedNode(): Node | null { const { node } = this; if (node.nodeType === Node.TEXT_NODE) { return node; } const pointed = node.childNodes[this.offset]; return pointed === undefined ? null : pointed; } /** * The offset contained by this location, but normalized. An offset pointing * beyond the end of the node's data will be normalized to point at the end of * the node. */ get normalizedOffset(): number { const { offset, node } = this; switch (node.nodeType) { case Node.DOCUMENT_NODE: case Node.DOCUMENT_FRAGMENT_NODE: case Node.ELEMENT_NODE: { const { childNodes: { length } } = node; return offset > length ? length : offset; } case Node.TEXT_NODE: { const { length } = node as Text; return offset > length ? length : offset; } default: throw new Error(`cannot normalize offset in a node of type: \ ${node.nodeType}`); } }
return this.offset === this.normalizedOffset; } /** * Convert a location with an offset which is out of bounds, to a location * with an offset within bounds. * * An offset less than 0 will be normalized to 0. An offset pointing beyond * the end of the node's data will be normalized to point at the end of the * node. * * @returns A new [[Location]], if the offset was adjusted. Otherwise, it * returns ``this``. */ normalizeOffset(): DOMLoc { const normalized = this.normalizedOffset; const { offset, node } = this; return normalized === offset ? this : new DOMLoc(node, normalized); } /** * Determine whether this location and another location are equal. * * @returns Whether ``this`` and ``other`` are equal. */ equals(other: DOMLoc | undefined | null): boolean { return other != null && (this === other || (this.node === other.node && this.offset === other.offset)); } /** * Compare this location with another in document order. * * @param other The other location to compare. * * @returns -1 if ``this`` is earlier than ``other``, ``0`` if the two * locations are equal, 1 if ``this`` is later than ``other``. * * @throws {ComparingDisconnectedNodes} If the two nodes are "disconnected" * (i.e. do not belong to the same document). */ compare(other: DOMLoc): -1 | 0 | 1 { if (this.equals(other)) { return 0; } const { node, offset } = this; const { node: otherNode, offset: otherOffset } = other; if (node === otherNode) { // The case where offset === otherOffset cannot happen here because it is // covered above. return offset - otherOffset < 0 ? -1 : 1; } const result = node.compareDocumentPosition(otherNode); // tslint:disable:no-bitwise if ((result & Node.DOCUMENT_POSITION_DISCONNECTED) !== 0) { throw new ComparingDisconnectedNodes(); } if ((result & Node.DOCUMENT_POSITION_FOLLOWING) !== 0) { // otherNode follows node. return (result & Node.DOCUMENT_POSITION_CONTAINED_BY) !== 0 ? // otherNode is contained by node but we still need to figure out the // relative positions of the node pointed by [node, offset] and // otherNode. pointedCompare(node, offset, otherNode) : // otherNode just follows node, no parent child relation -1; } if ((result & Node.DOCUMENT_POSITION_PRECEDING) === 0) { /* istanbul ignore next: there's no means to generate this error */ throw new Error("neither preceding nor following: this should not \ happen"); } // otherNode precedes node. return ((result & Node.DOCUMENT_POSITION_CONTAINS) !== 0 && // otherNode contains node but we still need to figure out the // relative positions of the node pointed by [otherNode, // otherOffset] and node. pointedCompare(otherNode, otherOffset, node) > 0) ? -1 : 1; // tslint:enable:no-bitwise } } type NodeTest = (node: Node) => boolean; // tslint:disable-next-line:no-any function indexOf(arrayLike: any, el: any): number { return Array.prototype.indexOf.call(arrayLike, el); } /** * A space delimits a part of a DOM tree in which one can obtain locations. */ export class DOMSpace implements Iterable<DOMLoc> { /** * @param min The minimum location included in this space. * * @param max The maximum location included in this space. * * @param relevanceTest A test to determine whether a node is relevant. This * space does not produce locations into irrelevant nodes. * * @throws {CannotEscapeIrrelevantNode} If the container is irrelevant. * * @throw {ReversedRangeError} If ``max`` is less than ``min``. 
*/ constructor(readonly min: DOMLoc, readonly max: DOMLoc, readonly relevanceTest: NodeTest = () => true) { if (!(this.isRelevant(min.node) && this.isRelevant(max.node))) { throw new CannotEscapeIrrelevantNode(); } // Man could be equal to min but it cannot be less than min. if (max.compare(min) < 0) { throw new ReversedRangeError(); } } static makeSpanningNode(node: Node, relevanceTest?: NodeTest): DOMSpace { return new DOMSpace(new DOMLoc(node, 0), new DOMLoc(node, node.childNodes.length), relevanceTest); } /** * Test whether a node is contextually relevant. This method runs some stock * tests and if necessary calls [[Space.relevanceTest]]. * * @param node The node to test. * * @returns ``true`` if the node is contextually relevant, ``false`` if not. */ isRelevant(node: Node): boolean { const { nodeType } = node; return (nodeType === Node.ELEMENT_NODE || nodeType === Node.TEXT_NODE || nodeType === Node.DOCUMENT_NODE || nodeType === Node.DOCUMENT_FRAGMENT_NODE) && this.relevanceTest(node); } /** * Determine whether this space contains a location. * * @param loc The location to test. * * @returns Whether the location is inside the space. */ contains(loc: DOMLoc): boolean { try { return this.min.compare(loc) <= 0 && this.max.compare(loc) >= 0; } catch (ex) { if (ex instanceof ComparingDisconnectedNodes) { return false; } /* istanbul ignore next: there's currently no way to get here */ throw ex; } } /** * Determine whether this space contains a node. * * @param node The node to test. * * @returns Whether the node is inside the space. */ containsNode(node: Node): boolean { return node.parentNode !== null && this.contains(DOMLoc.makePointingTo(node)); } /** * If the current location is irrelevant node, then produce a new relevant * location pointing to the contextually irrelevant node. This is "escaping" * the node in the sense that the location provided by this method is pointing * at the irrelevant node *from outside*. * * This method also normalizes the location. * * @param location The location to escape. * * @returns If ``location`` was already relevant, and already normalized, then * return ``location``. Otherwise, the new relevant location. * * @throws {DOMSpaceScopeError} If ``location`` is not within the space. */ escapeIrrelevantNode(location: DOMLoc): DOMLoc { if (!this.contains(location)) { throw new DOMSpaceScopeError(); } const normalized = location.normalizeOffset(); let node: Node | null = normalized.node; const ancestorsAndSelf: Node[] = []; while (node !== null && this.containsNode(node)) { ancestorsAndSelf.push(node); node = node.parentNode; } // We reverse the nodes to scan them form topmost node down to the original // location. const reversed = ancestorsAndSelf.reverse(); const first = reversed[0]; for (const candidate of reversed) { if (!this.isRelevant(candidate)) { // parentNode cannot be null, unless we are the first in the array. // tslint:disable-next-line:no-non-null-assertion const parentNode = candidate.parentNode!; // If this is the first candidate, then the parent is outside the // container, and we cannot use it. We don't have a good location to // return. This should never happen because the container is required to // be relevant. 
if (candidate === first) { /* istanbul ignore next: there's no means to generate this error */ throw new Error("internal error: we should always be able to escape \ a location which is inside the space"); } return new DOMLoc(parentNode, indexOf(parentNode.childNodes, candidate)); } } // None of the ancestors or the node itself were irrelevant, so the original // location was fine. return normalized; } /** * Compute the next relevant location from a starting point. * * @param start The location from which to start. * * @returns The next relevant location. Or ``null`` if there is no next * relevant location within the space. Remember: the *location* is relevant, * but can point to an irrelevant node. * * @throws {DOMSpaceScopeError} If ``start`` is not within the space. * * @throws {CannotEscapeIrrelevantNode} If ``start`` is irrelevant and cannot * be escaped. */ next(start: DOMLoc): DOMLoc | null { // tslint:disable-next-line:prefer-const let { node, offset } = this.escapeIrrelevantNode(start); let loc: DOMLoc | undefined; switch (node.nodeType) { case Node.DOCUMENT_FRAGMENT_NODE: case Node.DOCUMENT_NODE: case Node.ELEMENT_NODE: const pointedNode = node.childNodes[offset++]; if (pointedNode !== undefined) { loc = this.isRelevant(pointedNode) ? new DOMLoc(pointedNode, 0) : new DOMLoc(node, offset); } break; case Node.TEXT_NODE: if (++offset <= (node as Text).length) { loc = new DOMLoc(node, offset); } break; /* istanbul ignore next: we cannot get there */ default: // Due to escapeIrrelevantNode, we should never get here. throw new Error(`internal error: unexpected type ${node.nodeType}`); } if (loc === undefined) { // If we get here, we have to move to the sibling after our starting node. // Note that because of the escapeIrrelevantNode at the beginning of this // function, the parent we encounter is necessarily relevant. const { parentNode } = node; if (parentNode === null) { return null; } loc = new DOMLoc(parentNode, indexOf(parentNode.childNodes, node) + 1); } return this.contains(loc) ? loc : null; } /** * Compute the previous relevant location from a starting point. * * @param start The location from which to start. * * @returns The previous relevant location. Or ``null`` if there is no * previous relevant location inside the space. Remember: the *location* is * relevant, but can point to an irrelevant node. * * @throws {DOMSpaceScopeError} If ``start`` is not within the space. * * @throws {CannotEscapeIrrelevantNode} If ``start`` is irrelevant and cannot * be escaped. */ previous(start: DOMLoc): DOMLoc | null { // tslint:disable-next-line:prefer-const let { node, offset } = this.escapeIrrelevantNode(start); let loc: DOMLoc | undefined; switch (node.nodeType) { case Node.DOCUMENT_FRAGMENT_NODE: case Node.DOCUMENT_NODE: case Node.ELEMENT_NODE: const pointedNode = node.childNodes[--offset]; if (pointedNode !== undefined) { loc = this.isRelevant(pointedNode) ? new DOMLoc(pointedNode, pointedNode.nodeType === Node.TEXT_NODE ? (pointedNode as Text).length : pointedNode.childNodes.length) : new DOMLoc(node, offset); } break; case Node.TEXT_NODE: if (--offset >= 0) { loc = new DOMLoc(node, offset); } break; /* istanbul ignore next: we cannot get there */ default: // Due to escapeIrrelevantNode, we should never get here. throw new Error(`internal error: unexpected type ${node.nodeType}`); } if (loc === undefined) { // If we get here, we have to move to the sibling before our starting // node. 
// Note that because of the escapeIrrelevantNode at the beginning of this // function, the parents we encounter are necessarily relevant. const { parentNode } = node; if (parentNode === null) { return null; } loc = new DOMLoc(parentNode, indexOf(parentNode.childNodes, node)); } return this.contains(loc) ? loc : null; } /** * Produce an iterable iterator that iterates in document order. */ *[Symbol.iterator](): IterableIterator<DOMLoc> { let current: DOMLoc | null = this.min; do { yield current; current = this.next(current); } while (current !== null); } /** * Produce an iterable iterator that iterates in reverse document order. */ *reversed(): IterableIterator<DOMLoc> { let current: DOMLoc | null = this.max; do { yield current; current = this.previous(current); } while (current !== null); } }
/** * ``true`` if the location is already normalized. ``false`` if not. */ get isNormalized(): boolean {
random_line_split
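Since the ``fixPrototype`` helper recurs in every error class above, a minimal sketch of the pattern may be useful. The class name below is hypothetical and only illustrates how a new Error subclass would use the helper so that ``instanceof`` keeps working on down-level compile targets.

import { fixPrototype } from "./dom-movement";

// Hypothetical error class following the same pattern as DOMSpaceScopeError.
export class ExampleError extends Error {
  constructor() {
    super("example failure");
    // Repair the prototype chain that a transpiled Error constructor can break.
    fixPrototype(this, ExampleError);
  }
}

console.log(new ExampleError() instanceof ExampleError); // true, even when compiled to ES5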
routing.py
"""Routing configuration, broken out separately for ease of consultation without going through the whole app config everything. Some useful helpers are at the bottom. Be familiar with them! """ import re import floof.model as model from floof.resource import contextualize from pyramid.exceptions import NotFound from sqlalchemy.orm.exc import NoResultFound def configure_routing(config): """Adds route declarations to the app config.""" # Static file access. Separate root for each subdirectory, because Pyramid # treats these as first-class routables rather than a last-ditch fallback config.add_static_view('/css', 'floof:assets/css') config.add_static_view('/files', 'floof:assets/files') # dummy file store config.add_static_view('/icons', 'floof:assets/icons') config.add_static_view('/images', 'floof:assets/images') config.add_static_view('/js', 'floof:assets/js') # TODO this doesn't actually work config.add_static_view('/favicon.ico', 'floof:assets/favicon.ico') r = config.add_route # Miscellaneous root stuff r('root', '/') r('filestore', '/filestore/{class_}/{key}', pregenerator=filestore_pregenerator) r('reproxy', '/reproxy') r('log', '/log') # Registration and auth r('account.login', '/account/login') r('account.login_begin', '/account/login_begin') r('account.login_finish', '/account/login_finish') r('account.register', '/account/register') r('account.add_identity', '/account/add_identity') r('account.persona.login', '/account/persona/login') r('account.logout', '/account/logout') r('account.profile', '/account/profile') # Regular user control panel r('controls.index', '/account/controls') r('controls.auth', '/account/controls/authentication') r('controls.persona', '/account/controls/persona') r('controls.persona.add', '/account/controls/persona/add') r('controls.persona.remove', '/account/controls/persona/remove') r('controls.openid', '/account/controls/openid') r('controls.openid.add', '/account/controls/openid/add') r('controls.openid.add_finish', '/account/controls/openid/add_finish') r('controls.openid.remove', '/account/controls/openid/remove') r('controls.rels', '/account/controls/relationships') r('controls.rels.watch', '/account/controls/relationships/watch') r('controls.rels.unwatch', '/account/controls/relationships/unwatch') r('controls.info', '/account/controls/user_info') r('controls.certs', '/account/controls/certificates') r('controls.certs.add', '/account/controls/certificates/add') r('controls.certs.generate_server', '/account/controls/certificates/gen/cert-{name}.p12') r('controls.certs.details', '/account/controls/certificates/details/{serial:[0-9a-f]+}') r('controls.certs.download', '/account/controls/certificates/download/cert-{name}-{serial:[0-9a-f]+}.pem') r('controls.certs.revoke', '/account/controls/certificates/revoke/{serial:[0-9a-f]+}') # User pages kw = sqla_route_options('user', 'name', model.User.name) r('users.view', '/users/{name}', **kw) r('users.art', '/users/{name}/art', **kw) r('users.art_by_album', '/users/{name}/art/{album}', **kw) r('users.profile', '/users/{name}/profile', **kw) r('users.watchstream', '/users/{name}/watchstream', **kw) r('albums.user_index', '/users/{name}/albums', **kw) r('api:users.list', '/users.json') # Artwork kw = sqla_route_options('artwork', 'id', model.Artwork.id) kw['pregenerator'] = artwork_pregenerator r('art.browse', '/art') r('art.upload', '/art/upload') r('art.view', r'/art/{id:\d+}{title:(-.+)?}', **kw) r('art.add_tags', r'/art/{id:\d+}/add_tags', **kw) r('art.remove_tags', r'/art/{id:\d+}/remove_tags', **kw) 
r('art.rate', r'/art/{id:\d+}/rate', **kw) # Tags # XXX what should the tag name regex be, if anything? # XXX should the regex be checked in the 'factory' instead? way easier that way... kw = sqla_route_options('tag', 'name', model.Tag.name) r('tags.list', '/tags') r('tags.view', '/tags/{name}', **kw) r('tags.artwork', '/tags/{name}/artwork', **kw) # Albums # XXX well this is getting complicated! needs to check user, needs to check id, needs to generate correctly, needs a title like art has user_router = SugarRouter(config, '/users/{user}', model.User.name) album_router = user_router.chain('/albums/{album}', model.Album.id, rel=model.Album.user) album_router.add_route('albums.artwork', '') # Administration r('admin.dashboard', '/admin') r('admin.log', '/admin/log') # Debugging r('debug.blank', '/debug/blank') r('debug.crash', '/debug/crash') r('debug.mako-crash', '/debug/mako-crash') r('debug.status.303', '/debug/303') r('debug.status.400', '/debug/400') r('debug.status.403', '/debug/403') r('debug.status.404', '/debug/404') # Comments; made complex because they can attach to different parent URLs. # Rather than hack around how Pyramid's routes works, we can just use our # own class that does what we want! # XXX 1: make this work for users as well # XXX 2: make the other routes work # XXX 3: possibly find a way to verify that the same logic is used here and for the main routes parent_route_names = ('art.view', 'user.view') mapper = config.get_routes_mapper() parent_routes = [mapper.get_route(name) for name in parent_route_names] commentables = dict( users=model.User.name, art=model.Artwork.id, ) def comments_factory(request): # XXX prefetching on these? type = request.matchdict['type'] identifier = request.matchdict['identifier'] try: sqla_column = commentables[type] entity = model.session.query(sqla_column.parententity).filter(sqla_column == identifier).one() except (NoResultFound, KeyError): # 404! raise NotFound() if 'comment_id' not in request.matchdict: return contextualize(entity.discussion) # URLs to specific comments should have those comments as the context try: return contextualize( model.session .query(model.Comment) .with_parent(entity.discussion) .filter(model.Comment.id == request.matchdict['comment_id']) .one()) except NoResultFound: raise NotFound() def comments_pregenerator(request, elements, kw): resource = None comment = kw.get('comment', None) if comment: kw['comment_id'] = comment.id if 'resource' not in kw: resource = comment.discussion.resource if not resource: resource = kw['resource'] # XXX users... entity = resource.member kw['type'] = 'art' kw['identifier'] = entity.id return elements, kw r('comments.list', '/{type}/{identifier}/comments', factory=comments_factory) r('comments.write', '/{type}/{identifier}/comments/write', factory=comments_factory, pregenerator=comments_pregenerator) r('comments.view', '/{type}/{identifier}/comments/{comment_id}', factory=comments_factory, pregenerator=comments_pregenerator) r('comments.edit', '/{type}/{identifier}/comments/{comment_id}/edit', factory=comments_factory, pregenerator=comments_pregenerator) r('comments.reply', '/{type}/{identifier}/comments/{comment_id}/write', factory=comments_factory, pregenerator=comments_pregenerator) class SugarRouter(object): """Glues routing to the ORM. 
Use me like this: foo_router = SugarRouter(config, '/foos/{foo}', model.Foo.identifier) foo_router.add_route('foo_edit', '/edit') This will route `/foos/{foo}/edit` to `foo_edit`, with the bonus that the context will be set to the corresponding `Foo` object. The reverse works as well: request.route_url('foo_edit', foo=some_foo_row) """ # TODO: support URLs like /art/123-title-that-doesnt-matter # ...but only do it for the root url, i think def __init__(self, config, url_prefix, sqla_column, parent_router=None, rel=None): self.config = config self.url_prefix = url_prefix self.sqla_column = sqla_column self.sqla_table = sqla_column.parententity self.parent_router = parent_router self.sqla_rel = rel assert (self.parent_router is None) == (self.sqla_rel is None) # This is the {key} that appears in the matchdict and generated route, # as well as the kwarg passed to route_url match = re.search(r'[{](\w+)[}]', url_prefix) if not match: raise ValueError("Can't find a route kwarg in {0!r}".format(url_prefix)) self.key = match.group(1) ### Dealing with chaining def chain(self, url_prefix, sqla_column, rel): """Create a new sugar router with this one as the parent.""" return self.__class__( self.config, url_prefix, sqla_column, parent_router=self, rel=rel) @property def full_url_prefix(self):
def filter_sqlalchemy_query(self, query, request): """Takes a query, filters it as demanded by the matchdict, and returns a new one. """ query = query.filter(self.sqla_column == request.matchdict[self.key]) if self.parent_router: query = query.join(self.sqla_rel) query = self.parent_router.filter_sqlalchemy_query( query, request) return query ### Actual routing stuff def add_route(self, route_name, suffix, **kwargs): """Analog to `config.add_route()`, with magic baked in. Extra kwargs are passed along. """ kwargs['pregenerator'] = self.pregenerator kwargs['factory'] = self.factory self.config.add_route(route_name, self.full_url_prefix + suffix, **kwargs) def pregenerator(self, request, elements, kw): """Passed to Pyramid as a bound method when creating a route. Converts the arguments to route_url (which should be row objects) into URL-friendly strings. """ # Get the row object, and get the property from it row = kw.pop(self.key) kw[self.key] = self.sqla_column.__get__(row, type(row)) if self.parent_router: # Parent needs its own treatment here, too. Fill in the parent # object automatically kw[self.parent_router.key] = self.sqla_rel.__get__(row, type(row)) elements, kw = self.parent_router.pregenerator(request, elements, kw) return elements, kw def factory(self, request): """Passed to Pyramid as a bound method when creating a route. Translates a matched URL to an ORM row, which becomes the context. """ # This yields the "context", which should be the row object try: q = model.session.query(self.sqla_table) q = self.filter_sqlalchemy_query(q, request) return q.one() except NoResultFound: # 404! raise NotFound() def sqla_route_options(url_key, match_key, sqla_column): """Returns a dict of route options that are helpful for routes representing SQLA objects. ``url_key``: The key to use for a SQLA object when calling ``route_url()``. ``match_key``: The key in the matchdict that contains the row identifier. ``sqla_column``: The SQLA ORM column that appears in the URL. """ def pregenerator(request, elements, kw): # Get the row object, and get the property from it row = kw.pop(url_key) kw[match_key] = sqla_column.__get__(row, type(row)) return elements, kw def factory(request): # This yields the "context", which should be the row object try: return contextualize( model.session.query(sqla_column.parententity) .filter(sqla_column == request.matchdict[match_key]) .one()) except NoResultFound: # 404! raise NotFound() return dict(pregenerator=pregenerator, factory=factory) def artwork_pregenerator(request, elements, kw): """Special pregenerator for artwork URLs, which also include a title sometimes. """ artwork = kw.pop('artwork') kw['id'] = artwork.id # n.b.: this won't hurt anything if the route doesn't have {title}, so it's # calculated and thrown away. bad? if artwork.title: kw['title'] = '-' + _make_url_friendly(artwork.title) else: kw['title'] = '' return elements, kw def _make_url_friendly(title): """Given a title that will be used as flavor text in a URL, returns a string that will look less like garbage in an address bar. """ # RFC 3986 section 2.3 says: letters, numbers, and -_.~ are unreserved return re.sub('[^-_.~a-zA-Z0-9]', '-', title) def filestore_pregenerator(request, elements, kw): """Pregenerator for the filestore, which may run under a different domain name in the case of a CDN cacher thinger. """ cdn_root = request.registry.settings.get('cdn_root') if cdn_root: kw['_app_url'] = cdn_root return elements, kw
"""Constructs a chain of url prefixes going up to the root.""" if self.parent_router: ret = self.parent_router.full_url_prefix else: ret = '' ret += self.url_prefix return ret
identifier_body
routing.py
"""Routing configuration, broken out separately for ease of consultation without going through the whole app config everything. Some useful helpers are at the bottom. Be familiar with them! """ import re import floof.model as model from floof.resource import contextualize from pyramid.exceptions import NotFound from sqlalchemy.orm.exc import NoResultFound def configure_routing(config): """Adds route declarations to the app config.""" # Static file access. Separate root for each subdirectory, because Pyramid # treats these as first-class routables rather than a last-ditch fallback config.add_static_view('/css', 'floof:assets/css') config.add_static_view('/files', 'floof:assets/files') # dummy file store config.add_static_view('/icons', 'floof:assets/icons') config.add_static_view('/images', 'floof:assets/images') config.add_static_view('/js', 'floof:assets/js') # TODO this doesn't actually work config.add_static_view('/favicon.ico', 'floof:assets/favicon.ico') r = config.add_route # Miscellaneous root stuff r('root', '/') r('filestore', '/filestore/{class_}/{key}', pregenerator=filestore_pregenerator) r('reproxy', '/reproxy') r('log', '/log') # Registration and auth r('account.login', '/account/login') r('account.login_begin', '/account/login_begin') r('account.login_finish', '/account/login_finish') r('account.register', '/account/register') r('account.add_identity', '/account/add_identity') r('account.persona.login', '/account/persona/login') r('account.logout', '/account/logout') r('account.profile', '/account/profile') # Regular user control panel r('controls.index', '/account/controls') r('controls.auth', '/account/controls/authentication') r('controls.persona', '/account/controls/persona') r('controls.persona.add', '/account/controls/persona/add') r('controls.persona.remove', '/account/controls/persona/remove') r('controls.openid', '/account/controls/openid') r('controls.openid.add', '/account/controls/openid/add') r('controls.openid.add_finish', '/account/controls/openid/add_finish') r('controls.openid.remove', '/account/controls/openid/remove') r('controls.rels', '/account/controls/relationships') r('controls.rels.watch', '/account/controls/relationships/watch') r('controls.rels.unwatch', '/account/controls/relationships/unwatch') r('controls.info', '/account/controls/user_info') r('controls.certs', '/account/controls/certificates') r('controls.certs.add', '/account/controls/certificates/add') r('controls.certs.generate_server', '/account/controls/certificates/gen/cert-{name}.p12') r('controls.certs.details', '/account/controls/certificates/details/{serial:[0-9a-f]+}') r('controls.certs.download', '/account/controls/certificates/download/cert-{name}-{serial:[0-9a-f]+}.pem') r('controls.certs.revoke', '/account/controls/certificates/revoke/{serial:[0-9a-f]+}') # User pages kw = sqla_route_options('user', 'name', model.User.name) r('users.view', '/users/{name}', **kw) r('users.art', '/users/{name}/art', **kw) r('users.art_by_album', '/users/{name}/art/{album}', **kw) r('users.profile', '/users/{name}/profile', **kw) r('users.watchstream', '/users/{name}/watchstream', **kw) r('albums.user_index', '/users/{name}/albums', **kw) r('api:users.list', '/users.json') # Artwork kw = sqla_route_options('artwork', 'id', model.Artwork.id) kw['pregenerator'] = artwork_pregenerator r('art.browse', '/art') r('art.upload', '/art/upload') r('art.view', r'/art/{id:\d+}{title:(-.+)?}', **kw) r('art.add_tags', r'/art/{id:\d+}/add_tags', **kw) r('art.remove_tags', r'/art/{id:\d+}/remove_tags', **kw) 
r('art.rate', r'/art/{id:\d+}/rate', **kw) # Tags # XXX what should the tag name regex be, if anything? # XXX should the regex be checked in the 'factory' instead? way easier that way... kw = sqla_route_options('tag', 'name', model.Tag.name) r('tags.list', '/tags') r('tags.view', '/tags/{name}', **kw) r('tags.artwork', '/tags/{name}/artwork', **kw) # Albums # XXX well this is getting complicated! needs to check user, needs to check id, needs to generate correctly, needs a title like art has user_router = SugarRouter(config, '/users/{user}', model.User.name) album_router = user_router.chain('/albums/{album}', model.Album.id, rel=model.Album.user) album_router.add_route('albums.artwork', '') # Administration r('admin.dashboard', '/admin') r('admin.log', '/admin/log') # Debugging r('debug.blank', '/debug/blank') r('debug.crash', '/debug/crash') r('debug.mako-crash', '/debug/mako-crash') r('debug.status.303', '/debug/303') r('debug.status.400', '/debug/400') r('debug.status.403', '/debug/403') r('debug.status.404', '/debug/404') # Comments; made complex because they can attach to different parent URLs. # Rather than hack around how Pyramid's routes works, we can just use our # own class that does what we want! # XXX 1: make this work for users as well # XXX 2: make the other routes work # XXX 3: possibly find a way to verify that the same logic is used here and for the main routes parent_route_names = ('art.view', 'user.view') mapper = config.get_routes_mapper() parent_routes = [mapper.get_route(name) for name in parent_route_names] commentables = dict( users=model.User.name, art=model.Artwork.id, ) def comments_factory(request): # XXX prefetching on these? type = request.matchdict['type'] identifier = request.matchdict['identifier'] try: sqla_column = commentables[type] entity = model.session.query(sqla_column.parententity).filter(sqla_column == identifier).one() except (NoResultFound, KeyError): # 404! raise NotFound() if 'comment_id' not in request.matchdict: return contextualize(entity.discussion) # URLs to specific comments should have those comments as the context try: return contextualize( model.session .query(model.Comment) .with_parent(entity.discussion) .filter(model.Comment.id == request.matchdict['comment_id']) .one()) except NoResultFound: raise NotFound() def comments_pregenerator(request, elements, kw): resource = None comment = kw.get('comment', None) if comment: kw['comment_id'] = comment.id if 'resource' not in kw: resource = comment.discussion.resource if not resource: resource = kw['resource'] # XXX users... entity = resource.member kw['type'] = 'art' kw['identifier'] = entity.id return elements, kw r('comments.list', '/{type}/{identifier}/comments', factory=comments_factory) r('comments.write', '/{type}/{identifier}/comments/write', factory=comments_factory, pregenerator=comments_pregenerator) r('comments.view', '/{type}/{identifier}/comments/{comment_id}', factory=comments_factory, pregenerator=comments_pregenerator) r('comments.edit', '/{type}/{identifier}/comments/{comment_id}/edit', factory=comments_factory, pregenerator=comments_pregenerator) r('comments.reply', '/{type}/{identifier}/comments/{comment_id}/write', factory=comments_factory, pregenerator=comments_pregenerator) class SugarRouter(object): """Glues routing to the ORM. 
Use me like this: foo_router = SugarRouter(config, '/foos/{foo}', model.Foo.identifier) foo_router.add_route('foo_edit', '/edit') This will route `/foos/{foo}/edit` to `foo_edit`, with the bonus that the context will be set to the corresponding `Foo` object. The reverse works as well: request.route_url('foo_edit', foo=some_foo_row) """ # TODO: support URLs like /art/123-title-that-doesnt-matter # ...but only do it for the root url, i think def __init__(self, config, url_prefix, sqla_column, parent_router=None, rel=None): self.config = config self.url_prefix = url_prefix self.sqla_column = sqla_column self.sqla_table = sqla_column.parententity self.parent_router = parent_router self.sqla_rel = rel assert (self.parent_router is None) == (self.sqla_rel is None) # This is the {key} that appears in the matchdict and generated route, # as well as the kwarg passed to route_url match = re.search(r'[{](\w+)[}]', url_prefix) if not match:
self.key = match.group(1) ### Dealing with chaining def chain(self, url_prefix, sqla_column, rel): """Create a new sugar router with this one as the parent.""" return self.__class__( self.config, url_prefix, sqla_column, parent_router=self, rel=rel) @property def full_url_prefix(self): """Constructs a chain of url prefixes going up to the root.""" if self.parent_router: ret = self.parent_router.full_url_prefix else: ret = '' ret += self.url_prefix return ret def filter_sqlalchemy_query(self, query, request): """Takes a query, filters it as demanded by the matchdict, and returns a new one. """ query = query.filter(self.sqla_column == request.matchdict[self.key]) if self.parent_router: query = query.join(self.sqla_rel) query = self.parent_router.filter_sqlalchemy_query( query, request) return query ### Actual routing stuff def add_route(self, route_name, suffix, **kwargs): """Analog to `config.add_route()`, with magic baked in. Extra kwargs are passed along. """ kwargs['pregenerator'] = self.pregenerator kwargs['factory'] = self.factory self.config.add_route(route_name, self.full_url_prefix + suffix, **kwargs) def pregenerator(self, request, elements, kw): """Passed to Pyramid as a bound method when creating a route. Converts the arguments to route_url (which should be row objects) into URL-friendly strings. """ # Get the row object, and get the property from it row = kw.pop(self.key) kw[self.key] = self.sqla_column.__get__(row, type(row)) if self.parent_router: # Parent needs its own treatment here, too. Fill in the parent # object automatically kw[self.parent_router.key] = self.sqla_rel.__get__(row, type(row)) elements, kw = self.parent_router.pregenerator(request, elements, kw) return elements, kw def factory(self, request): """Passed to Pyramid as a bound method when creating a route. Translates a matched URL to an ORM row, which becomes the context. """ # This yields the "context", which should be the row object try: q = model.session.query(self.sqla_table) q = self.filter_sqlalchemy_query(q, request) return q.one() except NoResultFound: # 404! raise NotFound() def sqla_route_options(url_key, match_key, sqla_column): """Returns a dict of route options that are helpful for routes representing SQLA objects. ``url_key``: The key to use for a SQLA object when calling ``route_url()``. ``match_key``: The key in the matchdict that contains the row identifier. ``sqla_column``: The SQLA ORM column that appears in the URL. """ def pregenerator(request, elements, kw): # Get the row object, and get the property from it row = kw.pop(url_key) kw[match_key] = sqla_column.__get__(row, type(row)) return elements, kw def factory(request): # This yields the "context", which should be the row object try: return contextualize( model.session.query(sqla_column.parententity) .filter(sqla_column == request.matchdict[match_key]) .one()) except NoResultFound: # 404! raise NotFound() return dict(pregenerator=pregenerator, factory=factory) def artwork_pregenerator(request, elements, kw): """Special pregenerator for artwork URLs, which also include a title sometimes. """ artwork = kw.pop('artwork') kw['id'] = artwork.id # n.b.: this won't hurt anything if the route doesn't have {title}, so it's # calculated and thrown away. bad? if artwork.title: kw['title'] = '-' + _make_url_friendly(artwork.title) else: kw['title'] = '' return elements, kw def _make_url_friendly(title): """Given a title that will be used as flavor text in a URL, returns a string that will look less like garbage in an address bar. 
""" # RFC 3986 section 2.3 says: letters, numbers, and -_.~ are unreserved return re.sub('[^-_.~a-zA-Z0-9]', '-', title) def filestore_pregenerator(request, elements, kw): """Pregenerator for the filestore, which may run under a different domain name in the case of a CDN cacher thinger. """ cdn_root = request.registry.settings.get('cdn_root') if cdn_root: kw['_app_url'] = cdn_root return elements, kw
raise ValueError("Can't find a route kwarg in {0!r}".format(url_prefix))
conditional_block
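A minimal, standard-library-only sketch of the placeholder check that the sample above completes: SugarRouter pulls the first {key} out of its URL prefix with a regex and rejects prefixes that have none. The function name below is a stand-in for illustration, not part of floof.

import re

def extract_route_key(url_prefix):
    # Mirrors the check in SugarRouter.__init__: find the first {key}
    # placeholder in the prefix, or refuse prefixes without one.
    match = re.search(r'[{](\w+)[}]', url_prefix)
    if not match:
        raise ValueError("Can't find a route kwarg in {0!r}".format(url_prefix))
    return match.group(1)

print(extract_route_key('/users/{user}'))    # -> user
print(extract_route_key('/albums/{album}'))  # -> album
try:
    extract_route_key('/admin')
except ValueError as exc:
    print(exc)                               # -> Can't find a route kwarg in '/admin'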
routing.py
"""Routing configuration, broken out separately for ease of consultation without going through the whole app config everything. Some useful helpers are at the bottom. Be familiar with them! """ import re import floof.model as model from floof.resource import contextualize from pyramid.exceptions import NotFound from sqlalchemy.orm.exc import NoResultFound def configure_routing(config): """Adds route declarations to the app config.""" # Static file access. Separate root for each subdirectory, because Pyramid # treats these as first-class routables rather than a last-ditch fallback config.add_static_view('/css', 'floof:assets/css') config.add_static_view('/files', 'floof:assets/files') # dummy file store config.add_static_view('/icons', 'floof:assets/icons') config.add_static_view('/images', 'floof:assets/images') config.add_static_view('/js', 'floof:assets/js') # TODO this doesn't actually work config.add_static_view('/favicon.ico', 'floof:assets/favicon.ico') r = config.add_route # Miscellaneous root stuff r('root', '/') r('filestore', '/filestore/{class_}/{key}', pregenerator=filestore_pregenerator) r('reproxy', '/reproxy') r('log', '/log') # Registration and auth r('account.login', '/account/login') r('account.login_begin', '/account/login_begin') r('account.login_finish', '/account/login_finish') r('account.register', '/account/register') r('account.add_identity', '/account/add_identity') r('account.persona.login', '/account/persona/login') r('account.logout', '/account/logout') r('account.profile', '/account/profile') # Regular user control panel r('controls.index', '/account/controls') r('controls.auth', '/account/controls/authentication') r('controls.persona', '/account/controls/persona') r('controls.persona.add', '/account/controls/persona/add') r('controls.persona.remove', '/account/controls/persona/remove') r('controls.openid', '/account/controls/openid') r('controls.openid.add', '/account/controls/openid/add') r('controls.openid.add_finish', '/account/controls/openid/add_finish') r('controls.openid.remove', '/account/controls/openid/remove') r('controls.rels', '/account/controls/relationships') r('controls.rels.watch', '/account/controls/relationships/watch') r('controls.rels.unwatch', '/account/controls/relationships/unwatch') r('controls.info', '/account/controls/user_info') r('controls.certs', '/account/controls/certificates') r('controls.certs.add', '/account/controls/certificates/add') r('controls.certs.generate_server', '/account/controls/certificates/gen/cert-{name}.p12') r('controls.certs.details', '/account/controls/certificates/details/{serial:[0-9a-f]+}') r('controls.certs.download', '/account/controls/certificates/download/cert-{name}-{serial:[0-9a-f]+}.pem') r('controls.certs.revoke', '/account/controls/certificates/revoke/{serial:[0-9a-f]+}') # User pages kw = sqla_route_options('user', 'name', model.User.name) r('users.view', '/users/{name}', **kw) r('users.art', '/users/{name}/art', **kw) r('users.art_by_album', '/users/{name}/art/{album}', **kw) r('users.profile', '/users/{name}/profile', **kw) r('users.watchstream', '/users/{name}/watchstream', **kw) r('albums.user_index', '/users/{name}/albums', **kw) r('api:users.list', '/users.json') # Artwork kw = sqla_route_options('artwork', 'id', model.Artwork.id) kw['pregenerator'] = artwork_pregenerator r('art.browse', '/art') r('art.upload', '/art/upload') r('art.view', r'/art/{id:\d+}{title:(-.+)?}', **kw) r('art.add_tags', r'/art/{id:\d+}/add_tags', **kw) r('art.remove_tags', r'/art/{id:\d+}/remove_tags', **kw) 
r('art.rate', r'/art/{id:\d+}/rate', **kw) # Tags # XXX what should the tag name regex be, if anything? # XXX should the regex be checked in the 'factory' instead? way easier that way... kw = sqla_route_options('tag', 'name', model.Tag.name) r('tags.list', '/tags') r('tags.view', '/tags/{name}', **kw) r('tags.artwork', '/tags/{name}/artwork', **kw) # Albums # XXX well this is getting complicated! needs to check user, needs to check id, needs to generate correctly, needs a title like art has user_router = SugarRouter(config, '/users/{user}', model.User.name) album_router = user_router.chain('/albums/{album}', model.Album.id, rel=model.Album.user) album_router.add_route('albums.artwork', '') # Administration r('admin.dashboard', '/admin') r('admin.log', '/admin/log') # Debugging r('debug.blank', '/debug/blank') r('debug.crash', '/debug/crash') r('debug.mako-crash', '/debug/mako-crash') r('debug.status.303', '/debug/303') r('debug.status.400', '/debug/400') r('debug.status.403', '/debug/403') r('debug.status.404', '/debug/404') # Comments; made complex because they can attach to different parent URLs. # Rather than hack around how Pyramid's routes works, we can just use our # own class that does what we want! # XXX 1: make this work for users as well # XXX 2: make the other routes work # XXX 3: possibly find a way to verify that the same logic is used here and for the main routes parent_route_names = ('art.view', 'user.view') mapper = config.get_routes_mapper() parent_routes = [mapper.get_route(name) for name in parent_route_names] commentables = dict( users=model.User.name, art=model.Artwork.id, ) def comments_factory(request): # XXX prefetching on these? type = request.matchdict['type'] identifier = request.matchdict['identifier'] try: sqla_column = commentables[type] entity = model.session.query(sqla_column.parententity).filter(sqla_column == identifier).one() except (NoResultFound, KeyError): # 404! raise NotFound() if 'comment_id' not in request.matchdict: return contextualize(entity.discussion) # URLs to specific comments should have those comments as the context try: return contextualize( model.session .query(model.Comment) .with_parent(entity.discussion) .filter(model.Comment.id == request.matchdict['comment_id']) .one()) except NoResultFound: raise NotFound() def comments_pregenerator(request, elements, kw): resource = None comment = kw.get('comment', None) if comment: kw['comment_id'] = comment.id if 'resource' not in kw: resource = comment.discussion.resource if not resource: resource = kw['resource'] # XXX users... entity = resource.member kw['type'] = 'art' kw['identifier'] = entity.id return elements, kw r('comments.list', '/{type}/{identifier}/comments', factory=comments_factory) r('comments.write', '/{type}/{identifier}/comments/write', factory=comments_factory, pregenerator=comments_pregenerator) r('comments.view', '/{type}/{identifier}/comments/{comment_id}', factory=comments_factory, pregenerator=comments_pregenerator) r('comments.edit', '/{type}/{identifier}/comments/{comment_id}/edit', factory=comments_factory, pregenerator=comments_pregenerator) r('comments.reply', '/{type}/{identifier}/comments/{comment_id}/write', factory=comments_factory, pregenerator=comments_pregenerator) class SugarRouter(object): """Glues routing to the ORM. 
Use me like this: foo_router = SugarRouter(config, '/foos/{foo}', model.Foo.identifier) foo_router.add_route('foo_edit', '/edit') This will route `/foos/{foo}/edit` to `foo_edit`, with the bonus that the context will be set to the corresponding `Foo` object. The reverse works as well: request.route_url('foo_edit', foo=some_foo_row) """ # TODO: support URLs like /art/123-title-that-doesnt-matter # ...but only do it for the root url, i think def __init__(self, config, url_prefix, sqla_column, parent_router=None, rel=None): self.config = config self.url_prefix = url_prefix self.sqla_column = sqla_column self.sqla_table = sqla_column.parententity self.parent_router = parent_router self.sqla_rel = rel assert (self.parent_router is None) == (self.sqla_rel is None) # This is the {key} that appears in the matchdict and generated route, # as well as the kwarg passed to route_url match = re.search(r'[{](\w+)[}]', url_prefix) if not match: raise ValueError("Can't find a route kwarg in {0!r}".format(url_prefix)) self.key = match.group(1) ### Dealing with chaining def chain(self, url_prefix, sqla_column, rel): """Create a new sugar router with this one as the parent.""" return self.__class__( self.config, url_prefix, sqla_column, parent_router=self, rel=rel) @property def full_url_prefix(self): """Constructs a chain of url prefixes going up to the root.""" if self.parent_router: ret = self.parent_router.full_url_prefix else: ret = '' ret += self.url_prefix return ret def filter_sqlalchemy_query(self, query, request): """Takes a query, filters it as demanded by the matchdict, and returns a new one. """ query = query.filter(self.sqla_column == request.matchdict[self.key]) if self.parent_router: query = query.join(self.sqla_rel) query = self.parent_router.filter_sqlalchemy_query( query, request) return query ### Actual routing stuff def add_route(self, route_name, suffix, **kwargs): """Analog to `config.add_route()`, with magic baked in. Extra kwargs are passed along. """ kwargs['pregenerator'] = self.pregenerator kwargs['factory'] = self.factory self.config.add_route(route_name, self.full_url_prefix + suffix, **kwargs) def pregenerator(self, request, elements, kw): """Passed to Pyramid as a bound method when creating a route. Converts the arguments to route_url (which should be row objects) into URL-friendly strings. """ # Get the row object, and get the property from it row = kw.pop(self.key) kw[self.key] = self.sqla_column.__get__(row, type(row)) if self.parent_router: # Parent needs its own treatment here, too. Fill in the parent # object automatically kw[self.parent_router.key] = self.sqla_rel.__get__(row, type(row)) elements, kw = self.parent_router.pregenerator(request, elements, kw) return elements, kw def factory(self, request): """Passed to Pyramid as a bound method when creating a route. Translates a matched URL to an ORM row, which becomes the context. """ # This yields the "context", which should be the row object try: q = model.session.query(self.sqla_table) q = self.filter_sqlalchemy_query(q, request) return q.one() except NoResultFound: # 404! raise NotFound() def sqla_route_options(url_key, match_key, sqla_column): """Returns a dict of route options that are helpful for routes representing SQLA objects. ``url_key``: The key to use for a SQLA object when calling ``route_url()``. ``match_key``: The key in the matchdict that contains the row identifier. ``sqla_column``: The SQLA ORM column that appears in the URL. 
""" def pregenerator(request, elements, kw): # Get the row object, and get the property from it row = kw.pop(url_key) kw[match_key] = sqla_column.__get__(row, type(row)) return elements, kw def factory(request): # This yields the "context", which should be the row object try: return contextualize( model.session.query(sqla_column.parententity) .filter(sqla_column == request.matchdict[match_key]) .one()) except NoResultFound: # 404! raise NotFound() return dict(pregenerator=pregenerator, factory=factory) def artwork_pregenerator(request, elements, kw): """Special pregenerator for artwork URLs, which also include a title sometimes. """ artwork = kw.pop('artwork') kw['id'] = artwork.id # n.b.: this won't hurt anything if the route doesn't have {title}, so it's # calculated and thrown away. bad? if artwork.title: kw['title'] = '-' + _make_url_friendly(artwork.title) else: kw['title'] = '' return elements, kw def
(title): """Given a title that will be used as flavor text in a URL, returns a string that will look less like garbage in an address bar. """ # RFC 3986 section 2.3 says: letters, numbers, and -_.~ are unreserved return re.sub('[^-_.~a-zA-Z0-9]', '-', title) def filestore_pregenerator(request, elements, kw): """Pregenerator for the filestore, which may run under a different domain name in the case of a CDN cacher thinger. """ cdn_root = request.registry.settings.get('cdn_root') if cdn_root: kw['_app_url'] = cdn_root return elements, kw
_make_url_friendly
identifier_name
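A short, self-contained illustration of what the _make_url_friendly helper (the identifier filled in above) does to an artwork title. Only the regex from the sample is assumed; the example title is made up.

import re

def make_url_friendly(title):
    # RFC 3986 section 2.3: letters, digits, and -_.~ are unreserved;
    # everything else is replaced with a hyphen.
    return re.sub('[^-_.~a-zA-Z0-9]', '-', title)

print(make_url_friendly('Self Portrait (2011)!'))
# -> Self-Portrait--2011--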
routing.py
"""Routing configuration, broken out separately for ease of consultation without going through the whole app config everything. Some useful helpers are at the bottom. Be familiar with them! """ import re import floof.model as model from floof.resource import contextualize from pyramid.exceptions import NotFound from sqlalchemy.orm.exc import NoResultFound def configure_routing(config): """Adds route declarations to the app config.""" # Static file access. Separate root for each subdirectory, because Pyramid # treats these as first-class routables rather than a last-ditch fallback config.add_static_view('/css', 'floof:assets/css') config.add_static_view('/files', 'floof:assets/files') # dummy file store config.add_static_view('/icons', 'floof:assets/icons') config.add_static_view('/images', 'floof:assets/images') config.add_static_view('/js', 'floof:assets/js') # TODO this doesn't actually work config.add_static_view('/favicon.ico', 'floof:assets/favicon.ico') r = config.add_route # Miscellaneous root stuff r('root', '/') r('filestore', '/filestore/{class_}/{key}', pregenerator=filestore_pregenerator) r('reproxy', '/reproxy') r('log', '/log') # Registration and auth r('account.login', '/account/login') r('account.login_begin', '/account/login_begin') r('account.login_finish', '/account/login_finish') r('account.register', '/account/register') r('account.add_identity', '/account/add_identity') r('account.persona.login', '/account/persona/login') r('account.logout', '/account/logout') r('account.profile', '/account/profile') # Regular user control panel r('controls.index', '/account/controls') r('controls.auth', '/account/controls/authentication') r('controls.persona', '/account/controls/persona') r('controls.persona.add', '/account/controls/persona/add') r('controls.persona.remove', '/account/controls/persona/remove') r('controls.openid', '/account/controls/openid') r('controls.openid.add', '/account/controls/openid/add') r('controls.openid.add_finish', '/account/controls/openid/add_finish') r('controls.openid.remove', '/account/controls/openid/remove') r('controls.rels', '/account/controls/relationships') r('controls.rels.watch', '/account/controls/relationships/watch') r('controls.rels.unwatch', '/account/controls/relationships/unwatch') r('controls.info', '/account/controls/user_info') r('controls.certs', '/account/controls/certificates') r('controls.certs.add', '/account/controls/certificates/add') r('controls.certs.generate_server', '/account/controls/certificates/gen/cert-{name}.p12') r('controls.certs.details', '/account/controls/certificates/details/{serial:[0-9a-f]+}') r('controls.certs.download', '/account/controls/certificates/download/cert-{name}-{serial:[0-9a-f]+}.pem') r('controls.certs.revoke', '/account/controls/certificates/revoke/{serial:[0-9a-f]+}') # User pages kw = sqla_route_options('user', 'name', model.User.name) r('users.view', '/users/{name}', **kw) r('users.art', '/users/{name}/art', **kw) r('users.art_by_album', '/users/{name}/art/{album}', **kw) r('users.profile', '/users/{name}/profile', **kw) r('users.watchstream', '/users/{name}/watchstream', **kw) r('albums.user_index', '/users/{name}/albums', **kw) r('api:users.list', '/users.json') # Artwork kw = sqla_route_options('artwork', 'id', model.Artwork.id) kw['pregenerator'] = artwork_pregenerator r('art.browse', '/art') r('art.upload', '/art/upload') r('art.view', r'/art/{id:\d+}{title:(-.+)?}', **kw) r('art.add_tags', r'/art/{id:\d+}/add_tags', **kw) r('art.remove_tags', r'/art/{id:\d+}/remove_tags', **kw) 
r('art.rate', r'/art/{id:\d+}/rate', **kw) # Tags # XXX what should the tag name regex be, if anything? # XXX should the regex be checked in the 'factory' instead? way easier that way... kw = sqla_route_options('tag', 'name', model.Tag.name) r('tags.list', '/tags') r('tags.view', '/tags/{name}', **kw) r('tags.artwork', '/tags/{name}/artwork', **kw) # Albums # XXX well this is getting complicated! needs to check user, needs to check id, needs to generate correctly, needs a title like art has user_router = SugarRouter(config, '/users/{user}', model.User.name) album_router = user_router.chain('/albums/{album}', model.Album.id, rel=model.Album.user) album_router.add_route('albums.artwork', '') # Administration r('admin.dashboard', '/admin') r('admin.log', '/admin/log') # Debugging r('debug.blank', '/debug/blank') r('debug.crash', '/debug/crash') r('debug.mako-crash', '/debug/mako-crash') r('debug.status.303', '/debug/303') r('debug.status.400', '/debug/400') r('debug.status.403', '/debug/403') r('debug.status.404', '/debug/404') # Comments; made complex because they can attach to different parent URLs. # Rather than hack around how Pyramid's routes works, we can just use our # own class that does what we want! # XXX 1: make this work for users as well # XXX 2: make the other routes work # XXX 3: possibly find a way to verify that the same logic is used here and for the main routes parent_route_names = ('art.view', 'user.view') mapper = config.get_routes_mapper() parent_routes = [mapper.get_route(name) for name in parent_route_names] commentables = dict( users=model.User.name, art=model.Artwork.id, ) def comments_factory(request): # XXX prefetching on these? type = request.matchdict['type'] identifier = request.matchdict['identifier'] try: sqla_column = commentables[type] entity = model.session.query(sqla_column.parententity).filter(sqla_column == identifier).one() except (NoResultFound, KeyError): # 404! raise NotFound() if 'comment_id' not in request.matchdict: return contextualize(entity.discussion) # URLs to specific comments should have those comments as the context try: return contextualize( model.session .query(model.Comment) .with_parent(entity.discussion) .filter(model.Comment.id == request.matchdict['comment_id']) .one()) except NoResultFound: raise NotFound() def comments_pregenerator(request, elements, kw): resource = None comment = kw.get('comment', None) if comment: kw['comment_id'] = comment.id if 'resource' not in kw: resource = comment.discussion.resource if not resource: resource = kw['resource'] # XXX users... entity = resource.member kw['type'] = 'art' kw['identifier'] = entity.id return elements, kw r('comments.list', '/{type}/{identifier}/comments', factory=comments_factory) r('comments.write', '/{type}/{identifier}/comments/write', factory=comments_factory, pregenerator=comments_pregenerator) r('comments.view', '/{type}/{identifier}/comments/{comment_id}', factory=comments_factory, pregenerator=comments_pregenerator) r('comments.edit', '/{type}/{identifier}/comments/{comment_id}/edit', factory=comments_factory, pregenerator=comments_pregenerator) r('comments.reply', '/{type}/{identifier}/comments/{comment_id}/write', factory=comments_factory, pregenerator=comments_pregenerator) class SugarRouter(object): """Glues routing to the ORM. 
Use me like this: foo_router = SugarRouter(config, '/foos/{foo}', model.Foo.identifier) foo_router.add_route('foo_edit', '/edit') This will route `/foos/{foo}/edit` to `foo_edit`, with the bonus that the context will be set to the corresponding `Foo` object. The reverse works as well: request.route_url('foo_edit', foo=some_foo_row) """ # TODO: support URLs like /art/123-title-that-doesnt-matter # ...but only do it for the root url, i think def __init__(self, config, url_prefix, sqla_column, parent_router=None, rel=None): self.config = config self.url_prefix = url_prefix self.sqla_column = sqla_column self.sqla_table = sqla_column.parententity self.parent_router = parent_router self.sqla_rel = rel assert (self.parent_router is None) == (self.sqla_rel is None) # This is the {key} that appears in the matchdict and generated route, # as well as the kwarg passed to route_url match = re.search(r'[{](\w+)[}]', url_prefix) if not match: raise ValueError("Can't find a route kwarg in {0!r}".format(url_prefix)) self.key = match.group(1) ### Dealing with chaining def chain(self, url_prefix, sqla_column, rel): """Create a new sugar router with this one as the parent.""" return self.__class__( self.config, url_prefix, sqla_column, parent_router=self, rel=rel) @property def full_url_prefix(self): """Constructs a chain of url prefixes going up to the root.""" if self.parent_router: ret = self.parent_router.full_url_prefix else: ret = '' ret += self.url_prefix return ret
"""Takes a query, filters it as demanded by the matchdict, and returns a new one. """ query = query.filter(self.sqla_column == request.matchdict[self.key]) if self.parent_router: query = query.join(self.sqla_rel) query = self.parent_router.filter_sqlalchemy_query( query, request) return query ### Actual routing stuff def add_route(self, route_name, suffix, **kwargs): """Analog to `config.add_route()`, with magic baked in. Extra kwargs are passed along. """ kwargs['pregenerator'] = self.pregenerator kwargs['factory'] = self.factory self.config.add_route(route_name, self.full_url_prefix + suffix, **kwargs) def pregenerator(self, request, elements, kw): """Passed to Pyramid as a bound method when creating a route. Converts the arguments to route_url (which should be row objects) into URL-friendly strings. """ # Get the row object, and get the property from it row = kw.pop(self.key) kw[self.key] = self.sqla_column.__get__(row, type(row)) if self.parent_router: # Parent needs its own treatment here, too. Fill in the parent # object automatically kw[self.parent_router.key] = self.sqla_rel.__get__(row, type(row)) elements, kw = self.parent_router.pregenerator(request, elements, kw) return elements, kw def factory(self, request): """Passed to Pyramid as a bound method when creating a route. Translates a matched URL to an ORM row, which becomes the context. """ # This yields the "context", which should be the row object try: q = model.session.query(self.sqla_table) q = self.filter_sqlalchemy_query(q, request) return q.one() except NoResultFound: # 404! raise NotFound() def sqla_route_options(url_key, match_key, sqla_column): """Returns a dict of route options that are helpful for routes representing SQLA objects. ``url_key``: The key to use for a SQLA object when calling ``route_url()``. ``match_key``: The key in the matchdict that contains the row identifier. ``sqla_column``: The SQLA ORM column that appears in the URL. """ def pregenerator(request, elements, kw): # Get the row object, and get the property from it row = kw.pop(url_key) kw[match_key] = sqla_column.__get__(row, type(row)) return elements, kw def factory(request): # This yields the "context", which should be the row object try: return contextualize( model.session.query(sqla_column.parententity) .filter(sqla_column == request.matchdict[match_key]) .one()) except NoResultFound: # 404! raise NotFound() return dict(pregenerator=pregenerator, factory=factory) def artwork_pregenerator(request, elements, kw): """Special pregenerator for artwork URLs, which also include a title sometimes. """ artwork = kw.pop('artwork') kw['id'] = artwork.id # n.b.: this won't hurt anything if the route doesn't have {title}, so it's # calculated and thrown away. bad? if artwork.title: kw['title'] = '-' + _make_url_friendly(artwork.title) else: kw['title'] = '' return elements, kw def _make_url_friendly(title): """Given a title that will be used as flavor text in a URL, returns a string that will look less like garbage in an address bar. """ # RFC 3986 section 2.3 says: letters, numbers, and -_.~ are unreserved return re.sub('[^-_.~a-zA-Z0-9]', '-', title) def filestore_pregenerator(request, elements, kw): """Pregenerator for the filestore, which may run under a different domain name in the case of a CDN cacher thinger. """ cdn_root = request.registry.settings.get('cdn_root') if cdn_root: kw['_app_url'] = cdn_root return elements, kw
def filter_sqlalchemy_query(self, query, request):
random_line_split
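To make the router chaining used for the album routes concrete, here is a toy, ORM-free sketch of how full_url_prefix composes parent and child prefixes. PrefixChain is a stand-in class for illustration only, not the real SugarRouter.

class PrefixChain(object):
    """Toy stand-in for SugarRouter's prefix chaining (no ORM, no Pyramid)."""

    def __init__(self, url_prefix, parent=None):
        self.url_prefix = url_prefix
        self.parent = parent

    def chain(self, url_prefix):
        # Same shape as SugarRouter.chain: the new router remembers its parent.
        return PrefixChain(url_prefix, parent=self)

    @property
    def full_url_prefix(self):
        # Walk up the chain of parents and concatenate their prefixes.
        prefix = self.parent.full_url_prefix if self.parent else ''
        return prefix + self.url_prefix

user_router = PrefixChain('/users/{user}')
album_router = user_router.chain('/albums/{album}')
print(album_router.full_url_prefix)  # -> /users/{user}/albums/{album}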
mod.rs
// Copyright © 2020 Brian Merchant. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. pub mod quantity; use crate::cell::chemistry::RgtpDistribution; use crate::math::geometry::{calc_poly_area, BBox}; use crate::math::v2d::V2d; use crate::parameters::quantity::{ Diffusion, Force, Length, Quantity, Stress, Time, Tinv, Viscosity, }; use crate::NVERTS; use modify_derive::Modify; use rand_distr::num_traits::Pow; use serde::{Deserialize, Serialize}; use std::f64::consts::PI; /// Characteristic quantities used for normalization. #[derive( Clone, Copy, Deserialize, Serialize, Default, Debug, PartialEq, Modify, )] pub struct CharacteristicQuantities { pub eta: Viscosity, pub f: Force, pub l: Length, pub t: Time, pub l3d: Length, pub kgtp: Tinv, } impl CharacteristicQuantities { /// Given a quantity `q`, normalize its units using the primary units `f` (Force), /// `l` (`Length`) and `t` (`Time`) provided in `CharQuants`. pub fn normalize<T: Quantity>(&self, q: &T) -> f64 { let q = q.g(); let u = q.units(); (q * self.f.pow(-1.0 * u.f) * self.l.pow(-1.0 * u.l) * self.t.pow(-1.0 * u.t)) .number() } pub fn time(&self) -> f64 { self.t.0 } } #[derive( Clone, Copy, Debug, Default, Deserialize, Serialize, PartialEq, Modify, )] pub struct RawCloseBounds { pub zero_at: Length, pub one_at: Length, } impl RawCloseBounds { pub fn new(zero_at: Length, one_at: Length) -> RawCloseBounds { RawCloseBounds { zero_at, one_at } } } #[derive( Copy, Clone, Debug, Default, Deserialize, Serialize, PartialEq, Modify, )] pub struct RawPhysicalContactParams { pub crl_one_at: Length, pub zero_at: Length, pub cil_mag: f64, pub adh_break: Option<Length>, pub adh_mag: Option<Force>, pub cal_mag: Option<f64>, } impl RawPhysicalContactParams { pub fn refine( &self, cq: &CharacteristicQuantities, ) -> PhysicalContactParams { let zero_at = cq.normalize(&self.zero_at); let crl_one_at = cq.normalize(&self.crl_one_at); let adh_break = cq.normalize(&self.adh_break.unwrap_or(self.crl_one_at)); let adh_rest = 0.5 * adh_break; PhysicalContactParams { zero_at, zero_at_sq: zero_at.pow(2), crl_one_at, adh_rest, adh_break, adh_mag: self.adh_mag.map(|adh_mag| cq.normalize(&adh_mag)), cal_mag: self.cal_mag, cil_mag: self.cil_mag, } } } #[derive( Deserialize, Serialize, Clone, Copy, PartialEq, Default, Debug, Modify, )] pub struct RawCoaParams { /// Factor controlling to what extent line-of-sight blockage should be /// penalized. pub los_penalty: f64, /// Distance from point of emission at which COA signal reaches half /// its maximum value. pub halfmax_dist: Length, /// Magnitude of COA. It will be divided by `NVERTS` so that it scales based /// on the number of vertices. pub mag: f64, /// If two vertices are within this distance, then COA cannot occur between them. pub too_close_dist: Length, } impl RawCoaParams { pub fn refine(&self, bq: &CharacteristicQuantities) -> CoaParams { let halfmax_dist = bq.normalize(&self.halfmax_dist); CoaParams { los_penalty: self.los_penalty, halfmax_dist, vertex_mag: self.mag / NVERTS as f64, // self.mag * exp(distrib_exp * x), where x is distance // between points. 
distrib_exp: 0.5f64.ln() / halfmax_dist, too_close_dist_sq: bq.normalize(&self.too_close_dist).pow(2), } } } #[derive(Deserialize, Serialize, Clone, Copy, PartialEq, Default, Debug)] pub struct RawChemAttrParams { pub center: [Length; 2], pub mag: f64, pub drop_per_char_l: f64, pub char_l: Length, } impl RawChemAttrParams { pub fn refine(&self, bq: &CharacteristicQuantities) -> ChemAttrParams { ChemAttrParams { center: V2d { x: bq.normalize(&self.center[0]), y: bq.normalize(&self.center[1]), }, center_mag: self.mag, slope: self.drop_per_char_l / bq.normalize(&self.char_l), } } } #[derive(Deserialize, Serialize, Clone, Copy, PartialEq, Default, Debug)] pub struct RawBdryParams { shape: [[Length; 2]; 4], skip_bb_check: bool, mag: f64, } impl RawBdryParams { pub fn refine(&self, bq: &CharacteristicQuantities) -> BdryParams { let shape = self .shape .iter() .map(|p| V2d { x: bq.normalize(&p[0]), y: bq.normalize(&p[1]), }) .collect::<Vec<V2d>>(); let bbox = BBox::from_points(&shape); BdryParams { shape, bbox, skip_bb_check: self.skip_bb_check, mag: self.mag, } } } #[derive( Deserialize, Serialize, Clone, Copy, PartialEq, Default, Debug, Modify, )] pub struct RawInteractionParams { pub coa: Option<RawCoaParams>, pub chem_attr: Option<RawChemAttrParams>, pub bdry: Option<RawBdryParams>, pub phys_contact: RawPhysicalContactParams, } impl RawInteractionParams { pub fn refine(&self, bq: &CharacteristicQuantities) -> InteractionParams { InteractionParams { coa: self.coa.as_ref().map(|coa| coa.refine(bq)), chem_attr: self .chem_attr .as_ref() .map(|chem_attr| chem_attr.refine(bq)), bdry: self.bdry.as_ref().map(|bdry| bdry.refine(bq)), phys_contact: self.phys_contact.refine(bq), } } } #[derive( Deserialize, Serialize, Copy, Clone, PartialEq, Default, Debug, Modify, )] pub struct RawWorldParameters { pub vertex_eta: Viscosity, pub interactions: RawInteractionParams, } #[derive(Clone, Copy, Deserialize, Serialize, PartialEq, Default, Debug)] pub struct PhysicalContactParams { /// If two points are within this range, then they are considered /// to be in contact for the purposes of CRL and adhesion. pub zero_at: f64, /// The square of `zero_at`. pub zero_at_sq: f64, /// If two points are within this range, then they are considered /// to be in maximal contact, so that there is no smoothing factor /// applied to CRL (i.e. the smoothing factor is `1.0`). pub crl_one_at: f64, /// The resting length of an adhesion. Same as `range.one_at * 0.8`. pub adh_rest: f64, /// This is distance at which the adhesion bond starts breaking/stops developing. pub adh_break: f64, /// Optional adhesion magnitude. If it is `None`, no adhesion /// will be calculated. pub adh_mag: Option<f64>, /// Optional CAL magnitude. If it is `None`, simulation will /// always execute CIL upon contact. pub cal_mag: Option<f64>, /// Magnitude of CIL that acts on Rho GTPase activation/ /// inactivation rates. pub cil_mag: f64, } #[derive(Clone, Copy, Deserialize, Serialize, PartialEq, Debug)] pub struct CoaParams { //TODO: Expand upon LOS system. /// Factor controlling to what extent line-of-sight blockage /// should be penalized. See SI for further information. pub los_penalty: f64, /// The distance at which COA signal reaches half-maximum value. pub halfmax_dist: f64, /// Magnitude of COA that acts on Rac1 activation rates. pub vertex_mag: f64, //TODO: look up exactly what is being done for this (see where // parameter is being generated for hint). 
/// Factor controlling the shape of the exponential modelling /// COA interaction (a function shaping parameter). It determines /// the distance at which two points would sense COA at half-max /// magnitude. pub distrib_exp: f64, /// If two vertices are within the square root of this distance , then COA cannot occur between /// them. pub too_close_dist_sq: f64, } #[derive(Clone, Copy, Deserialize, Serialize, PartialEq, Debug)] pub struct ChemAttrParams { /// Location of the chemoattractant center. pub center: V2d, /// Magnitude of chemoattractant a cell would sense if it were /// right on top of the chemoattractant source. pub center_mag: f64, /// Assuming shallow chemoattractant gradient, which can be /// modelled using a linear function with slope `slope`. pub slope: f64, } #[derive(Clone, Deserialize, Serialize, PartialEq, Debug)] pub struct BdryParams { /// Shape of the boundary. pub shape: Vec<V2d>, /// Bounding box of the boundary. pub bbox: BBox, /// Should boundary bounding box be checked to see if cell is /// within the boundary? pub skip_bb_check: bool, /// Magnitude of CIL-type interaction. pub mag: f64, } #[derive(Clone, Deserialize, Serialize, PartialEq, Default, Debug)] pub struct InteractionParams { pub phys_contact: PhysicalContactParams, pub coa: Option<CoaParams>, pub chem_attr: Option<ChemAttrParams>, pub bdry: Option<BdryParams>, } #[derive(Clone, Deserialize, Serialize, PartialEq, Default, Debug)] pub struct WorldParameters { /// Viscosity value used to calculate change in position of a /// vertex due to calculated forces on it. pub vertex_eta: f64, pub interactions: InteractionParams, } impl RawWorldParameters { pub fn refine(&self, bq: &CharacteristicQuantities) -> WorldParameters { WorldParameters { vertex_eta: bq.normalize(&self.vertex_eta), interactions: self.interactions.refine(bq), } } } /// The "raw", unprocessed, parameters that are supplied by the user. #[derive(Clone, Copy, Modify)] pub struct RawParameters { /// Cell diameter. pub cell_diam: Length, /// Fraction of max force achieved at `rgtp_act_at_max_f`. pub halfmax_rgtp_max_f_frac: f64, /// Stiffness of the membrane-cortex complex. pub stiffness_cortex: Stress, /// Typical lamellipod height: typical height of lamellipod (on the order of 100 nm). pub lm_h: Length, /// Halfmax Rho GTPase activity. pub halfmax_rgtp_frac: f64, /// Lamellipod stall stress: how much stress can lamellipod exert at most. pub lm_ss: Stress, /// Friction force opposing RhoA pulling. pub rho_friction: f64, /// Stiffness of cytoplasm. pub stiffness_cyto: Force, /// Diffusion rate of Rho GTPase on membrane. pub diffusion_rgtp: Diffusion, /// Initial distribution of Rac1. pub init_rac: RgtpDistribution, /// Initial distribution of RhoA. pub init_rho: RgtpDistribution, /// Baseline Rac1 activation rate. pub kgtp_rac: Tinv, /// Rac1 auto-activation rate. pub kgtp_rac_auto: Tinv, /// Baseline Rac1 inactivation rate. pub kdgtp_rac: Tinv, /// RhoA mediated inhibition of Rac1. pub kdgtp_rho_on_rac: Tinv, /// Strain at which Rac1 tension-mediated inhibition is half-strength. pub halfmax_tension_inhib: f64, /// Maximum tension-mediated Rac1 inhibition as a multiple of baseline Rac1 inactivation rate. pub tension_inhib: f64, /// Rate at which inactive membrane bound Rho GTPase dissociates from the /// membrane. pub k_mem_off: Tinv, /// Rate at which cytosolic Rho GTPase associates with the membrane. pub k_mem_on: Tinv, /// Baseline RhoA activation rate. pub kgtp_rho: Tinv, /// RhoA auto-activation rate. 
pub kgtp_auto_rho: Tinv, /// Baseline RhoA inactivation rate. pub kdgtp_rho: Tinv, /// Rac1 mediated inhibition of RhoA. pub kdgtp_rac_on_rho: Tinv, /// Enable randomization of bursts in Rac1 activity? pub randomization: bool, /// Average period between randomization events. pub rand_avg_t: Time, /// Standard deviation of period between randomization events. pub rand_std_t: Time, /// Magnitude of randomly applied factor affecting Rac1 activation rate: how big a burst? pub rand_mag: f64, /// Fraction of vertices to be selected for increased Rac1 activation due to random events. pub rand_vs: f64, } #[derive(Copy, Clone, Deserialize, Serialize, Default, Debug, PartialEq)] pub struct Parameters { /// Resting cell radius. pub cell_r: f64, /// Resting edge length. pub rest_edge_len: f64, /// Resting area. pub rest_area: f64,
pub const_retractive: f64, /// Stiffness of cytoplasm. pub stiffness_cyto: f64, /// Rate of Rho GTPase GDI unbinding and subsequent membrane attachment. pub k_mem_on_vertex: f64, /// Rate of Rho GTPase membrane disassociation. pub k_mem_off: f64, /// Diffusion rate of Rho GTPase on membrane. pub diffusion_rgtp: f64, /// Initial distribution of Rac1. pub init_rac: RgtpDistribution, /// Initial distribution of RhoA. pub init_rho: RgtpDistribution, /// Halfmax Rho GTPase activity per vertex. pub halfmax_vertex_rgtp: f64, /// Halfmax Rho GTPase activity per vertex as concentration. pub halfmax_vertex_rgtp_conc: f64, /// Baseline Rac1 activation rate. pub kgtp_rac: f64, /// Rac1 auto-activation rate as a multiple of baseline Rac1 activation rate. pub kgtp_rac_auto: f64, /// Baseline Rac1 inactivation rate. pub kdgtp_rac: f64, /// RhoA mediated inhibition of Rac1 as a multiple of baseline Rac1 inactivation rate. pub kdgtp_rho_on_rac: f64, /// Strain at which Rac1 tension-mediated inhibition is half-strength. pub halfmax_tension_inhib: f64, /// Tension-mediated Rac1 inhibition as a multiple of baseline Rac1 inactivation rate. pub tension_inhib: f64, /// Baseline RhoA activation rate. pub kgtp_rho: f64, /// RhoA auto-activation rate as a multiple of baseline RhoA activation rate. pub kgtp_rho_auto: f64, /// Baseline RhoA inactivation rate. pub kdgtp_rho: f64, /// Rac1 mediated inhibition of RhoA as a multiple of baseline RhoA inactivation rate. pub kdgtp_rac_on_rho: f64, /// Enable randomization of bursts in Rac1 activity? pub randomization: bool, /// Average time between random events, in timepoints. pub rand_avg_t: f64, /// Standard deviation of time between random events, in timepoints. pub rand_std_t: f64, /// Magnitude of factor randomly applied to Rac1 activation rate. pub rand_mag: f64, /// Number of vertices to be selected for random Rac1 activity boost. 
pub num_rand_vs: u32, } impl RawParameters { pub fn refine(&self, bq: &CharacteristicQuantities) -> Parameters { let cell_r = self.cell_diam.scale(0.5); let rel = self.cell_diam.scale((PI / (NVERTS as f64)).sin()); let ra = Length(1.0) .pow(2.0) .scale(calc_init_cell_area(cell_r.number())); let const_protrusive = (self.lm_h.g() * self.lm_ss.g() * rel.g()) .scale(self.halfmax_rgtp_max_f_frac); let const_retractive = const_protrusive.scale(self.rho_friction); let halfmax_vertex_rgtp = self.halfmax_rgtp_frac / NVERTS as f64; let halfmax_vertex_rgtp_conc = rel.pow(-1.0).scale(halfmax_vertex_rgtp); let stiffness_edge = self.stiffness_cortex.g() * bq.l3d.g(); let stiffness_cyto = self.stiffness_cyto.g().scale(1.0 / NVERTS as f64); Parameters { cell_r: bq.normalize(&cell_r), rest_edge_len: bq.normalize(&rel), rest_area: bq.normalize(&ra), stiffness_edge: bq.normalize(&stiffness_edge), const_protrusive: bq.normalize(&const_protrusive), const_retractive: bq.normalize(&const_retractive), stiffness_cyto: bq.normalize(&stiffness_cyto), k_mem_on_vertex: bq.normalize(&self.k_mem_on) / NVERTS as f64, k_mem_off: bq.normalize(&self.k_mem_off), diffusion_rgtp: bq.normalize(&self.diffusion_rgtp), init_rac: self.init_rac, init_rho: self.init_rho, halfmax_vertex_rgtp, halfmax_vertex_rgtp_conc: bq.normalize(&halfmax_vertex_rgtp_conc), kgtp_rac: bq.normalize(&self.kgtp_rac), kgtp_rac_auto: bq.normalize(&self.kgtp_rac_auto), kdgtp_rac: bq.normalize(&self.kdgtp_rac), kdgtp_rho_on_rac: bq.normalize(&self.kdgtp_rho_on_rac), halfmax_tension_inhib: self.halfmax_tension_inhib, tension_inhib: self.tension_inhib, kgtp_rho: bq.normalize(&self.kgtp_rho), kgtp_rho_auto: bq.normalize(&self.kgtp_auto_rho), kdgtp_rho: bq.normalize(&self.kdgtp_rho), kdgtp_rac_on_rho: bq.normalize(&self.kdgtp_rac_on_rho), randomization: self.randomization, rand_avg_t: bq.normalize(&self.rand_avg_t).ceil(), rand_std_t: bq.normalize(&self.rand_std_t).ceil(), rand_mag: self.rand_mag, num_rand_vs: (self.rand_vs * NVERTS as f64) as u32, } } } /// Calculate the area of an "ideal" initial cell of radius R, if it has /// `NVERTS` vertices. pub fn calc_init_cell_area(r: f64) -> f64 { let poly_coords = (0..NVERTS) .map(|vix| { let theta = (vix as f64) / (NVERTS as f64) * 2.0 * PI; V2d { x: r * theta.cos(), y: r * theta.sin(), } }) .collect::<Vec<V2d>>(); calc_poly_area(&poly_coords) }
/// Stiffness of edge. pub stiffness_edge: f64, /// Rac1 mediated protrusive force constant. pub const_protrusive: f64, /// RhoA mediated protrusive force constant.
random_line_split
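Two quick numerical checks on the math in the mod.rs sample above, written as plain Python with made-up example values (the real code is Rust and works in normalized units; NVERTS = 16 is an assumption here): the COA signal mag * exp(distrib_exp * x) is exactly half-maximal at halfmax_dist, and calc_init_cell_area agrees with the closed-form area (n / 2) * r**2 * sin(2 * pi / n) of a regular n-gon inscribed in a circle of radius r.

import math

# COA half-max check: distrib_exp = ln(0.5) / halfmax_dist, so the signal
# mag * exp(distrib_exp * x) equals mag / 2 at x = halfmax_dist.
halfmax_dist = 110.0   # example value (assumption)
mag = 24.0             # example COA magnitude (assumption)
distrib_exp = math.log(0.5) / halfmax_dist
print(mag * math.exp(distrib_exp * halfmax_dist))   # -> 12.0 (up to float rounding)

# calc_init_cell_area check: the shoelace area of a regular n-gon inscribed in
# a circle of radius r equals (n / 2) * r**2 * sin(2 * pi / n).
def init_cell_area(r, nverts=16):
    pts = [(r * math.cos(2 * math.pi * k / nverts),
            r * math.sin(2 * math.pi * k / nverts)) for k in range(nverts)]
    return 0.5 * abs(sum(x0 * y1 - x1 * y0
                         for (x0, y0), (x1, y1)
                         in zip(pts, pts[1:] + pts[:1])))

r = 20.0  # example radius (assumption)
print(init_cell_area(r), 16 / 2 * r ** 2 * math.sin(2 * math.pi / 16))
# both ~1224.6; the polygon area tends to pi * r**2 as nverts grows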
mod.rs
// Copyright © 2020 Brian Merchant. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. pub mod quantity; use crate::cell::chemistry::RgtpDistribution; use crate::math::geometry::{calc_poly_area, BBox}; use crate::math::v2d::V2d; use crate::parameters::quantity::{ Diffusion, Force, Length, Quantity, Stress, Time, Tinv, Viscosity, }; use crate::NVERTS; use modify_derive::Modify; use rand_distr::num_traits::Pow; use serde::{Deserialize, Serialize}; use std::f64::consts::PI; /// Characteristic quantities used for normalization. #[derive( Clone, Copy, Deserialize, Serialize, Default, Debug, PartialEq, Modify, )] pub struct CharacteristicQuantities { pub eta: Viscosity, pub f: Force, pub l: Length, pub t: Time, pub l3d: Length, pub kgtp: Tinv, } impl CharacteristicQuantities { /// Given a quantity `q`, normalize its units using the primary units `f` (Force), /// `l` (`Length`) and `t` (`Time`) provided in `CharQuants`. pub fn normalize<T: Quantity>(&self, q: &T) -> f64 { let q = q.g(); let u = q.units(); (q * self.f.pow(-1.0 * u.f) * self.l.pow(-1.0 * u.l) * self.t.pow(-1.0 * u.t)) .number() } pub fn time(&self) -> f64 { self.t.0 } } #[derive( Clone, Copy, Debug, Default, Deserialize, Serialize, PartialEq, Modify, )] pub struct RawCloseBounds { pub zero_at: Length, pub one_at: Length, } impl RawCloseBounds { pub fn new(zero_at: Length, one_at: Length) -> RawCloseBounds { RawCloseBounds { zero_at, one_at } } } #[derive( Copy, Clone, Debug, Default, Deserialize, Serialize, PartialEq, Modify, )] pub struct RawPhysicalContactParams { pub crl_one_at: Length, pub zero_at: Length, pub cil_mag: f64, pub adh_break: Option<Length>, pub adh_mag: Option<Force>, pub cal_mag: Option<f64>, } impl RawPhysicalContactParams { pub fn refine( &self, cq: &CharacteristicQuantities, ) -> PhysicalContactParams { let zero_at = cq.normalize(&self.zero_at); let crl_one_at = cq.normalize(&self.crl_one_at); let adh_break = cq.normalize(&self.adh_break.unwrap_or(self.crl_one_at)); let adh_rest = 0.5 * adh_break; PhysicalContactParams { zero_at, zero_at_sq: zero_at.pow(2), crl_one_at, adh_rest, adh_break, adh_mag: self.adh_mag.map(|adh_mag| cq.normalize(&adh_mag)), cal_mag: self.cal_mag, cil_mag: self.cil_mag, } } } #[derive( Deserialize, Serialize, Clone, Copy, PartialEq, Default, Debug, Modify, )] pub struct RawCoaParams { /// Factor controlling to what extent line-of-sight blockage should be /// penalized. pub los_penalty: f64, /// Distance from point of emission at which COA signal reaches half /// its maximum value. pub halfmax_dist: Length, /// Magnitude of COA. It will be divided by `NVERTS` so that it scales based /// on the number of vertices. pub mag: f64, /// If two vertices are within this distance, then COA cannot occur between them. pub too_close_dist: Length, } impl RawCoaParams { pub fn refine(&self, bq: &CharacteristicQuantities) -> CoaParams { let halfmax_dist = bq.normalize(&self.halfmax_dist); CoaParams { los_penalty: self.los_penalty, halfmax_dist, vertex_mag: self.mag / NVERTS as f64, // self.mag * exp(distrib_exp * x), where x is distance // between points. 
distrib_exp: 0.5f64.ln() / halfmax_dist, too_close_dist_sq: bq.normalize(&self.too_close_dist).pow(2), } } } #[derive(Deserialize, Serialize, Clone, Copy, PartialEq, Default, Debug)] pub struct RawChemAttrParams { pub center: [Length; 2], pub mag: f64, pub drop_per_char_l: f64, pub char_l: Length, } impl RawChemAttrParams { pub fn refine(&self, bq: &CharacteristicQuantities) -> ChemAttrParams { ChemAttrParams { center: V2d { x: bq.normalize(&self.center[0]), y: bq.normalize(&self.center[1]), }, center_mag: self.mag, slope: self.drop_per_char_l / bq.normalize(&self.char_l), } } } #[derive(Deserialize, Serialize, Clone, Copy, PartialEq, Default, Debug)] pub struct RawBdryParams { shape: [[Length; 2]; 4], skip_bb_check: bool, mag: f64, } impl RawBdryParams { pub fn refine(&self, bq: &CharacteristicQuantities) -> BdryParams { let shape = self .shape .iter() .map(|p| V2d { x: bq.normalize(&p[0]), y: bq.normalize(&p[1]), }) .collect::<Vec<V2d>>(); let bbox = BBox::from_points(&shape); BdryParams { shape, bbox, skip_bb_check: self.skip_bb_check, mag: self.mag, } } } #[derive( Deserialize, Serialize, Clone, Copy, PartialEq, Default, Debug, Modify, )] pub struct RawInteractionParams { pub coa: Option<RawCoaParams>, pub chem_attr: Option<RawChemAttrParams>, pub bdry: Option<RawBdryParams>, pub phys_contact: RawPhysicalContactParams, } impl RawInteractionParams { pub fn refine(&self, bq: &CharacteristicQuantities) -> InteractionParams { InteractionParams { coa: self.coa.as_ref().map(|coa| coa.refine(bq)), chem_attr: self .chem_attr .as_ref() .map(|chem_attr| chem_attr.refine(bq)), bdry: self.bdry.as_ref().map(|bdry| bdry.refine(bq)), phys_contact: self.phys_contact.refine(bq), } } } #[derive( Deserialize, Serialize, Copy, Clone, PartialEq, Default, Debug, Modify, )] pub struct RawWorldParameters { pub vertex_eta: Viscosity, pub interactions: RawInteractionParams, } #[derive(Clone, Copy, Deserialize, Serialize, PartialEq, Default, Debug)] pub struct PhysicalContactParams { /// If two points are within this range, then they are considered /// to be in contact for the purposes of CRL and adhesion. pub zero_at: f64, /// The square of `zero_at`. pub zero_at_sq: f64, /// If two points are within this range, then they are considered /// to be in maximal contact, so that there is no smoothing factor /// applied to CRL (i.e. the smoothing factor is `1.0`). pub crl_one_at: f64, /// The resting length of an adhesion. Same as `range.one_at * 0.8`. pub adh_rest: f64, /// This is distance at which the adhesion bond starts breaking/stops developing. pub adh_break: f64, /// Optional adhesion magnitude. If it is `None`, no adhesion /// will be calculated. pub adh_mag: Option<f64>, /// Optional CAL magnitude. If it is `None`, simulation will /// always execute CIL upon contact. pub cal_mag: Option<f64>, /// Magnitude of CIL that acts on Rho GTPase activation/ /// inactivation rates. pub cil_mag: f64, } #[derive(Clone, Copy, Deserialize, Serialize, PartialEq, Debug)] pub struct CoaParams { //TODO: Expand upon LOS system. /// Factor controlling to what extent line-of-sight blockage /// should be penalized. See SI for further information. pub los_penalty: f64, /// The distance at which COA signal reaches half-maximum value. pub halfmax_dist: f64, /// Magnitude of COA that acts on Rac1 activation rates. pub vertex_mag: f64, //TODO: look up exactly what is being done for this (see where // parameter is being generated for hint). 
/// Factor controlling the shape of the exponential modelling /// COA interaction (a function shaping parameter). It determines /// the distance at which two points would sense COA at half-max /// magnitude. pub distrib_exp: f64, /// If two vertices are within the square root of this distance , then COA cannot occur between /// them. pub too_close_dist_sq: f64, } #[derive(Clone, Copy, Deserialize, Serialize, PartialEq, Debug)] pub struct ChemAttrParams { /// Location of the chemoattractant center. pub center: V2d, /// Magnitude of chemoattractant a cell would sense if it were /// right on top of the chemoattractant source. pub center_mag: f64, /// Assuming shallow chemoattractant gradient, which can be /// modelled using a linear function with slope `slope`. pub slope: f64, } #[derive(Clone, Deserialize, Serialize, PartialEq, Debug)] pub struct BdryParams { /// Shape of the boundary. pub shape: Vec<V2d>, /// Bounding box of the boundary. pub bbox: BBox, /// Should boundary bounding box be checked to see if cell is /// within the boundary? pub skip_bb_check: bool, /// Magnitude of CIL-type interaction. pub mag: f64, } #[derive(Clone, Deserialize, Serialize, PartialEq, Default, Debug)] pub struct InteractionParams { pub phys_contact: PhysicalContactParams, pub coa: Option<CoaParams>, pub chem_attr: Option<ChemAttrParams>, pub bdry: Option<BdryParams>, } #[derive(Clone, Deserialize, Serialize, PartialEq, Default, Debug)] pub struct W
{ /// Viscosity value used to calculate change in position of a /// vertex due to calculated forces on it. pub vertex_eta: f64, pub interactions: InteractionParams, } impl RawWorldParameters { pub fn refine(&self, bq: &CharacteristicQuantities) -> WorldParameters { WorldParameters { vertex_eta: bq.normalize(&self.vertex_eta), interactions: self.interactions.refine(bq), } } } /// The "raw", unprocessed, parameters that are supplied by the user. #[derive(Clone, Copy, Modify)] pub struct RawParameters { /// Cell diameter. pub cell_diam: Length, /// Fraction of max force achieved at `rgtp_act_at_max_f`. pub halfmax_rgtp_max_f_frac: f64, /// Stiffness of the membrane-cortex complex. pub stiffness_cortex: Stress, /// Typical lamellipod height: typical height of lamellipod (on the order of 100 nm). pub lm_h: Length, /// Halfmax Rho GTPase activity. pub halfmax_rgtp_frac: f64, /// Lamellipod stall stress: how much stress can lamellipod exert at most. pub lm_ss: Stress, /// Friction force opposing RhoA pulling. pub rho_friction: f64, /// Stiffness of cytoplasm. pub stiffness_cyto: Force, /// Diffusion rate of Rho GTPase on membrane. pub diffusion_rgtp: Diffusion, /// Initial distribution of Rac1. pub init_rac: RgtpDistribution, /// Initial distribution of RhoA. pub init_rho: RgtpDistribution, /// Baseline Rac1 activation rate. pub kgtp_rac: Tinv, /// Rac1 auto-activation rate. pub kgtp_rac_auto: Tinv, /// Baseline Rac1 inactivation rate. pub kdgtp_rac: Tinv, /// RhoA mediated inhibition of Rac1. pub kdgtp_rho_on_rac: Tinv, /// Strain at which Rac1 tension-mediated inhibition is half-strength. pub halfmax_tension_inhib: f64, /// Maximum tension-mediated Rac1 inhibition as a multiple of baseline Rac1 inactivation rate. pub tension_inhib: f64, /// Rate at which inactive membrane bound Rho GTPase dissociates from the /// membrane. pub k_mem_off: Tinv, /// Rate at which cytosolic Rho GTPase associates with the membrane. pub k_mem_on: Tinv, /// Baseline RhoA activation rate. pub kgtp_rho: Tinv, /// RhoA auto-activation rate. pub kgtp_auto_rho: Tinv, /// Baseline RhoA inactivation rate. pub kdgtp_rho: Tinv, /// Rac1 mediated inhibition of RhoA. pub kdgtp_rac_on_rho: Tinv, /// Enable randomization of bursts in Rac1 activity? pub randomization: bool, /// Average period between randomization events. pub rand_avg_t: Time, /// Standard deviation of period between randomization events. pub rand_std_t: Time, /// Magnitude of randomly applied factor affecting Rac1 activation rate: how big a burst? pub rand_mag: f64, /// Fraction of vertices to be selected for increased Rac1 activation due to random events. pub rand_vs: f64, } #[derive(Copy, Clone, Deserialize, Serialize, Default, Debug, PartialEq)] pub struct Parameters { /// Resting cell radius. pub cell_r: f64, /// Resting edge length. pub rest_edge_len: f64, /// Resting area. pub rest_area: f64, /// Stiffness of edge. pub stiffness_edge: f64, /// Rac1 mediated protrusive force constant. pub const_protrusive: f64, /// RhoA mediated protrusive force constant. pub const_retractive: f64, /// Stiffness of cytoplasm. pub stiffness_cyto: f64, /// Rate of Rho GTPase GDI unbinding and subsequent membrane attachment. pub k_mem_on_vertex: f64, /// Rate of Rho GTPase membrane disassociation. pub k_mem_off: f64, /// Diffusion rate of Rho GTPase on membrane. pub diffusion_rgtp: f64, /// Initial distribution of Rac1. pub init_rac: RgtpDistribution, /// Initial distribution of RhoA. pub init_rho: RgtpDistribution, /// Halfmax Rho GTPase activity per vertex. 
pub halfmax_vertex_rgtp: f64, /// Halfmax Rho GTPase activity per vertex as concentration. pub halfmax_vertex_rgtp_conc: f64, /// Baseline Rac1 activation rate. pub kgtp_rac: f64, /// Rac1 auto-activation rate as a multiple of baseline Rac1 activation rate. pub kgtp_rac_auto: f64, /// Baseline Rac1 inactivation rate. pub kdgtp_rac: f64, /// RhoA mediated inhibition of Rac1 as a multiple of baseline Rac1 inactivation rate. pub kdgtp_rho_on_rac: f64, /// Strain at which Rac1 tension-mediated inhibition is half-strength. pub halfmax_tension_inhib: f64, /// Tension-mediated Rac1 inhibition as a multiple of baseline Rac1 inactivation rate. pub tension_inhib: f64, /// Baseline RhoA activation rate. pub kgtp_rho: f64, /// RhoA auto-activation rate as a multiple of baseline RhoA activation rate. pub kgtp_rho_auto: f64, /// Baseline RhoA inactivation rate. pub kdgtp_rho: f64, /// Rac1 mediated inhibition of RhoA as a multiple of baseline RhoA inactivation rate. pub kdgtp_rac_on_rho: f64, /// Enable randomization of bursts in Rac1 activity? pub randomization: bool, /// Average time between random events, in timepoints. pub rand_avg_t: f64, /// Standard deviation of time between random events, in timepoints. pub rand_std_t: f64, /// Magnitude of factor randomly applied to Rac1 activation rate. pub rand_mag: f64, /// Number of vertices to be selected for random Rac1 activity boost. pub num_rand_vs: u32, } impl RawParameters { pub fn refine(&self, bq: &CharacteristicQuantities) -> Parameters { let cell_r = self.cell_diam.scale(0.5); let rel = self.cell_diam.scale((PI / (NVERTS as f64)).sin()); let ra = Length(1.0) .pow(2.0) .scale(calc_init_cell_area(cell_r.number())); let const_protrusive = (self.lm_h.g() * self.lm_ss.g() * rel.g()) .scale(self.halfmax_rgtp_max_f_frac); let const_retractive = const_protrusive.scale(self.rho_friction); let halfmax_vertex_rgtp = self.halfmax_rgtp_frac / NVERTS as f64; let halfmax_vertex_rgtp_conc = rel.pow(-1.0).scale(halfmax_vertex_rgtp); let stiffness_edge = self.stiffness_cortex.g() * bq.l3d.g(); let stiffness_cyto = self.stiffness_cyto.g().scale(1.0 / NVERTS as f64); Parameters { cell_r: bq.normalize(&cell_r), rest_edge_len: bq.normalize(&rel), rest_area: bq.normalize(&ra), stiffness_edge: bq.normalize(&stiffness_edge), const_protrusive: bq.normalize(&const_protrusive), const_retractive: bq.normalize(&const_retractive), stiffness_cyto: bq.normalize(&stiffness_cyto), k_mem_on_vertex: bq.normalize(&self.k_mem_on) / NVERTS as f64, k_mem_off: bq.normalize(&self.k_mem_off), diffusion_rgtp: bq.normalize(&self.diffusion_rgtp), init_rac: self.init_rac, init_rho: self.init_rho, halfmax_vertex_rgtp, halfmax_vertex_rgtp_conc: bq.normalize(&halfmax_vertex_rgtp_conc), kgtp_rac: bq.normalize(&self.kgtp_rac), kgtp_rac_auto: bq.normalize(&self.kgtp_rac_auto), kdgtp_rac: bq.normalize(&self.kdgtp_rac), kdgtp_rho_on_rac: bq.normalize(&self.kdgtp_rho_on_rac), halfmax_tension_inhib: self.halfmax_tension_inhib, tension_inhib: self.tension_inhib, kgtp_rho: bq.normalize(&self.kgtp_rho), kgtp_rho_auto: bq.normalize(&self.kgtp_auto_rho), kdgtp_rho: bq.normalize(&self.kdgtp_rho), kdgtp_rac_on_rho: bq.normalize(&self.kdgtp_rac_on_rho), randomization: self.randomization, rand_avg_t: bq.normalize(&self.rand_avg_t).ceil(), rand_std_t: bq.normalize(&self.rand_std_t).ceil(), rand_mag: self.rand_mag, num_rand_vs: (self.rand_vs * NVERTS as f64) as u32, } } } /// Calculate the area of an "ideal" initial cell of radius R, if it has /// `NVERTS` vertices. 
pub fn calc_init_cell_area(r: f64) -> f64 { let poly_coords = (0..NVERTS) .map(|vix| { let theta = (vix as f64) / (NVERTS as f64) * 2.0 * PI; V2d { x: r * theta.cos(), y: r * theta.sin(), } }) .collect::<Vec<V2d>>(); calc_poly_area(&poly_coords) }
orldParameters
identifier_name
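As a side note on the Rust excerpt above: the vertices built in calc_init_cell_area form a regular polygon with NVERTS corners inscribed in a circle of radius r, so the value returned by calc_poly_area should match the closed-form area of a regular n-gon. The formula below is given purely as a cross-check, not as code from the source:

A(r) = \tfrac{1}{2}\, n\, r^{2} \sin\!\left(\tfrac{2\pi}{n}\right), \qquad n = \mathrm{NVERTS}.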
plugins.go
// Copyright 2015 Light Code Labs, LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package caddy import ( "fmt" "log" "net" "sort" "sync" "github.com/coredns/caddy/caddyfile" ) // These are all the registered plugins. var ( // serverTypes is a map of registered server types. serverTypes = make(map[string]ServerType) // plugins is a map of server type to map of plugin name to // Plugin. These are the "general" plugins that may or may // not be associated with a specific server type. If it's // applicable to multiple server types or the server type is // irrelevant, the key is empty string (""). But all plugins // must have a name. plugins = make(map[string]map[string]Plugin) // eventHooks is a map of hook name to Hook. All hooks plugins // must have a name. eventHooks = &sync.Map{} // parsingCallbacks maps server type to map of directive // to list of callback functions. These aren't really // plugins on their own, but are often registered from // plugins. parsingCallbacks = make(map[string]map[string][]ParsingCallback) // caddyfileLoaders is the list of all Caddyfile loaders // in registration order. caddyfileLoaders []caddyfileLoader ) // DescribePlugins returns a string describing the registered plugins. func DescribePlugins() string { pl := ListPlugins() str := "Server types:\n" for _, name := range pl["server_types"] { str += " " + name + "\n" } str += "\nCaddyfile loaders:\n" for _, name := range pl["caddyfile_loaders"] { str += " " + name + "\n" } if len(pl["event_hooks"]) > 0 { str += "\nEvent hook plugins:\n" for _, name := range pl["event_hooks"] { str += " hook." + name + "\n" } } if len(pl["clustering"]) > 0 { str += "\nClustering plugins:\n" for _, name := range pl["clustering"] { str += " " + name + "\n" } } str += "\nOther plugins:\n" for _, name := range pl["others"] { str += " " + name + "\n" } return str } // ListPlugins makes a list of the registered plugins, // keyed by plugin type. func ListPlugins() map[string][]string { p := make(map[string][]string) // server type plugins for name := range serverTypes
// caddyfile loaders in registration order for _, loader := range caddyfileLoaders { p["caddyfile_loaders"] = append(p["caddyfile_loaders"], loader.name) } if defaultCaddyfileLoader.name != "" { p["caddyfile_loaders"] = append(p["caddyfile_loaders"], defaultCaddyfileLoader.name) } // List the event hook plugins eventHooks.Range(func(k, _ interface{}) bool { p["event_hooks"] = append(p["event_hooks"], k.(string)) return true }) // alphabetize the rest of the plugins var others []string for stype, stypePlugins := range plugins { for name := range stypePlugins { var s string if stype != "" { s = stype + "." } s += name others = append(others, s) } } sort.Strings(others) for _, name := range others { p["others"] = append(p["others"], name) } return p } // ValidDirectives returns the list of all directives that are // recognized for the server type serverType. However, not all // directives may be installed. This makes it possible to give // more helpful error messages, like "did you mean ..." or // "maybe you need to plug in ...". func ValidDirectives(serverType string) []string { stype, err := getServerType(serverType) if err != nil { return nil } return stype.Directives() } // ServerListener pairs a server to its listener and/or packetconn. type ServerListener struct { server Server listener net.Listener packet net.PacketConn } // LocalAddr returns the local network address of the packetconn. It returns // nil when it is not set. func (s ServerListener) LocalAddr() net.Addr { if s.packet == nil { return nil } return s.packet.LocalAddr() } // Addr returns the listener's network address. It returns nil when it is // not set. func (s ServerListener) Addr() net.Addr { if s.listener == nil { return nil } return s.listener.Addr() } // Context is a type which carries a server type through // the load and setup phase; it maintains the state // between loading the Caddyfile, then executing its // directives, then making the servers for Caddy to // manage. Typically, such state involves configuration // structs, etc. type Context interface { // Called after the Caddyfile is parsed into server // blocks but before the directives are executed, // this method gives you an opportunity to inspect // the server blocks and prepare for the execution // of directives. Return the server blocks (which // you may modify, if desired) and an error, if any. // The first argument is the name or path to the // configuration file (Caddyfile). // // This function can be a no-op and simply return its // input if there is nothing to do here. InspectServerBlocks(string, []caddyfile.ServerBlock) ([]caddyfile.ServerBlock, error) // This is what Caddy calls to make server instances. // By this time, all directives have been executed and, // presumably, the context has enough state to produce // server instances for Caddy to start. MakeServers() ([]Server, error) } // RegisterServerType registers a server type srv by its // name, typeName. func RegisterServerType(typeName string, srv ServerType) { if _, ok := serverTypes[typeName]; ok { panic("server type already registered") } serverTypes[typeName] = srv } // ServerType contains information about a server type. type ServerType struct { // Function that returns the list of directives, in // execution order, that are valid for this server // type. Directives should be one word if possible // and lower-cased. Directives func() []string // DefaultInput returns a default config input if none // is otherwise loaded. 
This is optional, but highly // recommended, otherwise a blank Caddyfile will be // used. DefaultInput func() Input // The function that produces a new server type context. // This will be called when a new Caddyfile is being // loaded, parsed, and executed independently of any // startup phases before this one. It's a way to keep // each set of server instances separate and to reduce // the amount of global state you need. NewContext func(inst *Instance) Context } // Plugin is a type which holds information about a plugin. type Plugin struct { // ServerType is the type of server this plugin is for. // Can be empty if not applicable, or if the plugin // can associate with any server type. ServerType string // Action is the plugin's setup function, if associated // with a directive in the Caddyfile. Action SetupFunc } // RegisterPlugin plugs in plugin. All plugins should register // themselves, even if they do not perform an action associated // with a directive. It is important for the process to know // which plugins are available. // // The plugin MUST have a name: lower case and one word. // If this plugin has an action, it must be the name of // the directive that invokes it. A name is always required // and must be unique for the server type. func RegisterPlugin(name string, plugin Plugin) { if name == "" { panic("plugin must have a name") } if _, ok := plugins[plugin.ServerType]; !ok { plugins[plugin.ServerType] = make(map[string]Plugin) } if _, dup := plugins[plugin.ServerType][name]; dup { panic("plugin named " + name + " already registered for server type " + plugin.ServerType) } plugins[plugin.ServerType][name] = plugin } // EventName represents the name of an event used with event hooks. type EventName string // Define names for the various events const ( StartupEvent EventName = "startup" ShutdownEvent = "shutdown" CertRenewEvent = "certrenew" InstanceStartupEvent = "instancestartup" InstanceRestartEvent = "instancerestart" ) // EventHook is a type which holds information about a startup hook plugin. type EventHook func(eventType EventName, eventInfo interface{}) error // RegisterEventHook plugs in hook. All the hooks should register themselves // and they must have a name. func RegisterEventHook(name string, hook EventHook) { if name == "" { panic("event hook must have a name") } _, dup := eventHooks.LoadOrStore(name, hook) if dup { panic("hook named " + name + " already registered") } } // EmitEvent executes the different hooks passing the EventType as an // argument. This is a blocking function. Hook developers should // use 'go' keyword if they don't want to block Caddy. 
func EmitEvent(event EventName, info interface{}) { eventHooks.Range(func(k, v interface{}) bool { err := v.(EventHook)(event, info) if err != nil { log.Printf("error on '%s' hook: %v", k.(string), err) } return true }) } // cloneEventHooks return a clone of the event hooks *sync.Map func cloneEventHooks() *sync.Map { c := &sync.Map{} eventHooks.Range(func(k, v interface{}) bool { c.Store(k, v) return true }) return c } // purgeEventHooks purges all event hooks from the map func purgeEventHooks() { eventHooks.Range(func(k, _ interface{}) bool { eventHooks.Delete(k) return true }) } // restoreEventHooks restores eventHooks with a provided *sync.Map func restoreEventHooks(m *sync.Map) { // Purge old event hooks purgeEventHooks() // Restore event hooks m.Range(func(k, v interface{}) bool { eventHooks.Store(k, v) return true }) } // ParsingCallback is a function that is called after // a directive's setup functions have been executed // for all the server blocks. type ParsingCallback func(Context) error // RegisterParsingCallback registers callback to be called after // executing the directive afterDir for server type serverType. func RegisterParsingCallback(serverType, afterDir string, callback ParsingCallback) { if _, ok := parsingCallbacks[serverType]; !ok { parsingCallbacks[serverType] = make(map[string][]ParsingCallback) } parsingCallbacks[serverType][afterDir] = append(parsingCallbacks[serverType][afterDir], callback) } // SetupFunc is used to set up a plugin, or in other words, // execute a directive. It will be called once per key for // each server block it appears in. type SetupFunc func(c *Controller) error // DirectiveAction gets the action for directive dir of // server type serverType. func DirectiveAction(serverType, dir string) (SetupFunc, error) { if stypePlugins, ok := plugins[serverType]; ok { if plugin, ok := stypePlugins[dir]; ok { return plugin.Action, nil } } if genericPlugins, ok := plugins[""]; ok { if plugin, ok := genericPlugins[dir]; ok { return plugin.Action, nil } } return nil, fmt.Errorf("no action found for directive '%s' with server type '%s' (missing a plugin?)", dir, serverType) } // Loader is a type that can load a Caddyfile. // It is passed the name of the server type. // It returns an error only if something went // wrong, not simply if there is no Caddyfile // for this loader to load. // // A Loader should only load the Caddyfile if // a certain condition or requirement is met, // as returning a non-nil Input value along with // another Loader will result in an error. // In other words, loading the Caddyfile must // be deliberate & deterministic, not haphazard. // // The exception is the default Caddyfile loader, // which will be called only if no other Caddyfile // loaders return a non-nil Input. The default // loader may always return an Input value. type Loader interface { Load(serverType string) (Input, error) } // LoaderFunc is a convenience type similar to http.HandlerFunc // that allows you to use a plain function as a Load() method. type LoaderFunc func(serverType string) (Input, error) // Load loads a Caddyfile. func (lf LoaderFunc) Load(serverType string) (Input, error) { return lf(serverType) } // RegisterCaddyfileLoader registers loader named name. func RegisterCaddyfileLoader(name string, loader Loader) { caddyfileLoaders = append(caddyfileLoaders, caddyfileLoader{name: name, loader: loader}) } // SetDefaultCaddyfileLoader registers loader by name // as the default Caddyfile loader if no others produce // a Caddyfile. 
If another Caddyfile loader has already // been set as the default, this replaces it. // // Do not call RegisterCaddyfileLoader on the same // loader; that would be redundant. func SetDefaultCaddyfileLoader(name string, loader Loader) { defaultCaddyfileLoader = caddyfileLoader{name: name, loader: loader} } // loadCaddyfileInput iterates the registered Caddyfile loaders // and, if needed, calls the default loader, to load a Caddyfile. // It is an error if any of the loaders return an error or if // more than one loader returns a Caddyfile. func loadCaddyfileInput(serverType string) (Input, error) { var loadedBy string var caddyfileToUse Input for _, l := range caddyfileLoaders { cdyfile, err := l.loader.Load(serverType) if err != nil { return nil, fmt.Errorf("loading Caddyfile via %s: %v", l.name, err) } if cdyfile != nil { if caddyfileToUse != nil { return nil, fmt.Errorf("Caddyfile loaded multiple times; first by %s, then by %s", loadedBy, l.name) } loaderUsed = l caddyfileToUse = cdyfile loadedBy = l.name } } if caddyfileToUse == nil && defaultCaddyfileLoader.loader != nil { cdyfile, err := defaultCaddyfileLoader.loader.Load(serverType) if err != nil { return nil, err } if cdyfile != nil { loaderUsed = defaultCaddyfileLoader caddyfileToUse = cdyfile } } return caddyfileToUse, nil } // OnProcessExit is a list of functions to run when the process // exits -- they are ONLY for cleanup and should not block, // return errors, or do anything fancy. They will be run with // every signal, even if "shutdown callbacks" are not executed. // This variable must only be modified in the main goroutine // from init() functions. var OnProcessExit []func() // caddyfileLoader pairs the name of a loader to the loader. type caddyfileLoader struct { name string loader Loader } var ( defaultCaddyfileLoader caddyfileLoader // the default loader if all else fail loaderUsed caddyfileLoader // the loader that was used (relevant for reloads) )
{ p["server_types"] = append(p["server_types"], name) }
conditional_block
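For orientation, the registration flow in the plugins.go excerpts above can be exercised as follows. This is a minimal sketch, not code from the dataset; the directive name "hello", the server type "dns", and the package name are invented for illustration. A plugin registered this way would later appear as "dns.hello" under "Other plugins" in DescribePlugins, since ListPlugins prefixes a non-empty server type to the plugin name.

package hello

import "github.com/coredns/caddy"

// setup is the SetupFunc that Caddy calls once per server block
// whose Caddyfile uses the "hello" directive.
func setup(c *caddy.Controller) error {
	// A real plugin would read the directive's arguments from c here.
	return nil
}

func init() {
	// Register under the "dns" server type; an empty ServerType would
	// make the plugin available to every server type instead.
	caddy.RegisterPlugin("hello", caddy.Plugin{
		ServerType: "dns",
		Action:     setup,
	})
}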
plugins.go
// Copyright 2015 Light Code Labs, LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package caddy import ( "fmt" "log" "net" "sort" "sync" "github.com/coredns/caddy/caddyfile" ) // These are all the registered plugins. var ( // serverTypes is a map of registered server types. serverTypes = make(map[string]ServerType) // plugins is a map of server type to map of plugin name to // Plugin. These are the "general" plugins that may or may // not be associated with a specific server type. If it's // applicable to multiple server types or the server type is // irrelevant, the key is empty string (""). But all plugins // must have a name. plugins = make(map[string]map[string]Plugin) // eventHooks is a map of hook name to Hook. All hooks plugins // must have a name. eventHooks = &sync.Map{} // parsingCallbacks maps server type to map of directive // to list of callback functions. These aren't really // plugins on their own, but are often registered from // plugins. parsingCallbacks = make(map[string]map[string][]ParsingCallback) // caddyfileLoaders is the list of all Caddyfile loaders // in registration order. caddyfileLoaders []caddyfileLoader ) // DescribePlugins returns a string describing the registered plugins. func DescribePlugins() string { pl := ListPlugins() str := "Server types:\n" for _, name := range pl["server_types"] { str += " " + name + "\n" } str += "\nCaddyfile loaders:\n" for _, name := range pl["caddyfile_loaders"] { str += " " + name + "\n" } if len(pl["event_hooks"]) > 0 { str += "\nEvent hook plugins:\n" for _, name := range pl["event_hooks"] { str += " hook." + name + "\n" } } if len(pl["clustering"]) > 0 { str += "\nClustering plugins:\n" for _, name := range pl["clustering"] { str += " " + name + "\n" } } str += "\nOther plugins:\n" for _, name := range pl["others"] { str += " " + name + "\n" } return str } // ListPlugins makes a list of the registered plugins, // keyed by plugin type. func ListPlugins() map[string][]string { p := make(map[string][]string) // server type plugins for name := range serverTypes { p["server_types"] = append(p["server_types"], name) } // caddyfile loaders in registration order for _, loader := range caddyfileLoaders { p["caddyfile_loaders"] = append(p["caddyfile_loaders"], loader.name) } if defaultCaddyfileLoader.name != "" { p["caddyfile_loaders"] = append(p["caddyfile_loaders"], defaultCaddyfileLoader.name) } // List the event hook plugins eventHooks.Range(func(k, _ interface{}) bool { p["event_hooks"] = append(p["event_hooks"], k.(string)) return true }) // alphabetize the rest of the plugins var others []string for stype, stypePlugins := range plugins { for name := range stypePlugins { var s string if stype != "" { s = stype + "." } s += name others = append(others, s) } } sort.Strings(others) for _, name := range others { p["others"] = append(p["others"], name) } return p } // ValidDirectives returns the list of all directives that are // recognized for the server type serverType. However, not all // directives may be installed. 
This makes it possible to give // more helpful error messages, like "did you mean ..." or // "maybe you need to plug in ...". func ValidDirectives(serverType string) []string { stype, err := getServerType(serverType) if err != nil { return nil } return stype.Directives() } // ServerListener pairs a server to its listener and/or packetconn. type ServerListener struct { server Server listener net.Listener packet net.PacketConn } // LocalAddr returns the local network address of the packetconn. It returns // nil when it is not set. func (s ServerListener) LocalAddr() net.Addr { if s.packet == nil { return nil } return s.packet.LocalAddr() } // Addr returns the listener's network address. It returns nil when it is // not set. func (s ServerListener) Addr() net.Addr { if s.listener == nil { return nil } return s.listener.Addr() } // Context is a type which carries a server type through // the load and setup phase; it maintains the state // between loading the Caddyfile, then executing its // directives, then making the servers for Caddy to // manage. Typically, such state involves configuration // structs, etc. type Context interface { // Called after the Caddyfile is parsed into server // blocks but before the directives are executed, // this method gives you an opportunity to inspect // the server blocks and prepare for the execution // of directives. Return the server blocks (which // you may modify, if desired) and an error, if any. // The first argument is the name or path to the // configuration file (Caddyfile). // // This function can be a no-op and simply return its // input if there is nothing to do here. InspectServerBlocks(string, []caddyfile.ServerBlock) ([]caddyfile.ServerBlock, error) // This is what Caddy calls to make server instances. // By this time, all directives have been executed and, // presumably, the context has enough state to produce // server instances for Caddy to start. MakeServers() ([]Server, error) } // RegisterServerType registers a server type srv by its // name, typeName. func RegisterServerType(typeName string, srv ServerType) { if _, ok := serverTypes[typeName]; ok { panic("server type already registered") } serverTypes[typeName] = srv } // ServerType contains information about a server type. type ServerType struct { // Function that returns the list of directives, in // execution order, that are valid for this server // type. Directives should be one word if possible // and lower-cased. Directives func() []string // DefaultInput returns a default config input if none // is otherwise loaded. This is optional, but highly // recommended, otherwise a blank Caddyfile will be // used. DefaultInput func() Input // The function that produces a new server type context. // This will be called when a new Caddyfile is being // loaded, parsed, and executed independently of any // startup phases before this one. It's a way to keep // each set of server instances separate and to reduce // the amount of global state you need. NewContext func(inst *Instance) Context } // Plugin is a type which holds information about a plugin. type Plugin struct { // ServerType is the type of server this plugin is for. // Can be empty if not applicable, or if the plugin // can associate with any server type. ServerType string // Action is the plugin's setup function, if associated // with a directive in the Caddyfile. Action SetupFunc } // RegisterPlugin plugs in plugin. All plugins should register // themselves, even if they do not perform an action associated // with a directive. 
It is important for the process to know // which plugins are available. // // The plugin MUST have a name: lower case and one word. // If this plugin has an action, it must be the name of // the directive that invokes it. A name is always required // and must be unique for the server type. func RegisterPlugin(name string, plugin Plugin) { if name == "" { panic("plugin must have a name") } if _, ok := plugins[plugin.ServerType]; !ok { plugins[plugin.ServerType] = make(map[string]Plugin) } if _, dup := plugins[plugin.ServerType][name]; dup { panic("plugin named " + name + " already registered for server type " + plugin.ServerType) } plugins[plugin.ServerType][name] = plugin } // EventName represents the name of an event used with event hooks. type EventName string // Define names for the various events const ( StartupEvent EventName = "startup" ShutdownEvent = "shutdown" CertRenewEvent = "certrenew" InstanceStartupEvent = "instancestartup" InstanceRestartEvent = "instancerestart" ) // EventHook is a type which holds information about a startup hook plugin. type EventHook func(eventType EventName, eventInfo interface{}) error // RegisterEventHook plugs in hook. All the hooks should register themselves // and they must have a name. func RegisterEventHook(name string, hook EventHook) { if name == "" { panic("event hook must have a name") } _, dup := eventHooks.LoadOrStore(name, hook) if dup { panic("hook named " + name + " already registered") } } // EmitEvent executes the different hooks passing the EventType as an // argument. This is a blocking function. Hook developers should // use 'go' keyword if they don't want to block Caddy. func EmitEvent(event EventName, info interface{}) { eventHooks.Range(func(k, v interface{}) bool { err := v.(EventHook)(event, info) if err != nil { log.Printf("error on '%s' hook: %v", k.(string), err) } return true }) } // cloneEventHooks return a clone of the event hooks *sync.Map func cloneEventHooks() *sync.Map { c := &sync.Map{} eventHooks.Range(func(k, v interface{}) bool { c.Store(k, v) return true }) return c } // purgeEventHooks purges all event hooks from the map func purgeEventHooks() { eventHooks.Range(func(k, _ interface{}) bool { eventHooks.Delete(k) return true }) } // restoreEventHooks restores eventHooks with a provided *sync.Map func restoreEventHooks(m *sync.Map) { // Purge old event hooks purgeEventHooks() // Restore event hooks m.Range(func(k, v interface{}) bool { eventHooks.Store(k, v) return true }) } // ParsingCallback is a function that is called after // a directive's setup functions have been executed // for all the server blocks. type ParsingCallback func(Context) error // RegisterParsingCallback registers callback to be called after // executing the directive afterDir for server type serverType. func
(serverType, afterDir string, callback ParsingCallback) { if _, ok := parsingCallbacks[serverType]; !ok { parsingCallbacks[serverType] = make(map[string][]ParsingCallback) } parsingCallbacks[serverType][afterDir] = append(parsingCallbacks[serverType][afterDir], callback) } // SetupFunc is used to set up a plugin, or in other words, // execute a directive. It will be called once per key for // each server block it appears in. type SetupFunc func(c *Controller) error // DirectiveAction gets the action for directive dir of // server type serverType. func DirectiveAction(serverType, dir string) (SetupFunc, error) { if stypePlugins, ok := plugins[serverType]; ok { if plugin, ok := stypePlugins[dir]; ok { return plugin.Action, nil } } if genericPlugins, ok := plugins[""]; ok { if plugin, ok := genericPlugins[dir]; ok { return plugin.Action, nil } } return nil, fmt.Errorf("no action found for directive '%s' with server type '%s' (missing a plugin?)", dir, serverType) } // Loader is a type that can load a Caddyfile. // It is passed the name of the server type. // It returns an error only if something went // wrong, not simply if there is no Caddyfile // for this loader to load. // // A Loader should only load the Caddyfile if // a certain condition or requirement is met, // as returning a non-nil Input value along with // another Loader will result in an error. // In other words, loading the Caddyfile must // be deliberate & deterministic, not haphazard. // // The exception is the default Caddyfile loader, // which will be called only if no other Caddyfile // loaders return a non-nil Input. The default // loader may always return an Input value. type Loader interface { Load(serverType string) (Input, error) } // LoaderFunc is a convenience type similar to http.HandlerFunc // that allows you to use a plain function as a Load() method. type LoaderFunc func(serverType string) (Input, error) // Load loads a Caddyfile. func (lf LoaderFunc) Load(serverType string) (Input, error) { return lf(serverType) } // RegisterCaddyfileLoader registers loader named name. func RegisterCaddyfileLoader(name string, loader Loader) { caddyfileLoaders = append(caddyfileLoaders, caddyfileLoader{name: name, loader: loader}) } // SetDefaultCaddyfileLoader registers loader by name // as the default Caddyfile loader if no others produce // a Caddyfile. If another Caddyfile loader has already // been set as the default, this replaces it. // // Do not call RegisterCaddyfileLoader on the same // loader; that would be redundant. func SetDefaultCaddyfileLoader(name string, loader Loader) { defaultCaddyfileLoader = caddyfileLoader{name: name, loader: loader} } // loadCaddyfileInput iterates the registered Caddyfile loaders // and, if needed, calls the default loader, to load a Caddyfile. // It is an error if any of the loaders return an error or if // more than one loader returns a Caddyfile. 
func loadCaddyfileInput(serverType string) (Input, error) { var loadedBy string var caddyfileToUse Input for _, l := range caddyfileLoaders { cdyfile, err := l.loader.Load(serverType) if err != nil { return nil, fmt.Errorf("loading Caddyfile via %s: %v", l.name, err) } if cdyfile != nil { if caddyfileToUse != nil { return nil, fmt.Errorf("Caddyfile loaded multiple times; first by %s, then by %s", loadedBy, l.name) } loaderUsed = l caddyfileToUse = cdyfile loadedBy = l.name } } if caddyfileToUse == nil && defaultCaddyfileLoader.loader != nil { cdyfile, err := defaultCaddyfileLoader.loader.Load(serverType) if err != nil { return nil, err } if cdyfile != nil { loaderUsed = defaultCaddyfileLoader caddyfileToUse = cdyfile } } return caddyfileToUse, nil } // OnProcessExit is a list of functions to run when the process // exits -- they are ONLY for cleanup and should not block, // return errors, or do anything fancy. They will be run with // every signal, even if "shutdown callbacks" are not executed. // This variable must only be modified in the main goroutine // from init() functions. var OnProcessExit []func() // caddyfileLoader pairs the name of a loader to the loader. type caddyfileLoader struct { name string loader Loader } var ( defaultCaddyfileLoader caddyfileLoader // the default loader if all else fail loaderUsed caddyfileLoader // the loader that was used (relevant for reloads) )
RegisterParsingCallback
identifier_name
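To make the parsing-callback machinery above concrete, here is a hedged sketch of how a plugin might use RegisterParsingCallback; the server type "dns", the directive "cache", and the package name are assumptions made for this example only. The callback runs after the named directive's setup functions have executed for all server blocks of that server type, which makes it a natural place for cross-block validation.

package example

import "github.com/coredns/caddy"

func init() {
	// Called once the (hypothetical) "cache" directive has been set up in
	// every server block of the (hypothetical) "dns" server type.
	caddy.RegisterParsingCallback("dns", "cache", func(ctx caddy.Context) error {
		// ctx is the server type's Context; inspect or finalize any state
		// the directive setups accumulated in it, and return an error to
		// abort the load if something is inconsistent.
		return nil
	})
}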
plugins.go
// Copyright 2015 Light Code Labs, LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package caddy import ( "fmt" "log" "net" "sort" "sync" "github.com/coredns/caddy/caddyfile" ) // These are all the registered plugins. var ( // serverTypes is a map of registered server types. serverTypes = make(map[string]ServerType) // plugins is a map of server type to map of plugin name to // Plugin. These are the "general" plugins that may or may // not be associated with a specific server type. If it's // applicable to multiple server types or the server type is // irrelevant, the key is empty string (""). But all plugins // must have a name. plugins = make(map[string]map[string]Plugin) // eventHooks is a map of hook name to Hook. All hooks plugins // must have a name. eventHooks = &sync.Map{} // parsingCallbacks maps server type to map of directive // to list of callback functions. These aren't really // plugins on their own, but are often registered from // plugins. parsingCallbacks = make(map[string]map[string][]ParsingCallback) // caddyfileLoaders is the list of all Caddyfile loaders // in registration order. caddyfileLoaders []caddyfileLoader ) // DescribePlugins returns a string describing the registered plugins. func DescribePlugins() string { pl := ListPlugins() str := "Server types:\n" for _, name := range pl["server_types"] { str += " " + name + "\n" } str += "\nCaddyfile loaders:\n" for _, name := range pl["caddyfile_loaders"] { str += " " + name + "\n" } if len(pl["event_hooks"]) > 0 { str += "\nEvent hook plugins:\n" for _, name := range pl["event_hooks"] { str += " hook." + name + "\n" } } if len(pl["clustering"]) > 0 { str += "\nClustering plugins:\n" for _, name := range pl["clustering"] { str += " " + name + "\n" } } str += "\nOther plugins:\n" for _, name := range pl["others"] { str += " " + name + "\n" } return str } // ListPlugins makes a list of the registered plugins, // keyed by plugin type. func ListPlugins() map[string][]string { p := make(map[string][]string) // server type plugins for name := range serverTypes { p["server_types"] = append(p["server_types"], name) } // caddyfile loaders in registration order for _, loader := range caddyfileLoaders { p["caddyfile_loaders"] = append(p["caddyfile_loaders"], loader.name) } if defaultCaddyfileLoader.name != "" { p["caddyfile_loaders"] = append(p["caddyfile_loaders"], defaultCaddyfileLoader.name) } // List the event hook plugins eventHooks.Range(func(k, _ interface{}) bool { p["event_hooks"] = append(p["event_hooks"], k.(string)) return true }) // alphabetize the rest of the plugins var others []string for stype, stypePlugins := range plugins { for name := range stypePlugins { var s string if stype != "" { s = stype + "." } s += name others = append(others, s) } } sort.Strings(others) for _, name := range others { p["others"] = append(p["others"], name) } return p } // ValidDirectives returns the list of all directives that are // recognized for the server type serverType. However, not all // directives may be installed. 
This makes it possible to give // more helpful error messages, like "did you mean ..." or // "maybe you need to plug in ...". func ValidDirectives(serverType string) []string { stype, err := getServerType(serverType) if err != nil { return nil } return stype.Directives() } // ServerListener pairs a server to its listener and/or packetconn. type ServerListener struct { server Server listener net.Listener packet net.PacketConn } // LocalAddr returns the local network address of the packetconn. It returns // nil when it is not set. func (s ServerListener) LocalAddr() net.Addr { if s.packet == nil { return nil } return s.packet.LocalAddr() } // Addr returns the listener's network address. It returns nil when it is // not set. func (s ServerListener) Addr() net.Addr { if s.listener == nil { return nil } return s.listener.Addr() } // Context is a type which carries a server type through // the load and setup phase; it maintains the state // between loading the Caddyfile, then executing its // directives, then making the servers for Caddy to // manage. Typically, such state involves configuration // structs, etc. type Context interface { // Called after the Caddyfile is parsed into server // blocks but before the directives are executed, // this method gives you an opportunity to inspect // the server blocks and prepare for the execution // of directives. Return the server blocks (which // you may modify, if desired) and an error, if any. // The first argument is the name or path to the // configuration file (Caddyfile). // // This function can be a no-op and simply return its // input if there is nothing to do here. InspectServerBlocks(string, []caddyfile.ServerBlock) ([]caddyfile.ServerBlock, error) // This is what Caddy calls to make server instances. // By this time, all directives have been executed and, // presumably, the context has enough state to produce // server instances for Caddy to start. MakeServers() ([]Server, error) } // RegisterServerType registers a server type srv by its // name, typeName. func RegisterServerType(typeName string, srv ServerType) { if _, ok := serverTypes[typeName]; ok { panic("server type already registered") } serverTypes[typeName] = srv } // ServerType contains information about a server type. type ServerType struct { // Function that returns the list of directives, in // execution order, that are valid for this server // type. Directives should be one word if possible // and lower-cased. Directives func() []string // DefaultInput returns a default config input if none // is otherwise loaded. This is optional, but highly // recommended, otherwise a blank Caddyfile will be // used. DefaultInput func() Input // The function that produces a new server type context. // This will be called when a new Caddyfile is being // loaded, parsed, and executed independently of any // startup phases before this one. It's a way to keep // each set of server instances separate and to reduce // the amount of global state you need. NewContext func(inst *Instance) Context } // Plugin is a type which holds information about a plugin. type Plugin struct { // ServerType is the type of server this plugin is for. // Can be empty if not applicable, or if the plugin // can associate with any server type. ServerType string // Action is the plugin's setup function, if associated // with a directive in the Caddyfile. Action SetupFunc } // RegisterPlugin plugs in plugin. All plugins should register // themselves, even if they do not perform an action associated // with a directive. 
It is important for the process to know // which plugins are available. // // The plugin MUST have a name: lower case and one word. // If this plugin has an action, it must be the name of // the directive that invokes it. A name is always required // and must be unique for the server type. func RegisterPlugin(name string, plugin Plugin) { if name == "" { panic("plugin must have a name") } if _, ok := plugins[plugin.ServerType]; !ok { plugins[plugin.ServerType] = make(map[string]Plugin) } if _, dup := plugins[plugin.ServerType][name]; dup { panic("plugin named " + name + " already registered for server type " + plugin.ServerType) } plugins[plugin.ServerType][name] = plugin } // EventName represents the name of an event used with event hooks. type EventName string // Define names for the various events const ( StartupEvent EventName = "startup" ShutdownEvent = "shutdown" CertRenewEvent = "certrenew" InstanceStartupEvent = "instancestartup" InstanceRestartEvent = "instancerestart" ) // EventHook is a type which holds information about a startup hook plugin. type EventHook func(eventType EventName, eventInfo interface{}) error // RegisterEventHook plugs in hook. All the hooks should register themselves // and they must have a name. func RegisterEventHook(name string, hook EventHook) { if name == "" { panic("event hook must have a name") } _, dup := eventHooks.LoadOrStore(name, hook) if dup { panic("hook named " + name + " already registered") } } // EmitEvent executes the different hooks passing the EventType as an // argument. This is a blocking function. Hook developers should // use 'go' keyword if they don't want to block Caddy. func EmitEvent(event EventName, info interface{}) { eventHooks.Range(func(k, v interface{}) bool { err := v.(EventHook)(event, info) if err != nil { log.Printf("error on '%s' hook: %v", k.(string), err) } return true }) } // cloneEventHooks return a clone of the event hooks *sync.Map func cloneEventHooks() *sync.Map { c := &sync.Map{} eventHooks.Range(func(k, v interface{}) bool { c.Store(k, v) return true }) return c } // purgeEventHooks purges all event hooks from the map func purgeEventHooks()
// restoreEventHooks restores eventHooks with a provided *sync.Map func restoreEventHooks(m *sync.Map) { // Purge old event hooks purgeEventHooks() // Restore event hooks m.Range(func(k, v interface{}) bool { eventHooks.Store(k, v) return true }) } // ParsingCallback is a function that is called after // a directive's setup functions have been executed // for all the server blocks. type ParsingCallback func(Context) error // RegisterParsingCallback registers callback to be called after // executing the directive afterDir for server type serverType. func RegisterParsingCallback(serverType, afterDir string, callback ParsingCallback) { if _, ok := parsingCallbacks[serverType]; !ok { parsingCallbacks[serverType] = make(map[string][]ParsingCallback) } parsingCallbacks[serverType][afterDir] = append(parsingCallbacks[serverType][afterDir], callback) } // SetupFunc is used to set up a plugin, or in other words, // execute a directive. It will be called once per key for // each server block it appears in. type SetupFunc func(c *Controller) error // DirectiveAction gets the action for directive dir of // server type serverType. func DirectiveAction(serverType, dir string) (SetupFunc, error) { if stypePlugins, ok := plugins[serverType]; ok { if plugin, ok := stypePlugins[dir]; ok { return plugin.Action, nil } } if genericPlugins, ok := plugins[""]; ok { if plugin, ok := genericPlugins[dir]; ok { return plugin.Action, nil } } return nil, fmt.Errorf("no action found for directive '%s' with server type '%s' (missing a plugin?)", dir, serverType) } // Loader is a type that can load a Caddyfile. // It is passed the name of the server type. // It returns an error only if something went // wrong, not simply if there is no Caddyfile // for this loader to load. // // A Loader should only load the Caddyfile if // a certain condition or requirement is met, // as returning a non-nil Input value along with // another Loader will result in an error. // In other words, loading the Caddyfile must // be deliberate & deterministic, not haphazard. // // The exception is the default Caddyfile loader, // which will be called only if no other Caddyfile // loaders return a non-nil Input. The default // loader may always return an Input value. type Loader interface { Load(serverType string) (Input, error) } // LoaderFunc is a convenience type similar to http.HandlerFunc // that allows you to use a plain function as a Load() method. type LoaderFunc func(serverType string) (Input, error) // Load loads a Caddyfile. func (lf LoaderFunc) Load(serverType string) (Input, error) { return lf(serverType) } // RegisterCaddyfileLoader registers loader named name. func RegisterCaddyfileLoader(name string, loader Loader) { caddyfileLoaders = append(caddyfileLoaders, caddyfileLoader{name: name, loader: loader}) } // SetDefaultCaddyfileLoader registers loader by name // as the default Caddyfile loader if no others produce // a Caddyfile. If another Caddyfile loader has already // been set as the default, this replaces it. // // Do not call RegisterCaddyfileLoader on the same // loader; that would be redundant. func SetDefaultCaddyfileLoader(name string, loader Loader) { defaultCaddyfileLoader = caddyfileLoader{name: name, loader: loader} } // loadCaddyfileInput iterates the registered Caddyfile loaders // and, if needed, calls the default loader, to load a Caddyfile. // It is an error if any of the loaders return an error or if // more than one loader returns a Caddyfile. 
func loadCaddyfileInput(serverType string) (Input, error) { var loadedBy string var caddyfileToUse Input for _, l := range caddyfileLoaders { cdyfile, err := l.loader.Load(serverType) if err != nil { return nil, fmt.Errorf("loading Caddyfile via %s: %v", l.name, err) } if cdyfile != nil { if caddyfileToUse != nil { return nil, fmt.Errorf("Caddyfile loaded multiple times; first by %s, then by %s", loadedBy, l.name) } loaderUsed = l caddyfileToUse = cdyfile loadedBy = l.name } } if caddyfileToUse == nil && defaultCaddyfileLoader.loader != nil { cdyfile, err := defaultCaddyfileLoader.loader.Load(serverType) if err != nil { return nil, err } if cdyfile != nil { loaderUsed = defaultCaddyfileLoader caddyfileToUse = cdyfile } } return caddyfileToUse, nil } // OnProcessExit is a list of functions to run when the process // exits -- they are ONLY for cleanup and should not block, // return errors, or do anything fancy. They will be run with // every signal, even if "shutdown callbacks" are not executed. // This variable must only be modified in the main goroutine // from init() functions. var OnProcessExit []func() // caddyfileLoader pairs the name of a loader to the loader. type caddyfileLoader struct { name string loader Loader } var ( defaultCaddyfileLoader caddyfileLoader // the default loader if all else fail loaderUsed caddyfileLoader // the loader that was used (relevant for reloads) )
{ eventHooks.Range(func(k, _ interface{}) bool { eventHooks.Delete(k) return true }) }
identifier_body
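The event-hook half of the file (RegisterEventHook, EmitEvent, and the clone/purge/restore helpers) can be used as sketched below; the hook name "metrics" and the package name are illustrative assumptions. Because EmitEvent runs hooks synchronously, a hook that does slow work should hand it off to a goroutine, as the EmitEvent comment advises.

package example

import (
	"log"

	"github.com/coredns/caddy"
)

func init() {
	// The hook fires for every emitted event, so filter on the event name.
	caddy.RegisterEventHook("metrics", func(event caddy.EventName, info interface{}) error {
		if event == caddy.StartupEvent {
			log.Printf("startup event received, info=%v", info)
		}
		return nil
	})
}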
plugins.go
// Copyright 2015 Light Code Labs, LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// limitations under the License. package caddy import ( "fmt" "log" "net" "sort" "sync" "github.com/coredns/caddy/caddyfile" ) // These are all the registered plugins. var ( // serverTypes is a map of registered server types. serverTypes = make(map[string]ServerType) // plugins is a map of server type to map of plugin name to // Plugin. These are the "general" plugins that may or may // not be associated with a specific server type. If it's // applicable to multiple server types or the server type is // irrelevant, the key is empty string (""). But all plugins // must have a name. plugins = make(map[string]map[string]Plugin) // eventHooks is a map of hook name to Hook. All hooks plugins // must have a name. eventHooks = &sync.Map{} // parsingCallbacks maps server type to map of directive // to list of callback functions. These aren't really // plugins on their own, but are often registered from // plugins. parsingCallbacks = make(map[string]map[string][]ParsingCallback) // caddyfileLoaders is the list of all Caddyfile loaders // in registration order. caddyfileLoaders []caddyfileLoader ) // DescribePlugins returns a string describing the registered plugins. func DescribePlugins() string { pl := ListPlugins() str := "Server types:\n" for _, name := range pl["server_types"] { str += " " + name + "\n" } str += "\nCaddyfile loaders:\n" for _, name := range pl["caddyfile_loaders"] { str += " " + name + "\n" } if len(pl["event_hooks"]) > 0 { str += "\nEvent hook plugins:\n" for _, name := range pl["event_hooks"] { str += " hook." + name + "\n" } } if len(pl["clustering"]) > 0 { str += "\nClustering plugins:\n" for _, name := range pl["clustering"] { str += " " + name + "\n" } } str += "\nOther plugins:\n" for _, name := range pl["others"] { str += " " + name + "\n" } return str } // ListPlugins makes a list of the registered plugins, // keyed by plugin type. func ListPlugins() map[string][]string { p := make(map[string][]string) // server type plugins for name := range serverTypes { p["server_types"] = append(p["server_types"], name) } // caddyfile loaders in registration order for _, loader := range caddyfileLoaders { p["caddyfile_loaders"] = append(p["caddyfile_loaders"], loader.name) } if defaultCaddyfileLoader.name != "" { p["caddyfile_loaders"] = append(p["caddyfile_loaders"], defaultCaddyfileLoader.name) } // List the event hook plugins eventHooks.Range(func(k, _ interface{}) bool { p["event_hooks"] = append(p["event_hooks"], k.(string)) return true }) // alphabetize the rest of the plugins var others []string for stype, stypePlugins := range plugins { for name := range stypePlugins { var s string if stype != "" { s = stype + "." } s += name others = append(others, s) } } sort.Strings(others) for _, name := range others { p["others"] = append(p["others"], name) } return p } // ValidDirectives returns the list of all directives that are // recognized for the server type serverType. However, not all // directives may be installed. This makes it possible to give // more helpful error messages, like "did you mean ..." or // "maybe you need to plug in ...". func ValidDirectives(serverType string) []string { stype, err := getServerType(serverType) if err != nil { return nil } return stype.Directives() } // ServerListener pairs a server to its listener and/or packetconn. type ServerListener struct { server Server listener net.Listener packet net.PacketConn } // LocalAddr returns the local network address of the packetconn. It returns // nil when it is not set. 
func (s ServerListener) LocalAddr() net.Addr { if s.packet == nil { return nil } return s.packet.LocalAddr() } // Addr returns the listener's network address. It returns nil when it is // not set. func (s ServerListener) Addr() net.Addr { if s.listener == nil { return nil } return s.listener.Addr() } // Context is a type which carries a server type through // the load and setup phase; it maintains the state // between loading the Caddyfile, then executing its // directives, then making the servers for Caddy to // manage. Typically, such state involves configuration // structs, etc. type Context interface { // Called after the Caddyfile is parsed into server // blocks but before the directives are executed, // this method gives you an opportunity to inspect // the server blocks and prepare for the execution // of directives. Return the server blocks (which // you may modify, if desired) and an error, if any. // The first argument is the name or path to the // configuration file (Caddyfile). // // This function can be a no-op and simply return its // input if there is nothing to do here. InspectServerBlocks(string, []caddyfile.ServerBlock) ([]caddyfile.ServerBlock, error) // This is what Caddy calls to make server instances. // By this time, all directives have been executed and, // presumably, the context has enough state to produce // server instances for Caddy to start. MakeServers() ([]Server, error) } // RegisterServerType registers a server type srv by its // name, typeName. func RegisterServerType(typeName string, srv ServerType) { if _, ok := serverTypes[typeName]; ok { panic("server type already registered") } serverTypes[typeName] = srv } // ServerType contains information about a server type. type ServerType struct { // Function that returns the list of directives, in // execution order, that are valid for this server // type. Directives should be one word if possible // and lower-cased. Directives func() []string // DefaultInput returns a default config input if none // is otherwise loaded. This is optional, but highly // recommended, otherwise a blank Caddyfile will be // used. DefaultInput func() Input // The function that produces a new server type context. // This will be called when a new Caddyfile is being // loaded, parsed, and executed independently of any // startup phases before this one. It's a way to keep // each set of server instances separate and to reduce // the amount of global state you need. NewContext func(inst *Instance) Context } // Plugin is a type which holds information about a plugin. type Plugin struct { // ServerType is the type of server this plugin is for. // Can be empty if not applicable, or if the plugin // can associate with any server type. ServerType string // Action is the plugin's setup function, if associated // with a directive in the Caddyfile. Action SetupFunc } // RegisterPlugin plugs in plugin. All plugins should register // themselves, even if they do not perform an action associated // with a directive. It is important for the process to know // which plugins are available. // // The plugin MUST have a name: lower case and one word. // If this plugin has an action, it must be the name of // the directive that invokes it. A name is always required // and must be unique for the server type. 
func RegisterPlugin(name string, plugin Plugin) { if name == "" { panic("plugin must have a name") } if _, ok := plugins[plugin.ServerType]; !ok { plugins[plugin.ServerType] = make(map[string]Plugin) } if _, dup := plugins[plugin.ServerType][name]; dup { panic("plugin named " + name + " already registered for server type " + plugin.ServerType) } plugins[plugin.ServerType][name] = plugin } // EventName represents the name of an event used with event hooks. type EventName string // Define names for the various events const ( StartupEvent EventName = "startup" ShutdownEvent = "shutdown" CertRenewEvent = "certrenew" InstanceStartupEvent = "instancestartup" InstanceRestartEvent = "instancerestart" ) // EventHook is a type which holds information about a startup hook plugin. type EventHook func(eventType EventName, eventInfo interface{}) error // RegisterEventHook plugs in hook. All the hooks should register themselves // and they must have a name. func RegisterEventHook(name string, hook EventHook) { if name == "" { panic("event hook must have a name") } _, dup := eventHooks.LoadOrStore(name, hook) if dup { panic("hook named " + name + " already registered") } } // EmitEvent executes the different hooks passing the EventType as an // argument. This is a blocking function. Hook developers should // use 'go' keyword if they don't want to block Caddy. func EmitEvent(event EventName, info interface{}) { eventHooks.Range(func(k, v interface{}) bool { err := v.(EventHook)(event, info) if err != nil { log.Printf("error on '%s' hook: %v", k.(string), err) } return true }) } // cloneEventHooks return a clone of the event hooks *sync.Map func cloneEventHooks() *sync.Map { c := &sync.Map{} eventHooks.Range(func(k, v interface{}) bool { c.Store(k, v) return true }) return c } // purgeEventHooks purges all event hooks from the map func purgeEventHooks() { eventHooks.Range(func(k, _ interface{}) bool { eventHooks.Delete(k) return true }) } // restoreEventHooks restores eventHooks with a provided *sync.Map func restoreEventHooks(m *sync.Map) { // Purge old event hooks purgeEventHooks() // Restore event hooks m.Range(func(k, v interface{}) bool { eventHooks.Store(k, v) return true }) } // ParsingCallback is a function that is called after // a directive's setup functions have been executed // for all the server blocks. type ParsingCallback func(Context) error // RegisterParsingCallback registers callback to be called after // executing the directive afterDir for server type serverType. func RegisterParsingCallback(serverType, afterDir string, callback ParsingCallback) { if _, ok := parsingCallbacks[serverType]; !ok { parsingCallbacks[serverType] = make(map[string][]ParsingCallback) } parsingCallbacks[serverType][afterDir] = append(parsingCallbacks[serverType][afterDir], callback) } // SetupFunc is used to set up a plugin, or in other words, // execute a directive. It will be called once per key for // each server block it appears in. type SetupFunc func(c *Controller) error // DirectiveAction gets the action for directive dir of // server type serverType. 
func DirectiveAction(serverType, dir string) (SetupFunc, error) { if stypePlugins, ok := plugins[serverType]; ok { if plugin, ok := stypePlugins[dir]; ok { return plugin.Action, nil } } if genericPlugins, ok := plugins[""]; ok { if plugin, ok := genericPlugins[dir]; ok { return plugin.Action, nil } } return nil, fmt.Errorf("no action found for directive '%s' with server type '%s' (missing a plugin?)", dir, serverType) } // Loader is a type that can load a Caddyfile. // It is passed the name of the server type. // It returns an error only if something went // wrong, not simply if there is no Caddyfile // for this loader to load. // // A Loader should only load the Caddyfile if // a certain condition or requirement is met, // as returning a non-nil Input value along with // another Loader will result in an error. // In other words, loading the Caddyfile must // be deliberate & deterministic, not haphazard. // // The exception is the default Caddyfile loader, // which will be called only if no other Caddyfile // loaders return a non-nil Input. The default // loader may always return an Input value. type Loader interface { Load(serverType string) (Input, error) } // LoaderFunc is a convenience type similar to http.HandlerFunc // that allows you to use a plain function as a Load() method. type LoaderFunc func(serverType string) (Input, error) // Load loads a Caddyfile. func (lf LoaderFunc) Load(serverType string) (Input, error) { return lf(serverType) } // RegisterCaddyfileLoader registers loader named name. func RegisterCaddyfileLoader(name string, loader Loader) { caddyfileLoaders = append(caddyfileLoaders, caddyfileLoader{name: name, loader: loader}) } // SetDefaultCaddyfileLoader registers loader by name // as the default Caddyfile loader if no others produce // a Caddyfile. If another Caddyfile loader has already // been set as the default, this replaces it. // // Do not call RegisterCaddyfileLoader on the same // loader; that would be redundant. func SetDefaultCaddyfileLoader(name string, loader Loader) { defaultCaddyfileLoader = caddyfileLoader{name: name, loader: loader} } // loadCaddyfileInput iterates the registered Caddyfile loaders // and, if needed, calls the default loader, to load a Caddyfile. // It is an error if any of the loaders return an error or if // more than one loader returns a Caddyfile. func loadCaddyfileInput(serverType string) (Input, error) { var loadedBy string var caddyfileToUse Input for _, l := range caddyfileLoaders { cdyfile, err := l.loader.Load(serverType) if err != nil { return nil, fmt.Errorf("loading Caddyfile via %s: %v", l.name, err) } if cdyfile != nil { if caddyfileToUse != nil { return nil, fmt.Errorf("Caddyfile loaded multiple times; first by %s, then by %s", loadedBy, l.name) } loaderUsed = l caddyfileToUse = cdyfile loadedBy = l.name } } if caddyfileToUse == nil && defaultCaddyfileLoader.loader != nil { cdyfile, err := defaultCaddyfileLoader.loader.Load(serverType) if err != nil { return nil, err } if cdyfile != nil { loaderUsed = defaultCaddyfileLoader caddyfileToUse = cdyfile } } return caddyfileToUse, nil } // OnProcessExit is a list of functions to run when the process // exits -- they are ONLY for cleanup and should not block, // return errors, or do anything fancy. They will be run with // every signal, even if "shutdown callbacks" are not executed. // This variable must only be modified in the main goroutine // from init() functions. var OnProcessExit []func() // caddyfileLoader pairs the name of a loader to the loader. 
type caddyfileLoader struct { name string loader Loader } var ( defaultCaddyfileLoader caddyfileLoader // the default loader if all else fails loaderUsed caddyfileLoader // the loader that was used (relevant for reloads) )
// See the License for the specific language governing permissions and
random_line_split
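The Caddy plugin API shown above (RegisterPlugin, RegisterEventHook, RegisterCaddyfileLoader) is registration-driven: everything hooks itself in from init(). Below is a minimal sketch of what a third-party directive plugin's registration could look like. The directive name "gizmo", the hook and loader names, and the import path are hypothetical (older Caddy releases import the package as github.com/mholt/caddy, newer ones as github.com/caddyserver/caddy), and the setup body is deliberately left empty.

package gizmo

import (
	"log"

	"github.com/caddyserver/caddy" // import path assumed; see note above
)

func init() {
	// A directive plugin: the name must be lower case, one word, and
	// match the directive that invokes it in the Caddyfile.
	caddy.RegisterPlugin("gizmo", caddy.Plugin{
		ServerType: "http", // empty would mean "any server type"
		Action:     setup,
	})

	// An event hook: called for every emitted event; must have a unique name.
	caddy.RegisterEventHook("gizmo-logger", func(event caddy.EventName, info interface{}) error {
		log.Printf("gizmo saw event %v", event)
		return nil
	})

	// A Caddyfile loader that deliberately loads nothing: returning
	// (nil, nil) means "no Caddyfile from this loader", which is not an error.
	caddy.RegisterCaddyfileLoader("gizmoloader", caddy.LoaderFunc(
		func(serverType string) (caddy.Input, error) { return nil, nil }))
}

// setup is the SetupFunc for the directive; it runs once per key for
// each server block the directive appears in. Parsing of the
// directive's arguments is omitted in this sketch.
func setup(c *caddy.Controller) error {
	return nil
}

Because RegisterPlugin panics on a duplicate name, registration is expected to happen exactly once per process, which is why it lives in init().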
2#allAnglesLengths.py
#!/usr/bin/env python #all_angle_lengths.py """ Calculates all angles, lengths and representative angles and lengths. For a given triple of amino acids, this calculates all the angles, lengths and representative angle and length. The angles are those formed by the median from each vertex and the opposite edge. Attributes: path: Location where the required input resources are stored. subfolder: The sample for which calculations are required. aminoAcidCode: Lexicographic file required for rule-based assignment. """ import math,glob,os,time from collections import Counter import pandas as pd from joblib import Parallel, delayed, cpu_count __author__ = "Sumi Singh, Venkata Sarika Kondra" __version__ = "1.0.2" __maintainer__ = "Venkata Sarika Kondra" __email__ = "[email protected]" round_off_to = 2 total_samples = 12 setting = 'corrected' def calcDist(indexLabel1,indexLabel2):
def indexFind(index_of_2,i1,j1,k1): if index_of_2==i1: indexOf0=j1 indexOf1=k1 elif index_of_2==j1: indexOf0=i1 indexOf1=k1 elif index_of_2==k1: indexOf0=i1 indexOf1=j1 return indexOf0, indexOf1 def processFiles(fileName): """Calculates all angles, all lengths, representative angle and maxDist after performing rule-based labelling. Arguments: fileName: The protein file in PDB/ENT format. Returns: all_angleList: A Counter having all angles formed by their medians on opposite edges of the non-collinear triangle formed by the three amino acids at i, j and k and their frequencies of occurences in this protein file rounded to next significant digit. rep_angleList: A Counter having representative angle and its frequency all_lengthsList: A counter having lengths of all edges of the non-collinear triangle formed. maxDist: Maximum length among all lengths calculated above. """ print fileName count_t1 = 0 inFile=open(fileName,'r') all_angleList = Counter() rep_angleList = Counter() all_lengthsList = Counter() maxDist_List = Counter() global xCord, yCord, zCord aminoAcidName={} xCord={} yCord={} zCord={} seq_number={} counter=0 for i in inFile: if (i[0:6].rstrip()=="NUMMDL"): numOfModels=i[10:14].rstrip() if ((i[0:6].rstrip()=="ENDMDL")or (i[0:6].rstrip()=='TER')): break if (i[0:6].rstrip()=="MODEL" and int(i[10:14].rstrip())>1): break if(i[0:4].rstrip())=="ATOM" and(i[13:15].rstrip())=="CA" and(i[16]=='A'or i[16]==' ')and i[17:20]!= "UNK" : aminoAcidName[counter]=int(aminoAcidLabel[i[17:20]]) xCord[counter]=(float(i[30:38])) yCord[counter]=(float(i[38:46])) zCord[counter]=(float(i[46:54])) seq_number[counter]=str(i[22:27]) counter+=1 protLen=len(yCord) initialLabel=[] sortedLabel=[] sortedIndex=[] outDist={} for m in range(0,3): initialLabel.append(0) sortedLabel.append(0) sortedIndex.append(0) for i in range(0,protLen-2): for j in range(i+1,protLen-1): for k in range(j+1, protLen): global i1,j1,k1 i1=i j1=j k1=k keepLabelIndex={} keepLabelIndex[aminoAcidName[i]]=i keepLabelIndex[aminoAcidName[j]]=j keepLabelIndex[aminoAcidName[k]]=k initialLabel[0]=aminoAcidName[i] initialLabel[1]=aminoAcidName[j] initialLabel[2]=aminoAcidName[k] sortedLabel=list(initialLabel) sortedLabel.sort(reverse=True) #Perform Rule- based labelling if (sortedLabel[0]==sortedLabel[1])and(sortedLabel[1]==sortedLabel[2]): dist1_2Temp=calcDist(i,j) dist1_3Temp=calcDist(i,k) dist2_3Temp=calcDist(j,k) if dist1_2Temp>=(max(dist1_2Temp,dist1_3Temp,dist2_3Temp)): indexOf0=i indexOf1=j indexOf2=k elif dist1_3Temp>=(max(dist1_2Temp,dist1_3Temp,dist2_3Temp)): indexOf0=i indexOf1=k indexOf2=j else: indexOf0=j indexOf1=k indexOf2=i elif(aminoAcidName[i]!=aminoAcidName[j])and(aminoAcidName[i]!=aminoAcidName[k]) and(aminoAcidName[j]!=aminoAcidName[k]): for index_ in range(0,3): sortedIndex[index_]=keepLabelIndex[sortedLabel[index_]] indexOf0=sortedIndex[0] indexOf1=sortedIndex[1] indexOf2=sortedIndex[2] elif(sortedLabel[0]==sortedLabel[1])and(sortedLabel[1]!=sortedLabel[2]): indexOf2=keepLabelIndex[sortedLabel[2]] indices=indexFind(indexOf2,i,j,k) a=indexOf2 b=indices[0] c=indices[1] dist1_3Temp=calcDist(b,a) dist2_3Temp=calcDist(c,a) if dist1_3Temp>=dist2_3Temp: indexOf0=indices[0] indexOf1=indices[1] else: indexOf0=indices[1] indexOf1=indices[0] elif(sortedLabel[0]!=sortedLabel[1])and(sortedLabel[1]==sortedLabel[2]): indexOf0=keepLabelIndex[sortedLabel[0]] indices=indexFind(indexOf0,i,j,k) if calcDist(indexOf0,indices[0])>= calcDist(indexOf0,indices[1]): indexOf1=indices[0] indexOf2=indices[1] else: indexOf2=indices[0] indexOf1=indices[1] 
dist01=calcDist(indexOf0,indexOf1) s2=dist01/2 dist02=calcDist(indexOf0,indexOf2) s1=dist02 dist12=dist01 dist03=calcDist(indexOf1,indexOf2) # All lengths calculation all_lengthsList[round(dist01,round_off_to)] += 1 all_lengthsList[round(dist02,round_off_to)] += 1 all_lengthsList[round(dist03,round_off_to)] += 1 maxDist_List[round(max(dist01,dist02,dist03),round_off_to)] +=1 s3=(((xCord[indexOf0]+xCord[indexOf1])/2-xCord[indexOf2])**2 +((yCord[indexOf0]+yCord[indexOf1])/2-yCord[indexOf2])**2 +((zCord[indexOf0]+zCord[indexOf1])/2-zCord[indexOf2])**2)**0.5 Theta1=180*(math.acos((s1**2-s2**2-s3**2)/(2*s2*s3)))/3.14 if Theta1<=90: all_angleList[round(Theta1,round_off_to)] +=1 rep_angleList[round(Theta1,round_off_to)] +=1 else: all_angleList[round(abs(180-Theta1),round_off_to)] +=1 rep_angleList[round(abs(180-Theta1),round_off_to)] +=1 #if Theta1>90: # Theta1=abs(180-Theta1) #print 'Second Theta1, ',Theta1 #Theta 2 dist02=calcDist(indexOf1,indexOf0) s1=dist02 dist01=calcDist(indexOf1,indexOf2) s2=dist01/2 s3=(((xCord[indexOf1]+xCord[indexOf2])/2-xCord[indexOf0])**2 +((yCord[indexOf1]+yCord[indexOf2])/2-yCord[indexOf0])**2 +((zCord[indexOf1]+zCord[indexOf2])/2-zCord[indexOf0])**2)**0.5 Theta2=180*(math.acos((s1**2-s2**2-s3**2)/(2*s2*s3)))/3.14 #if Theta2 > 90: # Theta2 = abs(180-Theta2) if Theta2<=90: all_angleList[round(Theta2,round_off_to)] +=1 else: all_angleList[round(abs(180-Theta2),round_off_to)] +=1 #Theta 3 dist02=calcDist(indexOf2,indexOf1) s1=dist02 dist01=calcDist(indexOf2,indexOf0) s2=dist01/2 s3=(((xCord[indexOf2]+xCord[indexOf0])/2-xCord[indexOf1])**2+ ((yCord[indexOf2]+yCord[indexOf0])/2-yCord[indexOf1])**2+ ((zCord[indexOf2]+zCord[indexOf0])/2-zCord[indexOf1])**2)**0.5 Theta3=180*(math.acos((s1**2-s2**2-s3**2)/(2*s2*s3)))/3.14 #if Theta3 > 90: # Theta3 = abs(180-Theta3) if Theta3<=90: all_angleList[round(Theta3,round_off_to)] +=1 else: all_angleList[round(abs(180-Theta3),round_off_to)] +=1 # Either writting output to a file or using dictionary or # counter will save you from memory exceptions in this case. #all_angleList[round(Theta1,round_off_to)] +=1 #all_angleList[round(Theta2,round_off_to)] +=1 #all_angleList[round(Theta3,round_off_to)] +=1 #rep_angleList[round(Theta1,round_off_to)] +=1 count_t1 = count_t1+1 print 'count_t1:',count_t1 return [all_angleList,rep_angleList,all_lengthsList,maxDist_List] for i in range(1,total_samples +1): #Path where the sample PDB files present. path = '/home/linc/c00219805/Research/Protien_Database/extracted_new_samples/' subfolder="sample"+str(i) +"//" ## Change sample no here for generatig results for different samples #subfolder="sample_4t1//" print subfolder os.chdir(path) files=glob.glob(path+subfolder+"*.pdb")#Change file extension here if not pdb. Others may be .ent aminoAcidCode=open('/home/linc/c00219805/Research/Protien_Database/'+"aminoAcidCode_lexicographic _new.txt","r") #Start the timer start_time=time.time() aminoAcidLabel={} for amino in aminoAcidCode: amino=amino.split() aminoAcidLabel[amino[0]]=int(amino[1]) aminoAcidCode.close() alltheta = Counter() all_repAngle = Counter() all_length = Counter() maxLength = Counter() #Parallel processing to use the power of CPU. 
This uses 2 cores less than CPU cores for parallelization a = Parallel(n_jobs=cpu_count() - 2, verbose=10, backend="multiprocessing", batch_size="auto")(delayed(processFiles)(fileName) for fileName in files) for t in a: alltheta += t[0] all_repAngle += t[1] all_length += t[2] maxLength += t[3] #Uncomment this for sequential processing and add similar steps for representative angle and lengths # for fileName in files: # alltheta +=processFiles(fileName) #For All thetas df_counts = pd.DataFrame.from_dict(alltheta, orient='index').reset_index() df_counts = df_counts.rename(columns={'index':'theta', 0:'freq'}) print df_counts df_counts.to_csv(path+subfolder+'//all_angles'+str(round_off_to)+setting+'.csv', sep=',') #For representative theta df_counts = pd.DataFrame.from_dict(all_repAngle, orient='index').reset_index() df_counts = df_counts.rename(columns={'index':'theta', 0:'freq'}) df_counts.to_csv(path+subfolder+'//theta'+str(round_off_to)+setting+'.csv', sep=',') #For All Lengths df_counts = pd.DataFrame.from_dict(all_length, orient='index').reset_index() df_counts = df_counts.rename(columns={'index':'dist123', 0:'freq'}) print df_counts df_counts.to_csv(path+subfolder+'//all_length'+str(round_off_to)+setting+'.csv', sep=',') #For max Dist df_counts = pd.DataFrame.from_dict(maxLength, orient='index').reset_index() df_counts = df_counts.rename(columns={'index':'maxDist', 0:'freq'}) df_counts.to_csv(path+subfolder+'//maxDist'+str(round_off_to)+setting+'.csv', sep=',') #End timer and calculate total time taken end_time=time.time() total_time=((end_time)-(start_time)) print ("Code End Angle & Length calculation.") print ("TOTAL TIME IN MIN=",round(total_time/60,0))
"""Calculate Distance between two points in 3D space.""" x1=xCord[indexLabel1] x2=xCord[indexLabel2] y1=yCord[indexLabel1] y2=yCord[indexLabel2] z1=zCord[indexLabel1] z2=zCord[indexLabel2] distance=(((x1-x2)**2+(y2-y1)**2+(z2-z1)**2)**0.5) return distance
identifier_body
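For readers who want the geometry of the snippet above in isolation: per labelled triple the script takes half of one edge, the median from the opposite vertex to that edge's midpoint, and the third side, applies the law of cosines at the midpoint, and folds any angle over 90 degrees back into [0, 90]. The following standalone sketch restates that computation (written in Go purely for illustration; the original uses 3.14 rather than pi, so its results differ by a small rounding error).

package main

import (
	"fmt"
	"math"
)

type pt struct{ x, y, z float64 }

func dist(a, b pt) float64 {
	return math.Sqrt((a.x-b.x)*(a.x-b.x) + (a.y-b.y)*(a.y-b.y) + (a.z-b.z)*(a.z-b.z))
}

// medianAngle mirrors the Theta1 computation above: the angle at the
// midpoint M of edge AB between the median MC and the edge AB, folded
// into [0, 90] degrees (the fold makes the choice of half-edge irrelevant).
func medianAngle(a, b, c pt) float64 {
	m := pt{(a.x + b.x) / 2, (a.y + b.y) / 2, (a.z + b.z) / 2}
	s1 := dist(a, c)     // side AC
	s2 := dist(a, b) / 2 // half of edge AB
	s3 := dist(m, c)     // median from C to the midpoint of AB
	theta := 180 * math.Acos((s1*s1-s2*s2-s3*s3)/(2*s2*s3)) / math.Pi
	if theta > 90 {
		theta = math.Abs(180 - theta)
	}
	return theta
}

func main() {
	a, b, c := pt{0, 0, 0}, pt{4, 0, 0}, pt{1, 3, 0}
	fmt.Printf("median angle: %.2f degrees\n", medianAngle(a, b, c)) // ~71.57
}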
2#allAnglesLengths.py
#!/usr/bin/env python #all_angle_lengths.py """ Calculates all angles, lengths and representative angles and lengths. For a given triple of amino acids, this calculates all the angles , lengths and representative angle and length. The angles are the angles formed by the median of corresponding vertex and opposite edge. Attributes: path: Location where input required resources are stored. subfolder: The sample for which calculations are required. aminoAcidCode: Lexicographic file required for rule-based assignment. """ import math,glob,os,time from collections import Counter import pandas as pd from joblib import Parallel, delayed, cpu_count __author__ = "Sumi Singh, Venkata Sarika Kondra" __version__ = "1.0.2" __maintainer__ = "Venkata Sarika Kondra" __email__ = "[email protected]" round_off_to = 2 total_samples = 12 setting = 'corrected' def calcDist(indexLabel1,indexLabel2): """Calculate Distance between two points in 3D space.""" x1=xCord[indexLabel1] x2=xCord[indexLabel2] y1=yCord[indexLabel1] y2=yCord[indexLabel2] z1=zCord[indexLabel1] z2=zCord[indexLabel2] distance=(((x1-x2)**2+(y2-y1)**2+(z2-z1)**2)**0.5) return distance def indexFind(index_of_2,i1,j1,k1): if index_of_2==i1: indexOf0=j1 indexOf1=k1 elif index_of_2==j1: indexOf0=i1 indexOf1=k1 elif index_of_2==k1: indexOf0=i1 indexOf1=j1 return indexOf0, indexOf1 def processFiles(fileName): """Calculates all angles, all lengths, representative angle and maxDist after performing rule-based labelling. Arguments: fileName: The protein file in PDB/ENT format. Returns: all_angleList: A Counter having all angles formed by their medians on opposite edges of the non-collinear triangle formed by the three amino acids at i, j and k and their frequencies of occurences in this protein file rounded to next significant digit. rep_angleList: A Counter having representative angle and its frequency all_lengthsList: A counter having lengths of all edges of the non-collinear triangle formed. maxDist: Maximum length among all lengths calculated above. 
""" print fileName count_t1 = 0 inFile=open(fileName,'r') all_angleList = Counter() rep_angleList = Counter() all_lengthsList = Counter() maxDist_List = Counter() global xCord, yCord, zCord aminoAcidName={} xCord={} yCord={} zCord={} seq_number={} counter=0 for i in inFile: if (i[0:6].rstrip()=="NUMMDL"): numOfModels=i[10:14].rstrip() if ((i[0:6].rstrip()=="ENDMDL")or (i[0:6].rstrip()=='TER')): break if (i[0:6].rstrip()=="MODEL" and int(i[10:14].rstrip())>1): break if(i[0:4].rstrip())=="ATOM" and(i[13:15].rstrip())=="CA" and(i[16]=='A'or i[16]==' ')and i[17:20]!= "UNK" : aminoAcidName[counter]=int(aminoAcidLabel[i[17:20]]) xCord[counter]=(float(i[30:38])) yCord[counter]=(float(i[38:46])) zCord[counter]=(float(i[46:54])) seq_number[counter]=str(i[22:27]) counter+=1 protLen=len(yCord) initialLabel=[] sortedLabel=[] sortedIndex=[] outDist={} for m in range(0,3): initialLabel.append(0) sortedLabel.append(0) sortedIndex.append(0) for i in range(0,protLen-2): for j in range(i+1,protLen-1): for k in range(j+1, protLen): global i1,j1,k1 i1=i j1=j k1=k keepLabelIndex={} keepLabelIndex[aminoAcidName[i]]=i keepLabelIndex[aminoAcidName[j]]=j keepLabelIndex[aminoAcidName[k]]=k initialLabel[0]=aminoAcidName[i] initialLabel[1]=aminoAcidName[j] initialLabel[2]=aminoAcidName[k] sortedLabel=list(initialLabel) sortedLabel.sort(reverse=True) #Perform Rule- based labelling if (sortedLabel[0]==sortedLabel[1])and(sortedLabel[1]==sortedLabel[2]): dist1_2Temp=calcDist(i,j) dist1_3Temp=calcDist(i,k) dist2_3Temp=calcDist(j,k) if dist1_2Temp>=(max(dist1_2Temp,dist1_3Temp,dist2_3Temp)): indexOf0=i indexOf1=j indexOf2=k elif dist1_3Temp>=(max(dist1_2Temp,dist1_3Temp,dist2_3Temp)): indexOf0=i indexOf1=k indexOf2=j else: indexOf0=j indexOf1=k indexOf2=i elif(aminoAcidName[i]!=aminoAcidName[j])and(aminoAcidName[i]!=aminoAcidName[k]) and(aminoAcidName[j]!=aminoAcidName[k]): for index_ in range(0,3): sortedIndex[index_]=keepLabelIndex[sortedLabel[index_]] indexOf0=sortedIndex[0] indexOf1=sortedIndex[1] indexOf2=sortedIndex[2] elif(sortedLabel[0]==sortedLabel[1])and(sortedLabel[1]!=sortedLabel[2]): indexOf2=keepLabelIndex[sortedLabel[2]] indices=indexFind(indexOf2,i,j,k) a=indexOf2 b=indices[0] c=indices[1] dist1_3Temp=calcDist(b,a) dist2_3Temp=calcDist(c,a) if dist1_3Temp>=dist2_3Temp: indexOf0=indices[0] indexOf1=indices[1] else: indexOf0=indices[1] indexOf1=indices[0] elif(sortedLabel[0]!=sortedLabel[1])and(sortedLabel[1]==sortedLabel[2]): indexOf0=keepLabelIndex[sortedLabel[0]] indices=indexFind(indexOf0,i,j,k) if calcDist(indexOf0,indices[0])>= calcDist(indexOf0,indices[1]): indexOf1=indices[0] indexOf2=indices[1] else: indexOf2=indices[0] indexOf1=indices[1] dist01=calcDist(indexOf0,indexOf1) s2=dist01/2 dist02=calcDist(indexOf0,indexOf2) s1=dist02 dist12=dist01 dist03=calcDist(indexOf1,indexOf2) # All lengths calculation all_lengthsList[round(dist01,round_off_to)] += 1 all_lengthsList[round(dist02,round_off_to)] += 1 all_lengthsList[round(dist03,round_off_to)] += 1 maxDist_List[round(max(dist01,dist02,dist03),round_off_to)] +=1 s3=(((xCord[indexOf0]+xCord[indexOf1])/2-xCord[indexOf2])**2 +((yCord[indexOf0]+yCord[indexOf1])/2-yCord[indexOf2])**2 +((zCord[indexOf0]+zCord[indexOf1])/2-zCord[indexOf2])**2)**0.5 Theta1=180*(math.acos((s1**2-s2**2-s3**2)/(2*s2*s3)))/3.14 if Theta1<=90: all_angleList[round(Theta1,round_off_to)] +=1 rep_angleList[round(Theta1,round_off_to)] +=1 else: all_angleList[round(abs(180-Theta1),round_off_to)] +=1 rep_angleList[round(abs(180-Theta1),round_off_to)] +=1 #if Theta1>90: # 
Theta1=abs(180-Theta1) #print 'Second Theta1, ',Theta1 #Theta 2 dist02=calcDist(indexOf1,indexOf0) s1=dist02 dist01=calcDist(indexOf1,indexOf2) s2=dist01/2 s3=(((xCord[indexOf1]+xCord[indexOf2])/2-xCord[indexOf0])**2 +((yCord[indexOf1]+yCord[indexOf2])/2-yCord[indexOf0])**2 +((zCord[indexOf1]+zCord[indexOf2])/2-zCord[indexOf0])**2)**0.5
Theta2=180*(math.acos((s1**2-s2**2-s3**2)/(2*s2*s3)))/3.14 #if Theta2 > 90: # Theta2 = abs(180-Theta2) if Theta2<=90: all_angleList[round(Theta2,round_off_to)] +=1 else: all_angleList[round(abs(180-Theta2),round_off_to)] +=1 #Theta 3 dist02=calcDist(indexOf2,indexOf1) s1=dist02 dist01=calcDist(indexOf2,indexOf0) s2=dist01/2 s3=(((xCord[indexOf2]+xCord[indexOf0])/2-xCord[indexOf1])**2+ ((yCord[indexOf2]+yCord[indexOf0])/2-yCord[indexOf1])**2+ ((zCord[indexOf2]+zCord[indexOf0])/2-zCord[indexOf1])**2)**0.5 Theta3=180*(math.acos((s1**2-s2**2-s3**2)/(2*s2*s3)))/3.14 #if Theta3 > 90: # Theta3 = abs(180-Theta3) if Theta3<=90: all_angleList[round(Theta3,round_off_to)] +=1 else: all_angleList[round(abs(180-Theta3),round_off_to)] +=1 # Either writting output to a file or using dictionary or # counter will save you from memory exceptions in this case. #all_angleList[round(Theta1,round_off_to)] +=1 #all_angleList[round(Theta2,round_off_to)] +=1 #all_angleList[round(Theta3,round_off_to)] +=1 #rep_angleList[round(Theta1,round_off_to)] +=1 count_t1 = count_t1+1 print 'count_t1:',count_t1 return [all_angleList,rep_angleList,all_lengthsList,maxDist_List] for i in range(1,total_samples +1): #Path where the sample PDB files present. path = '/home/linc/c00219805/Research/Protien_Database/extracted_new_samples/' subfolder="sample"+str(i) +"//" ## Change sample no here for generatig results for different samples #subfolder="sample_4t1//" print subfolder os.chdir(path) files=glob.glob(path+subfolder+"*.pdb")#Change file extension here if not pdb. Others may be .ent aminoAcidCode=open('/home/linc/c00219805/Research/Protien_Database/'+"aminoAcidCode_lexicographic _new.txt","r") #Start the timer start_time=time.time() aminoAcidLabel={} for amino in aminoAcidCode: amino=amino.split() aminoAcidLabel[amino[0]]=int(amino[1]) aminoAcidCode.close() alltheta = Counter() all_repAngle = Counter() all_length = Counter() maxLength = Counter() #Parallel processing to use the power of CPU. 
This uses 2 cores less than CPU cores for parallelization a = Parallel(n_jobs=cpu_count() - 2, verbose=10, backend="multiprocessing", batch_size="auto")(delayed(processFiles)(fileName) for fileName in files) for t in a: alltheta += t[0] all_repAngle += t[1] all_length += t[2] maxLength += t[3] #Uncomment this for sequential processing and add similar steps for representative angle and lengths # for fileName in files: # alltheta +=processFiles(fileName) #For All thetas df_counts = pd.DataFrame.from_dict(alltheta, orient='index').reset_index() df_counts = df_counts.rename(columns={'index':'theta', 0:'freq'}) print df_counts df_counts.to_csv(path+subfolder+'//all_angles'+str(round_off_to)+setting+'.csv', sep=',') #For representative theta df_counts = pd.DataFrame.from_dict(all_repAngle, orient='index').reset_index() df_counts = df_counts.rename(columns={'index':'theta', 0:'freq'}) df_counts.to_csv(path+subfolder+'//theta'+str(round_off_to)+setting+'.csv', sep=',') #For All Lengths df_counts = pd.DataFrame.from_dict(all_length, orient='index').reset_index() df_counts = df_counts.rename(columns={'index':'dist123', 0:'freq'}) print df_counts df_counts.to_csv(path+subfolder+'//all_length'+str(round_off_to)+setting+'.csv', sep=',') #For max Dist df_counts = pd.DataFrame.from_dict(maxLength, orient='index').reset_index() df_counts = df_counts.rename(columns={'index':'maxDist', 0:'freq'}) df_counts.to_csv(path+subfolder+'//maxDist'+str(round_off_to)+setting+'.csv', sep=',') #End timer and calculate total time taken end_time=time.time() total_time=((end_time)-(start_time)) print ("Code End Angle & Length calculation.") print ("TOTAL TIME IN MIN=",round(total_time/60,0))
random_line_split
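The coordinate extraction in the script above relies on the fixed-column layout of PDB ATOM records rather than whitespace splitting. Here is a small standalone sketch of that parsing step, in Go for consistency with the other examples; the synthetic record and the helper name are illustrative, not part of the original script.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// caRecord pulls the residue name and alpha-carbon coordinates out of a
// PDB ATOM line using the same fixed column slices as the script above
// (residue name cols 18-20, x 31-38, y 39-46, z 47-54, 1-based columns).
func caRecord(line string) (res string, x, y, z float64, ok bool) {
	if len(line) < 54 ||
		strings.TrimSpace(line[0:4]) != "ATOM" ||
		strings.TrimSpace(line[13:15]) != "CA" {
		return "", 0, 0, 0, false
	}
	res = strings.TrimSpace(line[17:20])
	x, _ = strconv.ParseFloat(strings.TrimSpace(line[30:38]), 64)
	y, _ = strconv.ParseFloat(strings.TrimSpace(line[38:46]), 64)
	z, _ = strconv.ParseFloat(strings.TrimSpace(line[46:54]), 64)
	return res, x, y, z, true
}

func main() {
	// A synthetic ATOM record laid out in PDB fixed columns.
	line := "ATOM      2  CA  ALA A   1      11.111  22.222  33.333"
	if res, x, y, z, ok := caRecord(line); ok {
		fmt.Println(res, x, y, z) // ALA 11.111 22.222 33.333
	}
}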
2#allAnglesLengths.py
#!/usr/bin/env python #all_angle_lengths.py """ Calculates all angles, lengths and representative angles and lengths. For a given triple of amino acids, this calculates all the angles , lengths and representative angle and length. The angles are the angles formed by the median of corresponding vertex and opposite edge. Attributes: path: Location where input required resources are stored. subfolder: The sample for which calculations are required. aminoAcidCode: Lexicographic file required for rule-based assignment. """ import math,glob,os,time from collections import Counter import pandas as pd from joblib import Parallel, delayed, cpu_count __author__ = "Sumi Singh, Venkata Sarika Kondra" __version__ = "1.0.2" __maintainer__ = "Venkata Sarika Kondra" __email__ = "[email protected]" round_off_to = 2 total_samples = 12 setting = 'corrected' def
(indexLabel1,indexLabel2): """Calculate Distance between two points in 3D space.""" x1=xCord[indexLabel1] x2=xCord[indexLabel2] y1=yCord[indexLabel1] y2=yCord[indexLabel2] z1=zCord[indexLabel1] z2=zCord[indexLabel2] distance=(((x1-x2)**2+(y2-y1)**2+(z2-z1)**2)**0.5) return distance def indexFind(index_of_2,i1,j1,k1): if index_of_2==i1: indexOf0=j1 indexOf1=k1 elif index_of_2==j1: indexOf0=i1 indexOf1=k1 elif index_of_2==k1: indexOf0=i1 indexOf1=j1 return indexOf0, indexOf1 def processFiles(fileName): """Calculates all angles, all lengths, representative angle and maxDist after performing rule-based labelling. Arguments: fileName: The protein file in PDB/ENT format. Returns: all_angleList: A Counter having all angles formed by their medians on opposite edges of the non-collinear triangle formed by the three amino acids at i, j and k and their frequencies of occurences in this protein file rounded to next significant digit. rep_angleList: A Counter having representative angle and its frequency all_lengthsList: A counter having lengths of all edges of the non-collinear triangle formed. maxDist: Maximum length among all lengths calculated above. """ print fileName count_t1 = 0 inFile=open(fileName,'r') all_angleList = Counter() rep_angleList = Counter() all_lengthsList = Counter() maxDist_List = Counter() global xCord, yCord, zCord aminoAcidName={} xCord={} yCord={} zCord={} seq_number={} counter=0 for i in inFile: if (i[0:6].rstrip()=="NUMMDL"): numOfModels=i[10:14].rstrip() if ((i[0:6].rstrip()=="ENDMDL")or (i[0:6].rstrip()=='TER')): break if (i[0:6].rstrip()=="MODEL" and int(i[10:14].rstrip())>1): break if(i[0:4].rstrip())=="ATOM" and(i[13:15].rstrip())=="CA" and(i[16]=='A'or i[16]==' ')and i[17:20]!= "UNK" : aminoAcidName[counter]=int(aminoAcidLabel[i[17:20]]) xCord[counter]=(float(i[30:38])) yCord[counter]=(float(i[38:46])) zCord[counter]=(float(i[46:54])) seq_number[counter]=str(i[22:27]) counter+=1 protLen=len(yCord) initialLabel=[] sortedLabel=[] sortedIndex=[] outDist={} for m in range(0,3): initialLabel.append(0) sortedLabel.append(0) sortedIndex.append(0) for i in range(0,protLen-2): for j in range(i+1,protLen-1): for k in range(j+1, protLen): global i1,j1,k1 i1=i j1=j k1=k keepLabelIndex={} keepLabelIndex[aminoAcidName[i]]=i keepLabelIndex[aminoAcidName[j]]=j keepLabelIndex[aminoAcidName[k]]=k initialLabel[0]=aminoAcidName[i] initialLabel[1]=aminoAcidName[j] initialLabel[2]=aminoAcidName[k] sortedLabel=list(initialLabel) sortedLabel.sort(reverse=True) #Perform Rule- based labelling if (sortedLabel[0]==sortedLabel[1])and(sortedLabel[1]==sortedLabel[2]): dist1_2Temp=calcDist(i,j) dist1_3Temp=calcDist(i,k) dist2_3Temp=calcDist(j,k) if dist1_2Temp>=(max(dist1_2Temp,dist1_3Temp,dist2_3Temp)): indexOf0=i indexOf1=j indexOf2=k elif dist1_3Temp>=(max(dist1_2Temp,dist1_3Temp,dist2_3Temp)): indexOf0=i indexOf1=k indexOf2=j else: indexOf0=j indexOf1=k indexOf2=i elif(aminoAcidName[i]!=aminoAcidName[j])and(aminoAcidName[i]!=aminoAcidName[k]) and(aminoAcidName[j]!=aminoAcidName[k]): for index_ in range(0,3): sortedIndex[index_]=keepLabelIndex[sortedLabel[index_]] indexOf0=sortedIndex[0] indexOf1=sortedIndex[1] indexOf2=sortedIndex[2] elif(sortedLabel[0]==sortedLabel[1])and(sortedLabel[1]!=sortedLabel[2]): indexOf2=keepLabelIndex[sortedLabel[2]] indices=indexFind(indexOf2,i,j,k) a=indexOf2 b=indices[0] c=indices[1] dist1_3Temp=calcDist(b,a) dist2_3Temp=calcDist(c,a) if dist1_3Temp>=dist2_3Temp: indexOf0=indices[0] indexOf1=indices[1] else: indexOf0=indices[1] indexOf1=indices[0] 
elif(sortedLabel[0]!=sortedLabel[1])and(sortedLabel[1]==sortedLabel[2]): indexOf0=keepLabelIndex[sortedLabel[0]] indices=indexFind(indexOf0,i,j,k) if calcDist(indexOf0,indices[0])>= calcDist(indexOf0,indices[1]): indexOf1=indices[0] indexOf2=indices[1] else: indexOf2=indices[0] indexOf1=indices[1] dist01=calcDist(indexOf0,indexOf1) s2=dist01/2 dist02=calcDist(indexOf0,indexOf2) s1=dist02 dist12=dist01 dist03=calcDist(indexOf1,indexOf2) # All lengths calculation all_lengthsList[round(dist01,round_off_to)] += 1 all_lengthsList[round(dist02,round_off_to)] += 1 all_lengthsList[round(dist03,round_off_to)] += 1 maxDist_List[round(max(dist01,dist02,dist03),round_off_to)] +=1 s3=(((xCord[indexOf0]+xCord[indexOf1])/2-xCord[indexOf2])**2 +((yCord[indexOf0]+yCord[indexOf1])/2-yCord[indexOf2])**2 +((zCord[indexOf0]+zCord[indexOf1])/2-zCord[indexOf2])**2)**0.5 Theta1=180*(math.acos((s1**2-s2**2-s3**2)/(2*s2*s3)))/3.14 if Theta1<=90: all_angleList[round(Theta1,round_off_to)] +=1 rep_angleList[round(Theta1,round_off_to)] +=1 else: all_angleList[round(abs(180-Theta1),round_off_to)] +=1 rep_angleList[round(abs(180-Theta1),round_off_to)] +=1 #if Theta1>90: # Theta1=abs(180-Theta1) #print 'Second Theta1, ',Theta1 #Theta 2 dist02=calcDist(indexOf1,indexOf0) s1=dist02 dist01=calcDist(indexOf1,indexOf2) s2=dist01/2 s3=(((xCord[indexOf1]+xCord[indexOf2])/2-xCord[indexOf0])**2 +((yCord[indexOf1]+yCord[indexOf2])/2-yCord[indexOf0])**2 +((zCord[indexOf1]+zCord[indexOf2])/2-zCord[indexOf0])**2)**0.5 Theta2=180*(math.acos((s1**2-s2**2-s3**2)/(2*s2*s3)))/3.14 #if Theta2 > 90: # Theta2 = abs(180-Theta2) if Theta2<=90: all_angleList[round(Theta2,round_off_to)] +=1 else: all_angleList[round(abs(180-Theta2),round_off_to)] +=1 #Theta 3 dist02=calcDist(indexOf2,indexOf1) s1=dist02 dist01=calcDist(indexOf2,indexOf0) s2=dist01/2 s3=(((xCord[indexOf2]+xCord[indexOf0])/2-xCord[indexOf1])**2+ ((yCord[indexOf2]+yCord[indexOf0])/2-yCord[indexOf1])**2+ ((zCord[indexOf2]+zCord[indexOf0])/2-zCord[indexOf1])**2)**0.5 Theta3=180*(math.acos((s1**2-s2**2-s3**2)/(2*s2*s3)))/3.14 #if Theta3 > 90: # Theta3 = abs(180-Theta3) if Theta3<=90: all_angleList[round(Theta3,round_off_to)] +=1 else: all_angleList[round(abs(180-Theta3),round_off_to)] +=1 # Either writting output to a file or using dictionary or # counter will save you from memory exceptions in this case. #all_angleList[round(Theta1,round_off_to)] +=1 #all_angleList[round(Theta2,round_off_to)] +=1 #all_angleList[round(Theta3,round_off_to)] +=1 #rep_angleList[round(Theta1,round_off_to)] +=1 count_t1 = count_t1+1 print 'count_t1:',count_t1 return [all_angleList,rep_angleList,all_lengthsList,maxDist_List] for i in range(1,total_samples +1): #Path where the sample PDB files present. path = '/home/linc/c00219805/Research/Protien_Database/extracted_new_samples/' subfolder="sample"+str(i) +"//" ## Change sample no here for generatig results for different samples #subfolder="sample_4t1//" print subfolder os.chdir(path) files=glob.glob(path+subfolder+"*.pdb")#Change file extension here if not pdb. Others may be .ent aminoAcidCode=open('/home/linc/c00219805/Research/Protien_Database/'+"aminoAcidCode_lexicographic _new.txt","r") #Start the timer start_time=time.time() aminoAcidLabel={} for amino in aminoAcidCode: amino=amino.split() aminoAcidLabel[amino[0]]=int(amino[1]) aminoAcidCode.close() alltheta = Counter() all_repAngle = Counter() all_length = Counter() maxLength = Counter() #Parallel processing to use the power of CPU. 
This uses 2 cores less than CPU cores for parallelization a = Parallel(n_jobs=cpu_count() - 2, verbose=10, backend="multiprocessing", batch_size="auto")(delayed(processFiles)(fileName) for fileName in files) for t in a: alltheta += t[0] all_repAngle += t[1] all_length += t[2] maxLength += t[3] #Uncomment this for sequential processing and add similar steps for representative angle and lengths # for fileName in files: # alltheta +=processFiles(fileName) #For All thetas df_counts = pd.DataFrame.from_dict(alltheta, orient='index').reset_index() df_counts = df_counts.rename(columns={'index':'theta', 0:'freq'}) print df_counts df_counts.to_csv(path+subfolder+'//all_angles'+str(round_off_to)+setting+'.csv', sep=',') #For representative theta df_counts = pd.DataFrame.from_dict(all_repAngle, orient='index').reset_index() df_counts = df_counts.rename(columns={'index':'theta', 0:'freq'}) df_counts.to_csv(path+subfolder+'//theta'+str(round_off_to)+setting+'.csv', sep=',') #For All Lengths df_counts = pd.DataFrame.from_dict(all_length, orient='index').reset_index() df_counts = df_counts.rename(columns={'index':'dist123', 0:'freq'}) print df_counts df_counts.to_csv(path+subfolder+'//all_length'+str(round_off_to)+setting+'.csv', sep=',') #For max Dist df_counts = pd.DataFrame.from_dict(maxLength, orient='index').reset_index() df_counts = df_counts.rename(columns={'index':'maxDist', 0:'freq'}) df_counts.to_csv(path+subfolder+'//maxDist'+str(round_off_to)+setting+'.csv', sep=',') #End timer and calculate total time taken end_time=time.time() total_time=((end_time)-(start_time)) print ("Code End Angle & Length calculation.") print ("TOTAL TIME IN MIN=",round(total_time/60,0))
calcDist
identifier_name
2#allAnglesLengths.py
#!/usr/bin/env python #all_angle_lengths.py """ Calculates all angles, lengths and representative angles and lengths. For a given triple of amino acids, this calculates all the angles , lengths and representative angle and length. The angles are the angles formed by the median of corresponding vertex and opposite edge. Attributes: path: Location where input required resources are stored. subfolder: The sample for which calculations are required. aminoAcidCode: Lexicographic file required for rule-based assignment. """ import math,glob,os,time from collections import Counter import pandas as pd from joblib import Parallel, delayed, cpu_count __author__ = "Sumi Singh, Venkata Sarika Kondra" __version__ = "1.0.2" __maintainer__ = "Venkata Sarika Kondra" __email__ = "[email protected]" round_off_to = 2 total_samples = 12 setting = 'corrected' def calcDist(indexLabel1,indexLabel2): """Calculate Distance between two points in 3D space.""" x1=xCord[indexLabel1] x2=xCord[indexLabel2] y1=yCord[indexLabel1] y2=yCord[indexLabel2] z1=zCord[indexLabel1] z2=zCord[indexLabel2] distance=(((x1-x2)**2+(y2-y1)**2+(z2-z1)**2)**0.5) return distance def indexFind(index_of_2,i1,j1,k1): if index_of_2==i1: indexOf0=j1 indexOf1=k1 elif index_of_2==j1: indexOf0=i1 indexOf1=k1 elif index_of_2==k1: indexOf0=i1 indexOf1=j1 return indexOf0, indexOf1 def processFiles(fileName): """Calculates all angles, all lengths, representative angle and maxDist after performing rule-based labelling. Arguments: fileName: The protein file in PDB/ENT format. Returns: all_angleList: A Counter having all angles formed by their medians on opposite edges of the non-collinear triangle formed by the three amino acids at i, j and k and their frequencies of occurences in this protein file rounded to next significant digit. rep_angleList: A Counter having representative angle and its frequency all_lengthsList: A counter having lengths of all edges of the non-collinear triangle formed. maxDist: Maximum length among all lengths calculated above. 
""" print fileName count_t1 = 0 inFile=open(fileName,'r') all_angleList = Counter() rep_angleList = Counter() all_lengthsList = Counter() maxDist_List = Counter() global xCord, yCord, zCord aminoAcidName={} xCord={} yCord={} zCord={} seq_number={} counter=0 for i in inFile: if (i[0:6].rstrip()=="NUMMDL"): numOfModels=i[10:14].rstrip() if ((i[0:6].rstrip()=="ENDMDL")or (i[0:6].rstrip()=='TER')): break if (i[0:6].rstrip()=="MODEL" and int(i[10:14].rstrip())>1): break if(i[0:4].rstrip())=="ATOM" and(i[13:15].rstrip())=="CA" and(i[16]=='A'or i[16]==' ')and i[17:20]!= "UNK" : aminoAcidName[counter]=int(aminoAcidLabel[i[17:20]]) xCord[counter]=(float(i[30:38])) yCord[counter]=(float(i[38:46])) zCord[counter]=(float(i[46:54])) seq_number[counter]=str(i[22:27]) counter+=1 protLen=len(yCord) initialLabel=[] sortedLabel=[] sortedIndex=[] outDist={} for m in range(0,3): initialLabel.append(0) sortedLabel.append(0) sortedIndex.append(0) for i in range(0,protLen-2): for j in range(i+1,protLen-1): for k in range(j+1, protLen): global i1,j1,k1 i1=i j1=j k1=k keepLabelIndex={} keepLabelIndex[aminoAcidName[i]]=i keepLabelIndex[aminoAcidName[j]]=j keepLabelIndex[aminoAcidName[k]]=k initialLabel[0]=aminoAcidName[i] initialLabel[1]=aminoAcidName[j] initialLabel[2]=aminoAcidName[k] sortedLabel=list(initialLabel) sortedLabel.sort(reverse=True) #Perform Rule- based labelling if (sortedLabel[0]==sortedLabel[1])and(sortedLabel[1]==sortedLabel[2]): dist1_2Temp=calcDist(i,j) dist1_3Temp=calcDist(i,k) dist2_3Temp=calcDist(j,k) if dist1_2Temp>=(max(dist1_2Temp,dist1_3Temp,dist2_3Temp)): indexOf0=i indexOf1=j indexOf2=k elif dist1_3Temp>=(max(dist1_2Temp,dist1_3Temp,dist2_3Temp)): indexOf0=i indexOf1=k indexOf2=j else: indexOf0=j indexOf1=k indexOf2=i elif(aminoAcidName[i]!=aminoAcidName[j])and(aminoAcidName[i]!=aminoAcidName[k]) and(aminoAcidName[j]!=aminoAcidName[k]):
elif(sortedLabel[0]==sortedLabel[1])and(sortedLabel[1]!=sortedLabel[2]): indexOf2=keepLabelIndex[sortedLabel[2]] indices=indexFind(indexOf2,i,j,k) a=indexOf2 b=indices[0] c=indices[1] dist1_3Temp=calcDist(b,a) dist2_3Temp=calcDist(c,a) if dist1_3Temp>=dist2_3Temp: indexOf0=indices[0] indexOf1=indices[1] else: indexOf0=indices[1] indexOf1=indices[0] elif(sortedLabel[0]!=sortedLabel[1])and(sortedLabel[1]==sortedLabel[2]): indexOf0=keepLabelIndex[sortedLabel[0]] indices=indexFind(indexOf0,i,j,k) if calcDist(indexOf0,indices[0])>= calcDist(indexOf0,indices[1]): indexOf1=indices[0] indexOf2=indices[1] else: indexOf2=indices[0] indexOf1=indices[1] dist01=calcDist(indexOf0,indexOf1) s2=dist01/2 dist02=calcDist(indexOf0,indexOf2) s1=dist02 dist12=dist01 dist03=calcDist(indexOf1,indexOf2) # All lengths calculation all_lengthsList[round(dist01,round_off_to)] += 1 all_lengthsList[round(dist02,round_off_to)] += 1 all_lengthsList[round(dist03,round_off_to)] += 1 maxDist_List[round(max(dist01,dist02,dist03),round_off_to)] +=1 s3=(((xCord[indexOf0]+xCord[indexOf1])/2-xCord[indexOf2])**2 +((yCord[indexOf0]+yCord[indexOf1])/2-yCord[indexOf2])**2 +((zCord[indexOf0]+zCord[indexOf1])/2-zCord[indexOf2])**2)**0.5 Theta1=180*(math.acos((s1**2-s2**2-s3**2)/(2*s2*s3)))/3.14 if Theta1<=90: all_angleList[round(Theta1,round_off_to)] +=1 rep_angleList[round(Theta1,round_off_to)] +=1 else: all_angleList[round(abs(180-Theta1),round_off_to)] +=1 rep_angleList[round(abs(180-Theta1),round_off_to)] +=1 #if Theta1>90: # Theta1=abs(180-Theta1) #print 'Second Theta1, ',Theta1 #Theta 2 dist02=calcDist(indexOf1,indexOf0) s1=dist02 dist01=calcDist(indexOf1,indexOf2) s2=dist01/2 s3=(((xCord[indexOf1]+xCord[indexOf2])/2-xCord[indexOf0])**2 +((yCord[indexOf1]+yCord[indexOf2])/2-yCord[indexOf0])**2 +((zCord[indexOf1]+zCord[indexOf2])/2-zCord[indexOf0])**2)**0.5 Theta2=180*(math.acos((s1**2-s2**2-s3**2)/(2*s2*s3)))/3.14 #if Theta2 > 90: # Theta2 = abs(180-Theta2) if Theta2<=90: all_angleList[round(Theta2,round_off_to)] +=1 else: all_angleList[round(abs(180-Theta2),round_off_to)] +=1 #Theta 3 dist02=calcDist(indexOf2,indexOf1) s1=dist02 dist01=calcDist(indexOf2,indexOf0) s2=dist01/2 s3=(((xCord[indexOf2]+xCord[indexOf0])/2-xCord[indexOf1])**2+ ((yCord[indexOf2]+yCord[indexOf0])/2-yCord[indexOf1])**2+ ((zCord[indexOf2]+zCord[indexOf0])/2-zCord[indexOf1])**2)**0.5 Theta3=180*(math.acos((s1**2-s2**2-s3**2)/(2*s2*s3)))/3.14 #if Theta3 > 90: # Theta3 = abs(180-Theta3) if Theta3<=90: all_angleList[round(Theta3,round_off_to)] +=1 else: all_angleList[round(abs(180-Theta3),round_off_to)] +=1 # Either writting output to a file or using dictionary or # counter will save you from memory exceptions in this case. #all_angleList[round(Theta1,round_off_to)] +=1 #all_angleList[round(Theta2,round_off_to)] +=1 #all_angleList[round(Theta3,round_off_to)] +=1 #rep_angleList[round(Theta1,round_off_to)] +=1 count_t1 = count_t1+1 print 'count_t1:',count_t1 return [all_angleList,rep_angleList,all_lengthsList,maxDist_List] for i in range(1,total_samples +1): #Path where the sample PDB files present. path = '/home/linc/c00219805/Research/Protien_Database/extracted_new_samples/' subfolder="sample"+str(i) +"//" ## Change sample no here for generatig results for different samples #subfolder="sample_4t1//" print subfolder os.chdir(path) files=glob.glob(path+subfolder+"*.pdb")#Change file extension here if not pdb. 
Others may be .ent aminoAcidCode=open('/home/linc/c00219805/Research/Protien_Database/'+"aminoAcidCode_lexicographic _new.txt","r") #Start the timer start_time=time.time() aminoAcidLabel={} for amino in aminoAcidCode: amino=amino.split() aminoAcidLabel[amino[0]]=int(amino[1]) aminoAcidCode.close() alltheta = Counter() all_repAngle = Counter() all_length = Counter() maxLength = Counter() #Parallel processing to use the power of CPU. This uses 2 cores less than CPU cores for parallelization a = Parallel(n_jobs=cpu_count() - 2, verbose=10, backend="multiprocessing", batch_size="auto")(delayed(processFiles)(fileName) for fileName in files) for t in a: alltheta += t[0] all_repAngle += t[1] all_length += t[2] maxLength += t[3] #Uncomment this for sequential processing and add similar steps for representative angle and lengths # for fileName in files: # alltheta +=processFiles(fileName) #For All thetas df_counts = pd.DataFrame.from_dict(alltheta, orient='index').reset_index() df_counts = df_counts.rename(columns={'index':'theta', 0:'freq'}) print df_counts df_counts.to_csv(path+subfolder+'//all_angles'+str(round_off_to)+setting+'.csv', sep=',') #For representative theta df_counts = pd.DataFrame.from_dict(all_repAngle, orient='index').reset_index() df_counts = df_counts.rename(columns={'index':'theta', 0:'freq'}) df_counts.to_csv(path+subfolder+'//theta'+str(round_off_to)+setting+'.csv', sep=',') #For All Lengths df_counts = pd.DataFrame.from_dict(all_length, orient='index').reset_index() df_counts = df_counts.rename(columns={'index':'dist123', 0:'freq'}) print df_counts df_counts.to_csv(path+subfolder+'//all_length'+str(round_off_to)+setting+'.csv', sep=',') #For max Dist df_counts = pd.DataFrame.from_dict(maxLength, orient='index').reset_index() df_counts = df_counts.rename(columns={'index':'maxDist', 0:'freq'}) df_counts.to_csv(path+subfolder+'//maxDist'+str(round_off_to)+setting+'.csv', sep=',') #End timer and calculate total time taken end_time=time.time() total_time=((end_time)-(start_time)) print ("Code End Angle & Length calculation.") print ("TOTAL TIME IN MIN=",round(total_time/60,0))
for index_ in range(0,3): sortedIndex[index_]=keepLabelIndex[sortedLabel[index_]] indexOf0=sortedIndex[0] indexOf1=sortedIndex[1] indexOf2=sortedIndex[2]
conditional_block
kmz.go
// cutkmz subcommands // // Other than root.go, each of these go files is a cutkmz subcommand implementation // // - kmz - produces a KMZ with input JPG chopped into 1024x1024 tiles // - bigkmz - produces a KMZ containing input JPG as is for higher resolution uses such as Google Earth package cmd import ( "archive/zip" "bytes" "flag" "fmt" "io" "io/ioutil" "math" "os" "os/exec" "path/filepath" "strconv" "strings" "text/template" "github.com/golang/glog" "github.com/spf13/cobra" "github.com/spf13/viper" ) const ( convProg = "convert" // img mgck. "gm convert" poss identifyProg = "identify" // "gm identify" ditto ) const kmlHdrTmpl = `<?xml version="1.0" encoding="UTF-8"?> <kml xmlns="http://www.opengis.net/kml/2.2"> <Document> <name>{{ .Name }}</name> ` const kmlOverlayTmpl = ` <GroundOverlay> <name>{{ .Name }}</name> <color>bdffffff</color> <drawOrder>{{ .DrawingOrder }} </drawOrder> <Icon> <href>{{ .TileFileName }}</href> <viewBoundScale>1.0</viewBoundScale> </Icon> <LatLonBox> <north>{{ .North }}</north> <south>{{ .South }} </south> <east>{{ .East }}</east> <west>{{ .West }}</west> <rotation>0.0</rotation> </LatLonBox> </GroundOverlay> ` const kmlFtr = `</Document> </kml> ` const ( north int = iota // index into [4]float64 assoc dec. degrees south east west ) // mapTile holds an image filepath, its lat/long bounding box and // pixel width & height type mapTile struct { fpath string // file path of tile image width int // Tile width in pixels height int // Tile height in pixels box [4]float64 // lat&long bounding box in decimal degrees } // NewMapTile populates a map tile using the given width and height // instead of extracting it from the given file path. Panics if North // < South or cross a pole. func newMapTile(fpath string, pixWid, pixHigh int, n, s, e, w float64) *mapTile { if n > 90 || s < -90 || n < s { panic("No crossing a pole and map's North must be greater than South") } rv := &mapTile{ fpath: fpath, width: pixWid, height: pixHigh, box: [4]float64{n, s, normEasting(e), normEasting(w)}, } return rv } // NewMapTileFromFile reads in given file path and creates a map tile // with the filepath and pix width & height from the image. func newMapTileFromFile(fpath string, n, s, e, w float64) (*mapTile, error) { wid, high, err := imageWxH(fpath) if err != nil { return nil, err } return newMapTile(fpath, wid, high, n, s, e, w), nil } var kmzCmd = &cobra.Command{ Use: "kmz", Short: "Creates .kmz from a JPG with map tiles small enough for a Garmin GPS", Long: `Creates .kmz map tiles for a Garmin from a larger geo-poisitioned map image. Tested on a 62s & 64s Crunches and converts a raster image (.jpg,.gif,.tiff etc) to match what Garmin devices can handle wrt resolution and max tile-size. Rather than expect metadata files with geo-positioning information for the jpg, cutkmz expects it to be encoded into the file's name, "name-geo-anchored". Harder to lose. For example: Grouse-Mountain_49.336694_49.470628_-123.132056_-122.9811.jpg Underscores are required: <map-name>_<North-lat>_<South-lat>_<East-long>_<West-long>.<fmt> Garmin limits the max tiles per model (100 on 62s, 500 on Montana, Oregon 600 series and GPSMAP 64 series. Tiles of more than 1 megapixel (w*h) add no additional clarity. If you have a large image, it will be reduced in quality until it can be chopped in max-tiles or less 1024x1024 chunks. Connect your GPS via USB and copy the generated kmz files into /Garmin/CustomMap (SD or main mem). 
Garmin limitations on .kmz files and the images in them: * image must be jpeg, not 'progressive' * only considers the /doc.kml in the .kmz * tiles over 1MP, e.g. > 1024x1024 or 512x2048 etc pixels do not add increased resolution * each tile jpeg should be less than 3MB. * Max images/tiles per device: typically 100. 500 on some. * smaller image files are rendered faster Requires the imagemagick to be installed on your system, and uses its "convert" and "identify" programs `, Run: func(cmd *cobra.Command, args []string) { if err := process(viper.GetViper(), args); err != nil { fmt.Fprintf(os.Stderr, "Error: %v\n", err) fmt.Fprintf(os.Stderr, "see 'cutkmz kmz -h' for help\n") os.Exit(1) } }, } func init() { RootCmd.AddCommand(kmzCmd) kmzCmd.Flags().StringP("image", "i", "", "image file named with its bounding box in decimal degrees.") viper.BindPFlag("image", kmzCmd.Flags().Lookup("image")) kmzCmd.Flags().IntP("max_tiles", "t", 100, "max # pieces to cut jpg into. Beware of device limits.") viper.BindPFlag("max_tiles", kmzCmd.Flags().Lookup("max_tiles")) kmzCmd.Flags().IntP("drawing_order", "d", 51, "Garmins make values > 50 visible. Tune if have overlapping overlays.") viper.BindPFlag("drawing_order", kmzCmd.Flags().Lookup("drawing_order")) kmzCmd.Flags().BoolP("keep_tmp", "k", false, "Don't delete intermediate files from $TMPDIR.") viper.BindPFlag("keep_tmp", kmzCmd.Flags().Lookup("keep_tmp")) kmzCmd.Flags().AddGoFlagSet(flag.CommandLine) flag.CommandLine.VisitAll(func(f *flag.Flag) { viper.BindPFlag(f.Name, kmzCmd.Flags().Lookup(f.Name)) }) flag.CommandLine.Parse(nil) // shut up 'not parsed' complaints } // getBox returns map name & lat/long bounding box by extracing it // from the given file name. The Float slice is in order: northLat, // southLat, eastLong, westLong in decimal degrees func getBox(image string) (base string, box []float64, err error) { c := strings.Split(image, "_") if len(c) != 5 { err = fmt.Errorf("File name must include bounding box name_N_S_E_W.jpg in decimal degrees, e.g. Grouse-Mountain_49.336694_49.470628_-123.132056_-122.9811.jpg") return } base = filepath.Base(c[0]) for i := 1; i < 5; i++ { if i == 4 { s := strings.SplitN(c[i], ".", 3) if len(s) == 3 { c[i] = s[0] + "." + s[1] } } f, err := strconv.ParseFloat(c[i], 64) if err != nil { err = fmt.Errorf("Error parsing lat/long degrees in file name: %v", err) return "", nil, err } box = append(box, f) } if box[north] <= box[south] || box[north] > 90 || box[south] < -90 { return base, box, fmt.Errorf("North boundary must be greater than south boundary and in [-90,90]") } return } // imageWxH returns the width and height of image file in pixels func imageWxH(imageFilename string) (width int, height int, err error) { if _, err := os.Stat(imageFilename); os.IsNotExist(err) { return 0, 0, err } cmd := exec.Command(identifyProg, "-format", "%w %h", imageFilename) glog.Infof("About to run: %#v\n", cmd.Args) var b []byte b, err = cmd.Output() if err != nil { return 0, 0, err } wh := bytes.Split(b, []byte(" ")) if len(wh) != 2 { return 0, 0, fmt.Errorf("Expected two ints separated by space, but got: %v", b) } width, err = strconv.Atoi(string(wh[0])) if err != nil { return } height, err = strconv.Atoi(string(wh[1])) if err != nil { return } return } // process the name-geo-anchored files args into KMZs. Uses // "max_tiles" and and "drawing_order" from viper if present. 
func process(v *viper.Viper, args []string) error { maxTiles := v.GetInt("max_tiles") drawingOrder := v.GetInt("drawing_order") keepTmp := v.GetBool("keep_tmp") fmt.Printf("maxTiles %v, drawingOrder: %v, keepTmp: %v\n", maxTiles, drawingOrder, keepTmp) if len(args) == 0 { return fmt.Errorf("Image file required: must provide one or more imaage file path") } for _, image := range args { if _, err := os.Stat(image); os.IsNotExist(err) { return err } absImage, err := filepath.Abs(image) if err != nil { return fmt.Errorf("Issue with an image file path: %v", err) } base, box, err := getBox(absImage) if err != nil { return fmt.Errorf("Error with image file name: %v", err) } origMap, err := newMapTileFromFile(absImage, box[north], box[south], box[east], box[west]) if err != nil { return fmt.Errorf("Error extracting image dimensions: %v", err) } maxPixels := maxTiles * 1024 * 1024 tmpDir, err := ioutil.TempDir("", "cutkmz-") if err != nil { return fmt.Errorf("Error creating a temporary directory: %v", err) } tilesDir := filepath.Join(tmpDir, base, "tiles") err = os.MkdirAll(tilesDir, 0755) if err != nil { return fmt.Errorf("Error making tiles dir in tmp dir: %v", err) } fixedJpg := filepath.Join(tmpDir, "fixed.jpg") if maxPixels < (origMap.height * origMap.width) { resizeFixToJpg(fixedJpg, absImage, maxPixels) } else { fixToJpg(fixedJpg, absImage) } // Need to know pixel width of map from which we // chopped the tiles so we know which row a tile is // in. Knowing the tile's row allows us to set its // bounding box correctly. fixedMap, err := newMapTileFromFile(fixedJpg, box[north], box[south], box[east], box[west]) if err != nil { return err } // chop chop chop. bork. bork bork. chopToJpgs(fixedJpg, tilesDir, base) var kdocWtr *os.File if kdocWtr, err = os.Create(filepath.Join(tmpDir, base, "doc.kml")); err != nil { return err } if err = startKML(kdocWtr, base); err != nil { return err } // For each jpg tile create an entry in the kml file // with its bounding box. Imagemagick crop+adjoin // chopped & numbered the tile image files // lexocographically ascending starting from top left // (000) (NW) eastwards & then down to bottom right // (SE). ReadDir gives sorted result. var tileFiles []os.FileInfo if tileFiles, err = ioutil.ReadDir(tilesDir); err != nil { return err } var widthSum int currNorth := fixedMap.box[north] currWest := fixedMap.box[west] for _, tf := range tileFiles { tile, err := newMapTileFromFile(filepath.Join(tilesDir, tf.Name()), currNorth, 0, 0, currWest) if err != nil { return err } // righmost tiles might be narrower, bottom // ones shorter so must re-compute S & E edge // for each tile; cannot assume all same // size. 
Also double checks assumption that // chopping preserves number of pixels finishTileBox(tile, fixedMap) var relTPath string // file ref inside KML must be relative to kmz root if relTPath, err = filepath.Rel(filepath.Join(tmpDir, base), tile.fpath); err != nil { return err } if err = kmlAddOverlay(kdocWtr, tf.Name(), tile.box, drawingOrder, relTPath); err != nil { return err } widthSum += tile.width if widthSum >= fixedMap.width { // drop down a row currNorth = tile.box[south] currWest = fixedMap.box[west] widthSum = 0 } else { currWest = tile.box[east] } } endKML(kdocWtr) kdocWtr.Close() var zf *os.File if zf, err = os.Create(base + ".kmz"); err != nil { return err } zipd(filepath.Join(tmpDir, base), zf) zf.Close() if !keepTmp { err = os.RemoveAll(tmpDir) if err != nil { return fmt.Errorf("Error removing tmp dir & contents: %v", err) } } } return nil } func startKML(w io.Writer, name string) error { t, err := template.New("kmlhdr").Parse(kmlHdrTmpl) if err != nil { return err } root := struct{ Name string }{name} return t.Execute(w, &root) } func kmlAddOverlay(w io.Writer, tileName string, tbox [4]float64, drawingOrder int, relTileFile string) error { t, err := template.New("kmloverlay").Parse(kmlOverlayTmpl) if err != nil { return err } root := struct { Name string TileFileName string DrawingOrder int North float64 South float64 East float64 West float64 }{tileName, relTileFile, drawingOrder, tbox[north], tbox[south], tbox[east], tbox[west]} return t.Execute(w, &root) } func endKML(w io.Writer) error
// finishTileBox completes the tile.box by setting its east and south // boundaries relative to its current north and west values using the // tile pixel size reltative to the full map size. func finishTileBox(tile, fullMap *mapTile) { nsDeltaDeg, ewDeltaDeg := delta(tile.width, tile.height, fullMap.box, fullMap.width, fullMap.height) tile.box[south] = tile.box[north] - nsDeltaDeg tile.box[east] = tile.box[west] + ewDeltaDeg } // delta returns the how many degrees further South the bottom of the // tile is than the top, and how many degrees further east the east // edge of the tile is than the west, given the tile width & height in // pixels, the map's bounding box in decimal degrees, and the map's // total width and height in pixels func delta(tileWidth, tileHeight int, box [4]float64, totWidth, totHeight int) (nsDeltaDeg float64, ewDeltaDeg float64) { nsDeltaDeg = (float64(tileHeight) / float64(totHeight)) * (box[north] - box[south]) ewDeg := eastDelta(box[east], box[west]) ewDeltaDeg = (float64(tileWidth) / float64(totWidth)) * ewDeg return } // eastDelta returns the positve decimal degrees difference between the // given east and west longitudes func eastDelta(e, w float64) float64 { e = normEasting(e) w = normEasting(w) if e < w { return 360 + e - w } return e - w } // normEasting returns the given longitude in dec degress normalized to be within [-180,180] func normEasting(deg float64) float64 { // go's Mod fcn preserves sign on first param if deg < -180 { return math.Mod(deg+180, 360) + 180 } if deg > 180 { return math.Mod(deg-180, 360) - 180 } return deg } func resizeFixToJpg(outFile, inFile string, maxPixArea int) error { // param order super sensitive cmd := exec.Command("convert", "-resize", "@"+fmt.Sprintf("%v", maxPixArea), inFile, "-strip", "-interlace", "none", outFile) glog.Infof("About to run: %#v\n", cmd.Args) _, err := cmd.Output() if err != nil { return err } return nil } func fixToJpg(outFile, inFile string) error { cmd := exec.Command("convert", inFile, "-strip", "-interlace", "none", outFile) glog.Infof("About to run: %#v\n", cmd.Args) _, err := cmd.Output() if err != nil { return err } return nil } func chopToJpgs(fixedJpg, outDir, baseName string) error { outFile := filepath.Join(outDir, baseName+"_tile_%03d.jpg") cmd := exec.Command("convert", "-crop", "1024x1024", fixedJpg, "+adjoin", outFile) glog.Infof("About to run: %#v\n", cmd.Args) _, err := cmd.Output() if err != nil { return err } return nil } // zipd makes a zip archive of the given dirctory and writes it to the // writer. Paths in the zip archive are relative to the base name of // the given directory. func zipd(dir string, w io.Writer) error { z := zip.NewWriter(w) defer func() { if err := z.Flush(); err != nil { fmt.Printf("Error flushing ZIP writer: %v\n", err) } if err := z.Close(); err != nil { fmt.Printf("Error closing ZIP writer: %v\n", err) } }() filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { if info.IsDir() { return nil } rel, err := filepath.Rel(dir, path) if err != nil { return err } r, err := os.Open(path) if err != nil { return err } defer r.Close() zw, err := z.Create(rel) if err != nil { return err } _, err = io.Copy(zw, r) if err != nil { return err } return nil }) return nil }
{ t, err := template.New("kmlftr").Parse(kmlFtr) if err != nil { return err } return t.Execute(w, nil) }
identifier_body
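kmz.go above encodes the bounding box in the image file name and then derives each tile's span in degrees (delta, eastDelta, normEasting). The sketch below restates that arithmetic under the same name-geo-anchored convention; the file name, map size, and tile size are made up for illustration and are not taken from the cutkmz code.

package main

import (
	"fmt"
	"math"
	"strconv"
	"strings"
)

// norm folds a longitude into [-180, 180], mirroring normEasting above.
func norm(deg float64) float64 {
	if deg < -180 {
		return math.Mod(deg+180, 360) + 180
	}
	if deg > 180 {
		return math.Mod(deg-180, 360) - 180
	}
	return deg
}

func main() {
	// The name-geo-anchored convention: <name>_<N>_<S>_<E>_<W>.jpg
	name := "Example-Map_49.47_49.33_-122.98_-123.13.jpg"
	parts := strings.Split(strings.TrimSuffix(name, ".jpg"), "_")
	n, _ := strconv.ParseFloat(parts[1], 64)
	s, _ := strconv.ParseFloat(parts[2], 64)
	e, _ := strconv.ParseFloat(parts[3], 64)
	w, _ := strconv.ParseFloat(parts[4], 64)

	// East-west extent, kept positive even across the antimeridian,
	// mirroring eastDelta above.
	ew := norm(e) - norm(w)
	if ew < 0 {
		ew += 360
	}

	// Degrees covered by one 1024x1024 tile cut from a 4096x2048 map
	// (the pixel sizes here are invented for the example).
	mapW, mapH, tile := 4096.0, 2048.0, 1024.0
	fmt.Printf("tile spans %.4f deg N-S, %.4f deg E-W\n",
		(tile/mapH)*(n-s), (tile/mapW)*ew)
}

The wrap-around handling is what keeps eastDelta positive for maps that cross the antimeridian, so each tile's west edge can simply be taken from the previous tile's east edge.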
kmz.go
// cutkmz subcommands // // Other than root.go, each of these go files is a cutkmz subcommand implementation // // - kmz - produces a KMZ with input JPG chopped into 1024x1024 tiles // - bigkmz - produces a KMZ containing input JPG as is for higher resolution uses such as Google Earth package cmd import ( "archive/zip" "bytes" "flag" "fmt" "io" "io/ioutil" "math" "os" "os/exec" "path/filepath" "strconv" "strings" "text/template" "github.com/golang/glog" "github.com/spf13/cobra" "github.com/spf13/viper" ) const ( convProg = "convert" // img mgck. "gm convert" poss identifyProg = "identify" // "gm identify" ditto ) const kmlHdrTmpl = `<?xml version="1.0" encoding="UTF-8"?> <kml xmlns="http://www.opengis.net/kml/2.2"> <Document> <name>{{ .Name }}</name> ` const kmlOverlayTmpl = ` <GroundOverlay> <name>{{ .Name }}</name> <color>bdffffff</color> <drawOrder>{{ .DrawingOrder }} </drawOrder> <Icon> <href>{{ .TileFileName }}</href> <viewBoundScale>1.0</viewBoundScale> </Icon> <LatLonBox> <north>{{ .North }}</north> <south>{{ .South }} </south> <east>{{ .East }}</east> <west>{{ .West }}</west> <rotation>0.0</rotation> </LatLonBox> </GroundOverlay> ` const kmlFtr = `</Document> </kml> ` const ( north int = iota // index into [4]float64 assoc dec. degrees south east west ) // mapTile holds an image filepath, its lat/long bounding box and // pixel width & height type mapTile struct { fpath string // file path of tile image width int // Tile width in pixels height int // Tile height in pixels box [4]float64 // lat&long bounding box in decimal degrees } // NewMapTile populates a map tile using the given width and height // instead of extracting it from the given file path. Panics if North // < South or cross a pole. func newMapTile(fpath string, pixWid, pixHigh int, n, s, e, w float64) *mapTile { if n > 90 || s < -90 || n < s { panic("No crossing a pole and map's North must be greater than South") } rv := &mapTile{ fpath: fpath, width: pixWid, height: pixHigh, box: [4]float64{n, s, normEasting(e), normEasting(w)}, } return rv } // NewMapTileFromFile reads in given file path and creates a map tile // with the filepath and pix width & height from the image. func newMapTileFromFile(fpath string, n, s, e, w float64) (*mapTile, error) { wid, high, err := imageWxH(fpath) if err != nil { return nil, err } return newMapTile(fpath, wid, high, n, s, e, w), nil } var kmzCmd = &cobra.Command{ Use: "kmz", Short: "Creates .kmz from a JPG with map tiles small enough for a Garmin GPS", Long: `Creates .kmz map tiles for a Garmin from a larger geo-poisitioned map image. Tested on a 62s & 64s Crunches and converts a raster image (.jpg,.gif,.tiff etc) to match what Garmin devices can handle wrt resolution and max tile-size. Rather than expect metadata files with geo-positioning information for the jpg, cutkmz expects it to be encoded into the file's name, "name-geo-anchored". Harder to lose. For example: Grouse-Mountain_49.336694_49.470628_-123.132056_-122.9811.jpg Underscores are required: <map-name>_<North-lat>_<South-lat>_<East-long>_<West-long>.<fmt> Garmin limits the max tiles per model (100 on 62s, 500 on Montana, Oregon 600 series and GPSMAP 64 series. Tiles of more than 1 megapixel (w*h) add no additional clarity. If you have a large image, it will be reduced in quality until it can be chopped in max-tiles or less 1024x1024 chunks. Connect your GPS via USB and copy the generated kmz files into /Garmin/CustomMap (SD or main mem). 
Garmin limitations on .kmz files and the images in them: * image must be jpeg, not 'progressive' * only considers the /doc.kml in the .kmz * tiles over 1MP, e.g. > 1024x1024 or 512x2048 etc pixels do not add increased resolution * each tile jpeg should be less than 3MB. * Max images/tiles per device: typically 100. 500 on some. * smaller image files are rendered faster Requires the imagemagick to be installed on your system, and uses its "convert" and "identify" programs `, Run: func(cmd *cobra.Command, args []string) { if err := process(viper.GetViper(), args); err != nil { fmt.Fprintf(os.Stderr, "Error: %v\n", err) fmt.Fprintf(os.Stderr, "see 'cutkmz kmz -h' for help\n") os.Exit(1) } }, } func init() { RootCmd.AddCommand(kmzCmd) kmzCmd.Flags().StringP("image", "i", "", "image file named with its bounding box in decimal degrees.") viper.BindPFlag("image", kmzCmd.Flags().Lookup("image")) kmzCmd.Flags().IntP("max_tiles", "t", 100, "max # pieces to cut jpg into. Beware of device limits.") viper.BindPFlag("max_tiles", kmzCmd.Flags().Lookup("max_tiles")) kmzCmd.Flags().IntP("drawing_order", "d", 51, "Garmins make values > 50 visible. Tune if have overlapping overlays.") viper.BindPFlag("drawing_order", kmzCmd.Flags().Lookup("drawing_order")) kmzCmd.Flags().BoolP("keep_tmp", "k", false, "Don't delete intermediate files from $TMPDIR.") viper.BindPFlag("keep_tmp", kmzCmd.Flags().Lookup("keep_tmp")) kmzCmd.Flags().AddGoFlagSet(flag.CommandLine) flag.CommandLine.VisitAll(func(f *flag.Flag) { viper.BindPFlag(f.Name, kmzCmd.Flags().Lookup(f.Name)) }) flag.CommandLine.Parse(nil) // shut up 'not parsed' complaints } // getBox returns map name & lat/long bounding box by extracing it // from the given file name. The Float slice is in order: northLat, // southLat, eastLong, westLong in decimal degrees func getBox(image string) (base string, box []float64, err error) { c := strings.Split(image, "_") if len(c) != 5 { err = fmt.Errorf("File name must include bounding box name_N_S_E_W.jpg in decimal degrees, e.g. Grouse-Mountain_49.336694_49.470628_-123.132056_-122.9811.jpg") return } base = filepath.Base(c[0]) for i := 1; i < 5; i++ { if i == 4 { s := strings.SplitN(c[i], ".", 3) if len(s) == 3 { c[i] = s[0] + "." + s[1] } } f, err := strconv.ParseFloat(c[i], 64) if err != nil { err = fmt.Errorf("Error parsing lat/long degrees in file name: %v", err) return "", nil, err } box = append(box, f) } if box[north] <= box[south] || box[north] > 90 || box[south] < -90 { return base, box, fmt.Errorf("North boundary must be greater than south boundary and in [-90,90]") } return } // imageWxH returns the width and height of image file in pixels func imageWxH(imageFilename string) (width int, height int, err error) { if _, err := os.Stat(imageFilename); os.IsNotExist(err) { return 0, 0, err } cmd := exec.Command(identifyProg, "-format", "%w %h", imageFilename) glog.Infof("About to run: %#v\n", cmd.Args) var b []byte b, err = cmd.Output() if err != nil { return 0, 0, err } wh := bytes.Split(b, []byte(" ")) if len(wh) != 2 { return 0, 0, fmt.Errorf("Expected two ints separated by space, but got: %v", b) } width, err = strconv.Atoi(string(wh[0])) if err != nil { return } height, err = strconv.Atoi(string(wh[1])) if err != nil { return } return } // process the name-geo-anchored files args into KMZs. Uses // "max_tiles" and and "drawing_order" from viper if present. 
func process(v *viper.Viper, args []string) error { maxTiles := v.GetInt("max_tiles") drawingOrder := v.GetInt("drawing_order") keepTmp := v.GetBool("keep_tmp") fmt.Printf("maxTiles %v, drawingOrder: %v, keepTmp: %v\n", maxTiles, drawingOrder, keepTmp) if len(args) == 0 { return fmt.Errorf("Image file required: must provide one or more imaage file path") } for _, image := range args { if _, err := os.Stat(image); os.IsNotExist(err) { return err } absImage, err := filepath.Abs(image) if err != nil { return fmt.Errorf("Issue with an image file path: %v", err) } base, box, err := getBox(absImage) if err != nil { return fmt.Errorf("Error with image file name: %v", err) } origMap, err := newMapTileFromFile(absImage, box[north], box[south], box[east], box[west]) if err != nil { return fmt.Errorf("Error extracting image dimensions: %v", err) } maxPixels := maxTiles * 1024 * 1024 tmpDir, err := ioutil.TempDir("", "cutkmz-") if err != nil { return fmt.Errorf("Error creating a temporary directory: %v", err) } tilesDir := filepath.Join(tmpDir, base, "tiles") err = os.MkdirAll(tilesDir, 0755) if err != nil { return fmt.Errorf("Error making tiles dir in tmp dir: %v", err) } fixedJpg := filepath.Join(tmpDir, "fixed.jpg") if maxPixels < (origMap.height * origMap.width) { resizeFixToJpg(fixedJpg, absImage, maxPixels) } else { fixToJpg(fixedJpg, absImage) } // Need to know pixel width of map from which we // chopped the tiles so we know which row a tile is // in. Knowing the tile's row allows us to set its // bounding box correctly. fixedMap, err := newMapTileFromFile(fixedJpg, box[north], box[south], box[east], box[west]) if err != nil { return err } // chop chop chop. bork. bork bork. chopToJpgs(fixedJpg, tilesDir, base) var kdocWtr *os.File if kdocWtr, err = os.Create(filepath.Join(tmpDir, base, "doc.kml")); err != nil { return err } if err = startKML(kdocWtr, base); err != nil { return err } // For each jpg tile create an entry in the kml file // with its bounding box. Imagemagick crop+adjoin // chopped & numbered the tile image files // lexocographically ascending starting from top left // (000) (NW) eastwards & then down to bottom right // (SE). ReadDir gives sorted result. var tileFiles []os.FileInfo if tileFiles, err = ioutil.ReadDir(tilesDir); err != nil { return err } var widthSum int currNorth := fixedMap.box[north] currWest := fixedMap.box[west] for _, tf := range tileFiles { tile, err := newMapTileFromFile(filepath.Join(tilesDir, tf.Name()), currNorth, 0, 0, currWest) if err != nil { return err } // righmost tiles might be narrower, bottom // ones shorter so must re-compute S & E edge // for each tile; cannot assume all same // size. 
Also double checks assumption that // chopping preserves number of pixels finishTileBox(tile, fixedMap) var relTPath string // file ref inside KML must be relative to kmz root if relTPath, err = filepath.Rel(filepath.Join(tmpDir, base), tile.fpath); err != nil { return err } if err = kmlAddOverlay(kdocWtr, tf.Name(), tile.box, drawingOrder, relTPath); err != nil { return err } widthSum += tile.width if widthSum >= fixedMap.width { // drop down a row currNorth = tile.box[south] currWest = fixedMap.box[west] widthSum = 0 } else { currWest = tile.box[east] } } endKML(kdocWtr) kdocWtr.Close() var zf *os.File if zf, err = os.Create(base + ".kmz"); err != nil { return err } zipd(filepath.Join(tmpDir, base), zf) zf.Close() if !keepTmp { err = os.RemoveAll(tmpDir) if err != nil { return fmt.Errorf("Error removing tmp dir & contents: %v", err) } } } return nil } func startKML(w io.Writer, name string) error { t, err := template.New("kmlhdr").Parse(kmlHdrTmpl) if err != nil { return err } root := struct{ Name string }{name} return t.Execute(w, &root) } func kmlAddOverlay(w io.Writer, tileName string, tbox [4]float64, drawingOrder int, relTileFile string) error { t, err := template.New("kmloverlay").Parse(kmlOverlayTmpl) if err != nil { return err } root := struct { Name string TileFileName string DrawingOrder int North float64 South float64 East float64 West float64 }{tileName, relTileFile, drawingOrder, tbox[north], tbox[south], tbox[east], tbox[west]} return t.Execute(w, &root) } func endKML(w io.Writer) error { t, err := template.New("kmlftr").Parse(kmlFtr) if err != nil { return err } return t.Execute(w, nil) } // finishTileBox completes the tile.box by setting its east and south // boundaries relative to its current north and west values using the // tile pixel size reltative to the full map size. func finishTileBox(tile, fullMap *mapTile) { nsDeltaDeg, ewDeltaDeg := delta(tile.width, tile.height, fullMap.box, fullMap.width, fullMap.height) tile.box[south] = tile.box[north] - nsDeltaDeg tile.box[east] = tile.box[west] + ewDeltaDeg } // delta returns the how many degrees further South the bottom of the // tile is than the top, and how many degrees further east the east // edge of the tile is than the west, given the tile width & height in // pixels, the map's bounding box in decimal degrees, and the map's // total width and height in pixels func delta(tileWidth, tileHeight int, box [4]float64, totWidth, totHeight int) (nsDeltaDeg float64, ewDeltaDeg float64) { nsDeltaDeg = (float64(tileHeight) / float64(totHeight)) * (box[north] - box[south]) ewDeg := eastDelta(box[east], box[west]) ewDeltaDeg = (float64(tileWidth) / float64(totWidth)) * ewDeg return } // eastDelta returns the positve decimal degrees difference between the // given east and west longitudes func eastDelta(e, w float64) float64 { e = normEasting(e) w = normEasting(w) if e < w { return 360 + e - w } return e - w } // normEasting returns the given longitude in dec degress normalized to be within [-180,180] func normEasting(deg float64) float64 { // go's Mod fcn preserves sign on first param if deg < -180 { return math.Mod(deg+180, 360) + 180 } if deg > 180 { return math.Mod(deg-180, 360) - 180 } return deg } func
(outFile, inFile string, maxPixArea int) error { // param order super sensitive cmd := exec.Command("convert", "-resize", "@"+fmt.Sprintf("%v", maxPixArea), inFile, "-strip", "-interlace", "none", outFile) glog.Infof("About to run: %#v\n", cmd.Args) _, err := cmd.Output() if err != nil { return err } return nil } func fixToJpg(outFile, inFile string) error { cmd := exec.Command("convert", inFile, "-strip", "-interlace", "none", outFile) glog.Infof("About to run: %#v\n", cmd.Args) _, err := cmd.Output() if err != nil { return err } return nil } func chopToJpgs(fixedJpg, outDir, baseName string) error { outFile := filepath.Join(outDir, baseName+"_tile_%03d.jpg") cmd := exec.Command("convert", "-crop", "1024x1024", fixedJpg, "+adjoin", outFile) glog.Infof("About to run: %#v\n", cmd.Args) _, err := cmd.Output() if err != nil { return err } return nil } // zipd makes a zip archive of the given dirctory and writes it to the // writer. Paths in the zip archive are relative to the base name of // the given directory. func zipd(dir string, w io.Writer) error { z := zip.NewWriter(w) defer func() { if err := z.Flush(); err != nil { fmt.Printf("Error flushing ZIP writer: %v\n", err) } if err := z.Close(); err != nil { fmt.Printf("Error closing ZIP writer: %v\n", err) } }() filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { if info.IsDir() { return nil } rel, err := filepath.Rel(dir, path) if err != nil { return err } r, err := os.Open(path) if err != nil { return err } defer r.Close() zw, err := z.Create(rel) if err != nil { return err } _, err = io.Copy(zw, r) if err != nil { return err } return nil }) return nil }
resizeFixToJpg
identifier_name
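The kmz.go listings above encode each map's bounding box directly in its file name ("name-geo-anchored": <map-name>_<North>_<South>_<East>_<West>.<fmt>). A simplified sketch of that convention; unlike getBox it strips the extension up front rather than special-casing the west value, so it is an illustration, not the same routine:

package main

import (
	"fmt"
	"path/filepath"
	"strconv"
	"strings"
)

// parseNameGeoAnchored splits <name>_<N>_<S>_<E>_<W>.<ext> into a map name
// and a [N, S, E, W] box in decimal degrees.
func parseNameGeoAnchored(path string) (name string, box [4]float64, err error) {
	base := filepath.Base(path)
	base = strings.TrimSuffix(base, filepath.Ext(base)) // drop ".jpg" etc.
	parts := strings.Split(base, "_")
	if len(parts) != 5 {
		return "", box, fmt.Errorf("want <name>_N_S_E_W, got %q", base)
	}
	name = parts[0]
	for i := 0; i < 4; i++ {
		if box[i], err = strconv.ParseFloat(parts[i+1], 64); err != nil {
			return "", box, err
		}
	}
	return name, box, nil
}

func main() {
	name, box, err := parseNameGeoAnchored("Grouse-Mountain_49.336694_49.470628_-123.132056_-122.9811.jpg")
	fmt.Println(name, box, err) // Grouse-Mountain [49.336694 49.470628 -123.132056 -122.9811] <nil>
}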
kmz.go
// cutkmz subcommands // // Other than root.go, each of these go files is a cutkmz subcommand implementation // // - kmz - produces a KMZ with input JPG chopped into 1024x1024 tiles // - bigkmz - produces a KMZ containing input JPG as is for higher resolution uses such as Google Earth package cmd import ( "archive/zip" "bytes" "flag" "fmt" "io" "io/ioutil" "math" "os" "os/exec" "path/filepath" "strconv" "strings" "text/template" "github.com/golang/glog" "github.com/spf13/cobra" "github.com/spf13/viper" ) const ( convProg = "convert" // img mgck. "gm convert" poss identifyProg = "identify" // "gm identify" ditto ) const kmlHdrTmpl = `<?xml version="1.0" encoding="UTF-8"?> <kml xmlns="http://www.opengis.net/kml/2.2"> <Document> <name>{{ .Name }}</name> ` const kmlOverlayTmpl = ` <GroundOverlay> <name>{{ .Name }}</name> <color>bdffffff</color> <drawOrder>{{ .DrawingOrder }} </drawOrder> <Icon> <href>{{ .TileFileName }}</href> <viewBoundScale>1.0</viewBoundScale> </Icon> <LatLonBox> <north>{{ .North }}</north> <south>{{ .South }} </south> <east>{{ .East }}</east> <west>{{ .West }}</west> <rotation>0.0</rotation> </LatLonBox> </GroundOverlay> ` const kmlFtr = `</Document> </kml> ` const ( north int = iota // index into [4]float64 assoc dec. degrees south east west ) // mapTile holds an image filepath, its lat/long bounding box and // pixel width & height type mapTile struct { fpath string // file path of tile image width int // Tile width in pixels height int // Tile height in pixels box [4]float64 // lat&long bounding box in decimal degrees } // NewMapTile populates a map tile using the given width and height // instead of extracting it from the given file path. Panics if North // < South or cross a pole. func newMapTile(fpath string, pixWid, pixHigh int, n, s, e, w float64) *mapTile { if n > 90 || s < -90 || n < s { panic("No crossing a pole and map's North must be greater than South") } rv := &mapTile{ fpath: fpath, width: pixWid, height: pixHigh, box: [4]float64{n, s, normEasting(e), normEasting(w)}, } return rv } // NewMapTileFromFile reads in given file path and creates a map tile // with the filepath and pix width & height from the image. func newMapTileFromFile(fpath string, n, s, e, w float64) (*mapTile, error) { wid, high, err := imageWxH(fpath) if err != nil { return nil, err } return newMapTile(fpath, wid, high, n, s, e, w), nil } var kmzCmd = &cobra.Command{ Use: "kmz", Short: "Creates .kmz from a JPG with map tiles small enough for a Garmin GPS", Long: `Creates .kmz map tiles for a Garmin from a larger geo-poisitioned map image. Tested on a 62s & 64s Crunches and converts a raster image (.jpg,.gif,.tiff etc) to match what Garmin devices can handle wrt resolution and max tile-size. Rather than expect metadata files with geo-positioning information for the jpg, cutkmz expects it to be encoded into the file's name, "name-geo-anchored". Harder to lose. For example: Grouse-Mountain_49.336694_49.470628_-123.132056_-122.9811.jpg Underscores are required: <map-name>_<North-lat>_<South-lat>_<East-long>_<West-long>.<fmt> Garmin limits the max tiles per model (100 on 62s, 500 on Montana, Oregon 600 series and GPSMAP 64 series. Tiles of more than 1 megapixel (w*h) add no additional clarity. If you have a large image, it will be reduced in quality until it can be chopped in max-tiles or less 1024x1024 chunks. Connect your GPS via USB and copy the generated kmz files into /Garmin/CustomMap (SD or main mem). 
Garmin limitations on .kmz files and the images in them:

* image must be jpeg, not 'progressive'
* only considers the /doc.kml in the .kmz
* tiles over 1MP, e.g. > 1024x1024 or 512x2048 etc pixels do not add increased resolution
* each tile jpeg should be less than 3MB.
* Max images/tiles per device: typically 100. 500 on some.
* smaller image files are rendered faster

Requires the imagemagick to be installed on your system, and uses its "convert" and "identify" programs
if err := process(viper.GetViper(), args); err != nil { fmt.Fprintf(os.Stderr, "Error: %v\n", err) fmt.Fprintf(os.Stderr, "see 'cutkmz kmz -h' for help\n") os.Exit(1) } }, } func init() { RootCmd.AddCommand(kmzCmd) kmzCmd.Flags().StringP("image", "i", "", "image file named with its bounding box in decimal degrees.") viper.BindPFlag("image", kmzCmd.Flags().Lookup("image")) kmzCmd.Flags().IntP("max_tiles", "t", 100, "max # pieces to cut jpg into. Beware of device limits.") viper.BindPFlag("max_tiles", kmzCmd.Flags().Lookup("max_tiles")) kmzCmd.Flags().IntP("drawing_order", "d", 51, "Garmins make values > 50 visible. Tune if have overlapping overlays.") viper.BindPFlag("drawing_order", kmzCmd.Flags().Lookup("drawing_order")) kmzCmd.Flags().BoolP("keep_tmp", "k", false, "Don't delete intermediate files from $TMPDIR.") viper.BindPFlag("keep_tmp", kmzCmd.Flags().Lookup("keep_tmp")) kmzCmd.Flags().AddGoFlagSet(flag.CommandLine) flag.CommandLine.VisitAll(func(f *flag.Flag) { viper.BindPFlag(f.Name, kmzCmd.Flags().Lookup(f.Name)) }) flag.CommandLine.Parse(nil) // shut up 'not parsed' complaints } // getBox returns map name & lat/long bounding box by extracing it // from the given file name. The Float slice is in order: northLat, // southLat, eastLong, westLong in decimal degrees func getBox(image string) (base string, box []float64, err error) { c := strings.Split(image, "_") if len(c) != 5 { err = fmt.Errorf("File name must include bounding box name_N_S_E_W.jpg in decimal degrees, e.g. Grouse-Mountain_49.336694_49.470628_-123.132056_-122.9811.jpg") return } base = filepath.Base(c[0]) for i := 1; i < 5; i++ { if i == 4 { s := strings.SplitN(c[i], ".", 3) if len(s) == 3 { c[i] = s[0] + "." + s[1] } } f, err := strconv.ParseFloat(c[i], 64) if err != nil { err = fmt.Errorf("Error parsing lat/long degrees in file name: %v", err) return "", nil, err } box = append(box, f) } if box[north] <= box[south] || box[north] > 90 || box[south] < -90 { return base, box, fmt.Errorf("North boundary must be greater than south boundary and in [-90,90]") } return } // imageWxH returns the width and height of image file in pixels func imageWxH(imageFilename string) (width int, height int, err error) { if _, err := os.Stat(imageFilename); os.IsNotExist(err) { return 0, 0, err } cmd := exec.Command(identifyProg, "-format", "%w %h", imageFilename) glog.Infof("About to run: %#v\n", cmd.Args) var b []byte b, err = cmd.Output() if err != nil { return 0, 0, err } wh := bytes.Split(b, []byte(" ")) if len(wh) != 2 { return 0, 0, fmt.Errorf("Expected two ints separated by space, but got: %v", b) } width, err = strconv.Atoi(string(wh[0])) if err != nil { return } height, err = strconv.Atoi(string(wh[1])) if err != nil { return } return } // process the name-geo-anchored files args into KMZs. Uses // "max_tiles" and and "drawing_order" from viper if present. 
func process(v *viper.Viper, args []string) error { maxTiles := v.GetInt("max_tiles") drawingOrder := v.GetInt("drawing_order") keepTmp := v.GetBool("keep_tmp") fmt.Printf("maxTiles %v, drawingOrder: %v, keepTmp: %v\n", maxTiles, drawingOrder, keepTmp) if len(args) == 0 { return fmt.Errorf("Image file required: must provide one or more imaage file path") } for _, image := range args { if _, err := os.Stat(image); os.IsNotExist(err) { return err } absImage, err := filepath.Abs(image) if err != nil { return fmt.Errorf("Issue with an image file path: %v", err) } base, box, err := getBox(absImage) if err != nil { return fmt.Errorf("Error with image file name: %v", err) } origMap, err := newMapTileFromFile(absImage, box[north], box[south], box[east], box[west]) if err != nil { return fmt.Errorf("Error extracting image dimensions: %v", err) } maxPixels := maxTiles * 1024 * 1024 tmpDir, err := ioutil.TempDir("", "cutkmz-") if err != nil { return fmt.Errorf("Error creating a temporary directory: %v", err) } tilesDir := filepath.Join(tmpDir, base, "tiles") err = os.MkdirAll(tilesDir, 0755) if err != nil { return fmt.Errorf("Error making tiles dir in tmp dir: %v", err) } fixedJpg := filepath.Join(tmpDir, "fixed.jpg") if maxPixels < (origMap.height * origMap.width) { resizeFixToJpg(fixedJpg, absImage, maxPixels) } else { fixToJpg(fixedJpg, absImage) } // Need to know pixel width of map from which we // chopped the tiles so we know which row a tile is // in. Knowing the tile's row allows us to set its // bounding box correctly. fixedMap, err := newMapTileFromFile(fixedJpg, box[north], box[south], box[east], box[west]) if err != nil { return err } // chop chop chop. bork. bork bork. chopToJpgs(fixedJpg, tilesDir, base) var kdocWtr *os.File if kdocWtr, err = os.Create(filepath.Join(tmpDir, base, "doc.kml")); err != nil { return err } if err = startKML(kdocWtr, base); err != nil { return err } // For each jpg tile create an entry in the kml file // with its bounding box. Imagemagick crop+adjoin // chopped & numbered the tile image files // lexocographically ascending starting from top left // (000) (NW) eastwards & then down to bottom right // (SE). ReadDir gives sorted result. var tileFiles []os.FileInfo if tileFiles, err = ioutil.ReadDir(tilesDir); err != nil { return err } var widthSum int currNorth := fixedMap.box[north] currWest := fixedMap.box[west] for _, tf := range tileFiles { tile, err := newMapTileFromFile(filepath.Join(tilesDir, tf.Name()), currNorth, 0, 0, currWest) if err != nil { return err } // righmost tiles might be narrower, bottom // ones shorter so must re-compute S & E edge // for each tile; cannot assume all same // size. 
Also double checks assumption that // chopping preserves number of pixels finishTileBox(tile, fixedMap) var relTPath string // file ref inside KML must be relative to kmz root if relTPath, err = filepath.Rel(filepath.Join(tmpDir, base), tile.fpath); err != nil { return err } if err = kmlAddOverlay(kdocWtr, tf.Name(), tile.box, drawingOrder, relTPath); err != nil { return err } widthSum += tile.width if widthSum >= fixedMap.width { // drop down a row currNorth = tile.box[south] currWest = fixedMap.box[west] widthSum = 0 } else { currWest = tile.box[east] } } endKML(kdocWtr) kdocWtr.Close() var zf *os.File if zf, err = os.Create(base + ".kmz"); err != nil { return err } zipd(filepath.Join(tmpDir, base), zf) zf.Close() if !keepTmp { err = os.RemoveAll(tmpDir) if err != nil { return fmt.Errorf("Error removing tmp dir & contents: %v", err) } } } return nil } func startKML(w io.Writer, name string) error { t, err := template.New("kmlhdr").Parse(kmlHdrTmpl) if err != nil { return err } root := struct{ Name string }{name} return t.Execute(w, &root) } func kmlAddOverlay(w io.Writer, tileName string, tbox [4]float64, drawingOrder int, relTileFile string) error { t, err := template.New("kmloverlay").Parse(kmlOverlayTmpl) if err != nil { return err } root := struct { Name string TileFileName string DrawingOrder int North float64 South float64 East float64 West float64 }{tileName, relTileFile, drawingOrder, tbox[north], tbox[south], tbox[east], tbox[west]} return t.Execute(w, &root) } func endKML(w io.Writer) error { t, err := template.New("kmlftr").Parse(kmlFtr) if err != nil { return err } return t.Execute(w, nil) } // finishTileBox completes the tile.box by setting its east and south // boundaries relative to its current north and west values using the // tile pixel size reltative to the full map size. 
func finishTileBox(tile, fullMap *mapTile) { nsDeltaDeg, ewDeltaDeg := delta(tile.width, tile.height, fullMap.box, fullMap.width, fullMap.height) tile.box[south] = tile.box[north] - nsDeltaDeg tile.box[east] = tile.box[west] + ewDeltaDeg } // delta returns the how many degrees further South the bottom of the // tile is than the top, and how many degrees further east the east // edge of the tile is than the west, given the tile width & height in // pixels, the map's bounding box in decimal degrees, and the map's // total width and height in pixels func delta(tileWidth, tileHeight int, box [4]float64, totWidth, totHeight int) (nsDeltaDeg float64, ewDeltaDeg float64) { nsDeltaDeg = (float64(tileHeight) / float64(totHeight)) * (box[north] - box[south]) ewDeg := eastDelta(box[east], box[west]) ewDeltaDeg = (float64(tileWidth) / float64(totWidth)) * ewDeg return } // eastDelta returns the positve decimal degrees difference between the // given east and west longitudes func eastDelta(e, w float64) float64 { e = normEasting(e) w = normEasting(w) if e < w { return 360 + e - w } return e - w } // normEasting returns the given longitude in dec degress normalized to be within [-180,180] func normEasting(deg float64) float64 { // go's Mod fcn preserves sign on first param if deg < -180 { return math.Mod(deg+180, 360) + 180 } if deg > 180 { return math.Mod(deg-180, 360) - 180 } return deg } func resizeFixToJpg(outFile, inFile string, maxPixArea int) error { // param order super sensitive cmd := exec.Command("convert", "-resize", "@"+fmt.Sprintf("%v", maxPixArea), inFile, "-strip", "-interlace", "none", outFile) glog.Infof("About to run: %#v\n", cmd.Args) _, err := cmd.Output() if err != nil { return err } return nil } func fixToJpg(outFile, inFile string) error { cmd := exec.Command("convert", inFile, "-strip", "-interlace", "none", outFile) glog.Infof("About to run: %#v\n", cmd.Args) _, err := cmd.Output() if err != nil { return err } return nil } func chopToJpgs(fixedJpg, outDir, baseName string) error { outFile := filepath.Join(outDir, baseName+"_tile_%03d.jpg") cmd := exec.Command("convert", "-crop", "1024x1024", fixedJpg, "+adjoin", outFile) glog.Infof("About to run: %#v\n", cmd.Args) _, err := cmd.Output() if err != nil { return err } return nil } // zipd makes a zip archive of the given dirctory and writes it to the // writer. Paths in the zip archive are relative to the base name of // the given directory. func zipd(dir string, w io.Writer) error { z := zip.NewWriter(w) defer func() { if err := z.Flush(); err != nil { fmt.Printf("Error flushing ZIP writer: %v\n", err) } if err := z.Close(); err != nil { fmt.Printf("Error closing ZIP writer: %v\n", err) } }() filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { if info.IsDir() { return nil } rel, err := filepath.Rel(dir, path) if err != nil { return err } r, err := os.Open(path) if err != nil { return err } defer r.Close() zw, err := z.Create(rel) if err != nil { return err } _, err = io.Copy(zw, r) if err != nil { return err } return nil }) return nil }
`, Run: func(cmd *cobra.Command, args []string) {
random_line_split
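The finishTileBox/delta helpers in the record above scale a tile's pixel fraction of the full map into degrees of latitude and longitude, normalizing longitudes into [-180,180] so a box that straddles the antimeridian still yields a positive east-west span. A self-contained sketch of that arithmetic with made-up map dimensions:

package main

import (
	"fmt"
	"math"
)

// normEasting and eastDelta mirror the helpers in kmz.go; the map size and
// box below are hypothetical numbers for illustration only.
func normEasting(deg float64) float64 {
	if deg < -180 {
		return math.Mod(deg+180, 360) + 180
	}
	if deg > 180 {
		return math.Mod(deg-180, 360) - 180
	}
	return deg
}

func eastDelta(e, w float64) float64 {
	e, w = normEasting(e), normEasting(w)
	if e < w {
		return 360 + e - w // box straddles the antimeridian
	}
	return e - w
}

func main() {
	// Full map: 4096x3072 px spanning N 49.470628..S 49.336694,
	// W -123.132056..E -122.9811.
	n, s, e, w := 49.470628, 49.336694, -122.9811, -123.132056
	totW, totH := 4096.0, 3072.0
	tileW, tileH := 1024.0, 1024.0

	// A tile covering a fraction of the pixels covers the same fraction of
	// the degree span.
	nsDelta := (tileH / totH) * (n - s)
	ewDelta := (tileW / totW) * eastDelta(e, w)
	fmt.Printf("a 1024x1024 tile spans %.6f deg of latitude, %.6f deg of longitude\n", nsDelta, ewDelta)
}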
kmz.go
// cutkmz subcommands // // Other than root.go, each of these go files is a cutkmz subcommand implementation // // - kmz - produces a KMZ with input JPG chopped into 1024x1024 tiles // - bigkmz - produces a KMZ containing input JPG as is for higher resolution uses such as Google Earth package cmd import ( "archive/zip" "bytes" "flag" "fmt" "io" "io/ioutil" "math" "os" "os/exec" "path/filepath" "strconv" "strings" "text/template" "github.com/golang/glog" "github.com/spf13/cobra" "github.com/spf13/viper" ) const ( convProg = "convert" // img mgck. "gm convert" poss identifyProg = "identify" // "gm identify" ditto ) const kmlHdrTmpl = `<?xml version="1.0" encoding="UTF-8"?> <kml xmlns="http://www.opengis.net/kml/2.2"> <Document> <name>{{ .Name }}</name> ` const kmlOverlayTmpl = ` <GroundOverlay> <name>{{ .Name }}</name> <color>bdffffff</color> <drawOrder>{{ .DrawingOrder }} </drawOrder> <Icon> <href>{{ .TileFileName }}</href> <viewBoundScale>1.0</viewBoundScale> </Icon> <LatLonBox> <north>{{ .North }}</north> <south>{{ .South }} </south> <east>{{ .East }}</east> <west>{{ .West }}</west> <rotation>0.0</rotation> </LatLonBox> </GroundOverlay> ` const kmlFtr = `</Document> </kml> ` const ( north int = iota // index into [4]float64 assoc dec. degrees south east west ) // mapTile holds an image filepath, its lat/long bounding box and // pixel width & height type mapTile struct { fpath string // file path of tile image width int // Tile width in pixels height int // Tile height in pixels box [4]float64 // lat&long bounding box in decimal degrees } // NewMapTile populates a map tile using the given width and height // instead of extracting it from the given file path. Panics if North // < South or cross a pole. func newMapTile(fpath string, pixWid, pixHigh int, n, s, e, w float64) *mapTile { if n > 90 || s < -90 || n < s { panic("No crossing a pole and map's North must be greater than South") } rv := &mapTile{ fpath: fpath, width: pixWid, height: pixHigh, box: [4]float64{n, s, normEasting(e), normEasting(w)}, } return rv } // NewMapTileFromFile reads in given file path and creates a map tile // with the filepath and pix width & height from the image. func newMapTileFromFile(fpath string, n, s, e, w float64) (*mapTile, error) { wid, high, err := imageWxH(fpath) if err != nil { return nil, err } return newMapTile(fpath, wid, high, n, s, e, w), nil } var kmzCmd = &cobra.Command{ Use: "kmz", Short: "Creates .kmz from a JPG with map tiles small enough for a Garmin GPS", Long: `Creates .kmz map tiles for a Garmin from a larger geo-poisitioned map image. Tested on a 62s & 64s Crunches and converts a raster image (.jpg,.gif,.tiff etc) to match what Garmin devices can handle wrt resolution and max tile-size. Rather than expect metadata files with geo-positioning information for the jpg, cutkmz expects it to be encoded into the file's name, "name-geo-anchored". Harder to lose. For example: Grouse-Mountain_49.336694_49.470628_-123.132056_-122.9811.jpg Underscores are required: <map-name>_<North-lat>_<South-lat>_<East-long>_<West-long>.<fmt> Garmin limits the max tiles per model (100 on 62s, 500 on Montana, Oregon 600 series and GPSMAP 64 series. Tiles of more than 1 megapixel (w*h) add no additional clarity. If you have a large image, it will be reduced in quality until it can be chopped in max-tiles or less 1024x1024 chunks. Connect your GPS via USB and copy the generated kmz files into /Garmin/CustomMap (SD or main mem). 
Garmin limitations on .kmz files and the images in them: * image must be jpeg, not 'progressive' * only considers the /doc.kml in the .kmz * tiles over 1MP, e.g. > 1024x1024 or 512x2048 etc pixels do not add increased resolution * each tile jpeg should be less than 3MB. * Max images/tiles per device: typically 100. 500 on some. * smaller image files are rendered faster Requires the imagemagick to be installed on your system, and uses its "convert" and "identify" programs `, Run: func(cmd *cobra.Command, args []string) { if err := process(viper.GetViper(), args); err != nil { fmt.Fprintf(os.Stderr, "Error: %v\n", err) fmt.Fprintf(os.Stderr, "see 'cutkmz kmz -h' for help\n") os.Exit(1) } }, } func init() { RootCmd.AddCommand(kmzCmd) kmzCmd.Flags().StringP("image", "i", "", "image file named with its bounding box in decimal degrees.") viper.BindPFlag("image", kmzCmd.Flags().Lookup("image")) kmzCmd.Flags().IntP("max_tiles", "t", 100, "max # pieces to cut jpg into. Beware of device limits.") viper.BindPFlag("max_tiles", kmzCmd.Flags().Lookup("max_tiles")) kmzCmd.Flags().IntP("drawing_order", "d", 51, "Garmins make values > 50 visible. Tune if have overlapping overlays.") viper.BindPFlag("drawing_order", kmzCmd.Flags().Lookup("drawing_order")) kmzCmd.Flags().BoolP("keep_tmp", "k", false, "Don't delete intermediate files from $TMPDIR.") viper.BindPFlag("keep_tmp", kmzCmd.Flags().Lookup("keep_tmp")) kmzCmd.Flags().AddGoFlagSet(flag.CommandLine) flag.CommandLine.VisitAll(func(f *flag.Flag) { viper.BindPFlag(f.Name, kmzCmd.Flags().Lookup(f.Name)) }) flag.CommandLine.Parse(nil) // shut up 'not parsed' complaints } // getBox returns map name & lat/long bounding box by extracing it // from the given file name. The Float slice is in order: northLat, // southLat, eastLong, westLong in decimal degrees func getBox(image string) (base string, box []float64, err error) { c := strings.Split(image, "_") if len(c) != 5 { err = fmt.Errorf("File name must include bounding box name_N_S_E_W.jpg in decimal degrees, e.g. Grouse-Mountain_49.336694_49.470628_-123.132056_-122.9811.jpg") return } base = filepath.Base(c[0]) for i := 1; i < 5; i++ { if i == 4 { s := strings.SplitN(c[i], ".", 3) if len(s) == 3 { c[i] = s[0] + "." + s[1] } } f, err := strconv.ParseFloat(c[i], 64) if err != nil { err = fmt.Errorf("Error parsing lat/long degrees in file name: %v", err) return "", nil, err } box = append(box, f) } if box[north] <= box[south] || box[north] > 90 || box[south] < -90 { return base, box, fmt.Errorf("North boundary must be greater than south boundary and in [-90,90]") } return } // imageWxH returns the width and height of image file in pixels func imageWxH(imageFilename string) (width int, height int, err error) { if _, err := os.Stat(imageFilename); os.IsNotExist(err) { return 0, 0, err } cmd := exec.Command(identifyProg, "-format", "%w %h", imageFilename) glog.Infof("About to run: %#v\n", cmd.Args) var b []byte b, err = cmd.Output() if err != nil { return 0, 0, err } wh := bytes.Split(b, []byte(" ")) if len(wh) != 2 { return 0, 0, fmt.Errorf("Expected two ints separated by space, but got: %v", b) } width, err = strconv.Atoi(string(wh[0])) if err != nil { return } height, err = strconv.Atoi(string(wh[1])) if err != nil { return } return } // process the name-geo-anchored files args into KMZs. Uses // "max_tiles" and and "drawing_order" from viper if present. 
func process(v *viper.Viper, args []string) error { maxTiles := v.GetInt("max_tiles") drawingOrder := v.GetInt("drawing_order") keepTmp := v.GetBool("keep_tmp") fmt.Printf("maxTiles %v, drawingOrder: %v, keepTmp: %v\n", maxTiles, drawingOrder, keepTmp) if len(args) == 0 { return fmt.Errorf("Image file required: must provide one or more imaage file path") } for _, image := range args { if _, err := os.Stat(image); os.IsNotExist(err) { return err } absImage, err := filepath.Abs(image) if err != nil { return fmt.Errorf("Issue with an image file path: %v", err) } base, box, err := getBox(absImage) if err != nil { return fmt.Errorf("Error with image file name: %v", err) } origMap, err := newMapTileFromFile(absImage, box[north], box[south], box[east], box[west]) if err != nil { return fmt.Errorf("Error extracting image dimensions: %v", err) } maxPixels := maxTiles * 1024 * 1024 tmpDir, err := ioutil.TempDir("", "cutkmz-") if err != nil { return fmt.Errorf("Error creating a temporary directory: %v", err) } tilesDir := filepath.Join(tmpDir, base, "tiles") err = os.MkdirAll(tilesDir, 0755) if err != nil { return fmt.Errorf("Error making tiles dir in tmp dir: %v", err) } fixedJpg := filepath.Join(tmpDir, "fixed.jpg") if maxPixels < (origMap.height * origMap.width) { resizeFixToJpg(fixedJpg, absImage, maxPixels) } else { fixToJpg(fixedJpg, absImage) } // Need to know pixel width of map from which we // chopped the tiles so we know which row a tile is // in. Knowing the tile's row allows us to set its // bounding box correctly. fixedMap, err := newMapTileFromFile(fixedJpg, box[north], box[south], box[east], box[west]) if err != nil { return err } // chop chop chop. bork. bork bork. chopToJpgs(fixedJpg, tilesDir, base) var kdocWtr *os.File if kdocWtr, err = os.Create(filepath.Join(tmpDir, base, "doc.kml")); err != nil { return err } if err = startKML(kdocWtr, base); err != nil { return err } // For each jpg tile create an entry in the kml file // with its bounding box. Imagemagick crop+adjoin // chopped & numbered the tile image files // lexocographically ascending starting from top left // (000) (NW) eastwards & then down to bottom right // (SE). ReadDir gives sorted result. var tileFiles []os.FileInfo if tileFiles, err = ioutil.ReadDir(tilesDir); err != nil { return err } var widthSum int currNorth := fixedMap.box[north] currWest := fixedMap.box[west] for _, tf := range tileFiles { tile, err := newMapTileFromFile(filepath.Join(tilesDir, tf.Name()), currNorth, 0, 0, currWest) if err != nil { return err } // righmost tiles might be narrower, bottom // ones shorter so must re-compute S & E edge // for each tile; cannot assume all same // size. 
Also double checks assumption that // chopping preserves number of pixels finishTileBox(tile, fixedMap) var relTPath string // file ref inside KML must be relative to kmz root if relTPath, err = filepath.Rel(filepath.Join(tmpDir, base), tile.fpath); err != nil { return err } if err = kmlAddOverlay(kdocWtr, tf.Name(), tile.box, drawingOrder, relTPath); err != nil { return err } widthSum += tile.width if widthSum >= fixedMap.width { // drop down a row currNorth = tile.box[south] currWest = fixedMap.box[west] widthSum = 0 } else { currWest = tile.box[east] } } endKML(kdocWtr) kdocWtr.Close() var zf *os.File if zf, err = os.Create(base + ".kmz"); err != nil { return err } zipd(filepath.Join(tmpDir, base), zf) zf.Close() if !keepTmp { err = os.RemoveAll(tmpDir) if err != nil { return fmt.Errorf("Error removing tmp dir & contents: %v", err) } } } return nil } func startKML(w io.Writer, name string) error { t, err := template.New("kmlhdr").Parse(kmlHdrTmpl) if err != nil { return err } root := struct{ Name string }{name} return t.Execute(w, &root) } func kmlAddOverlay(w io.Writer, tileName string, tbox [4]float64, drawingOrder int, relTileFile string) error { t, err := template.New("kmloverlay").Parse(kmlOverlayTmpl) if err != nil { return err } root := struct { Name string TileFileName string DrawingOrder int North float64 South float64 East float64 West float64 }{tileName, relTileFile, drawingOrder, tbox[north], tbox[south], tbox[east], tbox[west]} return t.Execute(w, &root) } func endKML(w io.Writer) error { t, err := template.New("kmlftr").Parse(kmlFtr) if err != nil { return err } return t.Execute(w, nil) } // finishTileBox completes the tile.box by setting its east and south // boundaries relative to its current north and west values using the // tile pixel size reltative to the full map size. func finishTileBox(tile, fullMap *mapTile) { nsDeltaDeg, ewDeltaDeg := delta(tile.width, tile.height, fullMap.box, fullMap.width, fullMap.height) tile.box[south] = tile.box[north] - nsDeltaDeg tile.box[east] = tile.box[west] + ewDeltaDeg } // delta returns the how many degrees further South the bottom of the // tile is than the top, and how many degrees further east the east // edge of the tile is than the west, given the tile width & height in // pixels, the map's bounding box in decimal degrees, and the map's // total width and height in pixels func delta(tileWidth, tileHeight int, box [4]float64, totWidth, totHeight int) (nsDeltaDeg float64, ewDeltaDeg float64) { nsDeltaDeg = (float64(tileHeight) / float64(totHeight)) * (box[north] - box[south]) ewDeg := eastDelta(box[east], box[west]) ewDeltaDeg = (float64(tileWidth) / float64(totWidth)) * ewDeg return } // eastDelta returns the positve decimal degrees difference between the // given east and west longitudes func eastDelta(e, w float64) float64 { e = normEasting(e) w = normEasting(w) if e < w
return e - w } // normEasting returns the given longitude in dec degress normalized to be within [-180,180] func normEasting(deg float64) float64 { // go's Mod fcn preserves sign on first param if deg < -180 { return math.Mod(deg+180, 360) + 180 } if deg > 180 { return math.Mod(deg-180, 360) - 180 } return deg } func resizeFixToJpg(outFile, inFile string, maxPixArea int) error { // param order super sensitive cmd := exec.Command("convert", "-resize", "@"+fmt.Sprintf("%v", maxPixArea), inFile, "-strip", "-interlace", "none", outFile) glog.Infof("About to run: %#v\n", cmd.Args) _, err := cmd.Output() if err != nil { return err } return nil } func fixToJpg(outFile, inFile string) error { cmd := exec.Command("convert", inFile, "-strip", "-interlace", "none", outFile) glog.Infof("About to run: %#v\n", cmd.Args) _, err := cmd.Output() if err != nil { return err } return nil } func chopToJpgs(fixedJpg, outDir, baseName string) error { outFile := filepath.Join(outDir, baseName+"_tile_%03d.jpg") cmd := exec.Command("convert", "-crop", "1024x1024", fixedJpg, "+adjoin", outFile) glog.Infof("About to run: %#v\n", cmd.Args) _, err := cmd.Output() if err != nil { return err } return nil } // zipd makes a zip archive of the given dirctory and writes it to the // writer. Paths in the zip archive are relative to the base name of // the given directory. func zipd(dir string, w io.Writer) error { z := zip.NewWriter(w) defer func() { if err := z.Flush(); err != nil { fmt.Printf("Error flushing ZIP writer: %v\n", err) } if err := z.Close(); err != nil { fmt.Printf("Error closing ZIP writer: %v\n", err) } }() filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { if info.IsDir() { return nil } rel, err := filepath.Rel(dir, path) if err != nil { return err } r, err := os.Open(path) if err != nil { return err } defer r.Close() zw, err := z.Create(rel) if err != nil { return err } _, err = io.Copy(zw, r) if err != nil { return err } return nil }) return nil }
{ return 360 + e - w }
conditional_block
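The zipd helper in the kmz.go listings packages the staging directory (doc.kml plus the tiles/ folder) into the .kmz, which is an ordinary ZIP archive whose entry names are relative to that directory. A compact sketch of the same walk-and-copy pattern; the staging path is hypothetical, and unlike zipd this version propagates the error returned by filepath.Walk instead of discarding it:

package main

import (
	"archive/zip"
	"fmt"
	"io"
	"os"
	"path/filepath"
)

// zipDir writes every regular file under dir into a ZIP archive, with
// entry names relative to dir.
func zipDir(dir string, w io.Writer) error {
	z := zip.NewWriter(w)
	defer z.Close() // Close also flushes the central directory
	return filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
		if err != nil || info.IsDir() {
			return err
		}
		rel, err := filepath.Rel(dir, path)
		if err != nil {
			return err
		}
		f, err := os.Open(path)
		if err != nil {
			return err
		}
		defer f.Close()
		zw, err := z.Create(rel)
		if err != nil {
			return err
		}
		_, err = io.Copy(zw, f)
		return err
	})
}

func main() {
	out, _ := os.Create("example.kmz")
	defer out.Close()
	fmt.Println(zipDir("staging/Grouse-Mountain", out)) // directory path is made up
}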
redis.go
// Copyright 2020 gorse Project Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package cache import ( "context" "fmt" "github.com/go-redis/redis/v9" "github.com/juju/errors" "github.com/zhenghaoz/gorse/storage" "net/url" "strconv" "strings" "time" ) func ParseRedisClusterURL(redisURL string) (*redis.ClusterOptions, error) { options := &redis.ClusterOptions{} uri := redisURL var err error if strings.HasPrefix(redisURL, storage.RedisClusterPrefix) { uri = uri[len(storage.RedisClusterPrefix):] } else { return nil, fmt.Errorf("scheme must be \"redis+cluster\"") } if idx := strings.Index(uri, "@"); idx != -1 { userInfo := uri[:idx] uri = uri[idx+1:] username := userInfo var password string if idx := strings.Index(userInfo, ":"); idx != -1 { username = userInfo[:idx] password = userInfo[idx+1:] } // Validate and process the username. if strings.Contains(username, "/") { return nil, fmt.Errorf("unescaped slash in username") } options.Username, err = url.PathUnescape(username) if err != nil { return nil, errors.Wrap(err, fmt.Errorf("invalid username")) } // Validate and process the password. if strings.Contains(password, ":") { return nil, fmt.Errorf("unescaped colon in password") } if strings.Contains(password, "/") { return nil, fmt.Errorf("unescaped slash in password") } options.Password, err = url.PathUnescape(password) if err != nil { return nil, errors.Wrap(err, fmt.Errorf("invalid password")) } } // fetch the hosts field hosts := uri if idx := strings.IndexAny(uri, "/?@"); idx != -1 { if uri[idx] == '@' { return nil, fmt.Errorf("unescaped @ sign in user info") } hosts = uri[:idx] } options.Addrs = strings.Split(hosts, ",") uri = uri[len(hosts):] if len(uri) > 0 && uri[0] == '/' { uri = uri[1:] } // grab connection arguments from URI connectionArgsFromQueryString, err := extractQueryArgsFromURI(uri) if err != nil { return nil, err } for _, pair := range connectionArgsFromQueryString { err = addOption(options, pair) if err != nil { return nil, err } } return options, nil } func extractQueryArgsFromURI(uri string) ([]string, error) { if len(uri) == 0 { return nil, nil } if uri[0] != '?' { return nil, errors.New("must have a ? 
separator between path and query") } uri = uri[1:] if len(uri) == 0 { return nil, nil } return strings.FieldsFunc(uri, func(r rune) bool { return r == ';' || r == '&' }), nil } type optionHandler struct { int *int bool *bool duration *time.Duration } func addOption(options *redis.ClusterOptions, pair string) error { kv := strings.SplitN(pair, "=", 2) if len(kv) != 2 || kv[0] == "" { return fmt.Errorf("invalid option") } key, err := url.QueryUnescape(kv[0]) if err != nil { return errors.Wrap(err, errors.Errorf("invalid option key %q", kv[0])) } value, err := url.QueryUnescape(kv[1]) if err != nil { return errors.Wrap(err, errors.Errorf("invalid option value %q", kv[1])) } handlers := map[string]optionHandler{ "max_retries": {int: &options.MaxRetries}, "min_retry_backoff": {duration: &options.MinRetryBackoff}, "max_retry_backoff": {duration: &options.MaxRetryBackoff}, "dial_timeout": {duration: &options.DialTimeout}, "read_timeout": {duration: &options.ReadTimeout}, "write_timeout": {duration: &options.WriteTimeout}, "pool_fifo": {bool: &options.PoolFIFO}, "pool_size": {int: &options.PoolSize}, "pool_timeout": {duration: &options.PoolTimeout}, "min_idle_conns": {int: &options.MinIdleConns}, "max_idle_conns": {int: &options.MaxIdleConns}, "conn_max_idle_time": {duration: &options.ConnMaxIdleTime}, "conn_max_lifetime": {duration: &options.ConnMaxLifetime}, } lowerKey := strings.ToLower(key) if handler, ok := handlers[lowerKey]; ok { if handler.int != nil { *handler.int, err = strconv.Atoi(value) if err != nil { return errors.Wrap(err, fmt.Errorf("invalid '%s' value: %q", key, value)) } } else if handler.duration != nil { *handler.duration, err = time.ParseDuration(value) if err != nil { return errors.Wrap(err, fmt.Errorf("invalid '%s' value: %q", key, value)) } } else if handler.bool != nil { *handler.bool, err = strconv.ParseBool(value) if err != nil { return errors.Wrap(err, fmt.Errorf("invalid '%s' value: %q", key, value)) } } else { return fmt.Errorf("redis: unexpected option: %s", key) } } else { return fmt.Errorf("redis: unexpected option: %s", key) } return nil } // Redis cache storage. type Redis struct { storage.TablePrefix client redis.UniversalClient } // Close redis connection. func (r *Redis) Close() error { return r.client.Close() } func (r *Redis) Ping() error { return r.client.Ping(context.Background()).Err() } // Init nothing. 
func (r *Redis) Init() error { return nil } func (r *Redis) Scan(work func(string) error) error { ctx := context.Background() if clusterClient, isCluster := r.client.(*redis.ClusterClient); isCluster { return clusterClient.ForEachMaster(ctx, func(ctx context.Context, client *redis.Client) error { return r.scan(ctx, client, work) }) } else { return r.scan(ctx, r.client, work) } } func (r *Redis) scan(ctx context.Context, client redis.UniversalClient, work func(string) error) error { var ( result []string cursor uint64 err error ) for { result, cursor, err = client.Scan(ctx, cursor, string(r.TablePrefix)+"*", 0).Result() if err != nil { return errors.Trace(err) } for _, key := range result { if err = work(key[len(r.TablePrefix):]); err != nil { return errors.Trace(err) } } if cursor == 0 { return nil } } } func (r *Redis) Purge() error { ctx := context.Background() if clusterClient, isCluster := r.client.(*redis.ClusterClient); isCluster { return clusterClient.ForEachMaster(ctx, func(ctx context.Context, client *redis.Client) error { return r.purge(ctx, client, isCluster) }) } else { return r.purge(ctx, r.client, isCluster) } } func (r *Redis) purge(ctx context.Context, client redis.UniversalClient, isCluster bool) error { var ( result []string cursor uint64 err error ) for { result, cursor, err = client.Scan(ctx, cursor, string(r.TablePrefix)+"*", 0).Result() if err != nil { return errors.Trace(err) } if len(result) > 0 { if isCluster { p := client.Pipeline() for _, key := range result { if err = p.Del(ctx, key).Err(); err != nil { return errors.Trace(err) } } if _, err = p.Exec(ctx); err != nil { return errors.Trace(err) } } else { if err = client.Del(ctx, result...).Err(); err != nil { return errors.Trace(err) } } } if cursor == 0 { return nil } } } func (r *Redis) Set(ctx context.Context, values ...Value) error { p := r.client.Pipeline() for _, v := range values { if err := p.Set(ctx, r.Key(v.name), v.value, 0).Err(); err != nil { return errors.Trace(err) } } _, err := p.Exec(ctx) return errors.Trace(err) } // Get returns a value from Redis. func (r *Redis) Get(ctx context.Context, key string) *ReturnValue { val, err := r.client.Get(ctx, r.Key(key)).Result() if err != nil { if err == redis.Nil { return &ReturnValue{err: errors.Annotate(ErrObjectNotExist, key)} } return &ReturnValue{err: err} } return &ReturnValue{value: val} } // Delete object from Redis. func (r *Redis) Delete(ctx context.Context, key string) error { return r.client.Del(ctx, r.Key(key)).Err() } // GetSet returns members of a set from Redis. func (r *Redis) GetSet(ctx context.Context, key string) ([]string, error) { return r.client.SMembers(ctx, r.Key(key)).Result() } // SetSet overrides a set with members in Redis. func (r *Redis) SetSet(ctx context.Context, key string, members ...string) error { if len(members) == 0 { return nil } // convert strings to interfaces values := make([]interface{}, 0, len(members)) for _, member := range members { values = append(values, member) } // push set pipeline := r.client.Pipeline() pipeline.Del(ctx, r.Key(key)) pipeline.SAdd(ctx, r.Key(key), values...) _, err := pipeline.Exec(ctx) return err } // AddSet adds members to a set in Redis. 
func (r *Redis) AddSet(ctx context.Context, key string, members ...string) error { if len(members) == 0 { return nil } // convert strings to interfaces values := make([]interface{}, 0, len(members)) for _, member := range members { values = append(values, member) } // push set return r.client.SAdd(ctx, r.Key(key), values...).Err() } // RemSet removes members from a set in Redis. func (r *Redis) RemSet(ctx context.Context, key string, members ...string) error { if len(members) == 0 { return nil } return r.client.SRem(ctx, r.Key(key), members).Err() } // GetSorted get scores from sorted set. func (r *Redis) GetSorted(ctx context.Context, key string, begin, end int) ([]Scored, error) { members, err := r.client.ZRevRangeWithScores(ctx, r.Key(key), int64(begin), int64(end)).Result() if err != nil { return nil, err } results := make([]Scored, 0, len(members)) for _, member := range members { results = append(results, Scored{Id: member.Member.(string), Score: member.Score}) } return results, nil } func (r *Redis) GetSortedByScore(ctx context.Context, key string, begin, end float64) ([]Scored, error) { members, err := r.client.ZRangeByScoreWithScores(ctx, r.Key(key), &redis.ZRangeBy{ Min: strconv.FormatFloat(begin, 'g', -1, 64), Max: strconv.FormatFloat(end, 'g', -1, 64), Offset: 0, Count: -1, }).Result() if err != nil { return nil, err } results := make([]Scored, 0, len(members)) for _, member := range members { results = append(results, Scored{Id: member.Member.(string), Score: member.Score}) } return results, nil } func (r *Redis) RemSortedByScore(ctx context.Context, key string, begin, end float64) error { return r.client.ZRemRangeByScore(ctx, r.Key(key), strconv.FormatFloat(begin, 'g', -1, 64), strconv.FormatFloat(end, 'g', -1, 64)). Err() } // AddSorted add scores to sorted set. func (r *Redis)
(ctx context.Context, sortedSets ...SortedSet) error { p := r.client.Pipeline() for _, sorted := range sortedSets { if len(sorted.scores) > 0 { members := make([]redis.Z, 0, len(sorted.scores)) for _, score := range sorted.scores { members = append(members, redis.Z{Member: score.Id, Score: score.Score}) } p.ZAdd(ctx, r.Key(sorted.name), members...) } } _, err := p.Exec(ctx) return err } // SetSorted set scores in sorted set and clear previous scores. func (r *Redis) SetSorted(ctx context.Context, key string, scores []Scored) error { members := make([]redis.Z, 0, len(scores)) for _, score := range scores { members = append(members, redis.Z{Member: score.Id, Score: float64(score.Score)}) } pipeline := r.client.Pipeline() pipeline.Del(ctx, r.Key(key)) if len(scores) > 0 { pipeline.ZAdd(ctx, r.Key(key), members...) } _, err := pipeline.Exec(ctx) return err } // RemSorted method of NoDatabase returns ErrNoDatabase. func (r *Redis) RemSorted(ctx context.Context, members ...SetMember) error { if len(members) == 0 { return nil } pipe := r.client.Pipeline() for _, member := range members { pipe.ZRem(ctx, r.Key(member.name), member.member) } _, err := pipe.Exec(ctx) return errors.Trace(err) }
AddSorted
identifier_name
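ParseRedisClusterURL in the redis.go listings splits everything after "?" on ';' or '&' and dispatches each key to an int, bool, or duration field of the cluster options. A stripped-down sketch of that dispatch idea, using a few illustrative fields rather than the full go-redis ClusterOptions set, with error handling omitted:

package main

import (
	"fmt"
	"strconv"
	"strings"
	"time"
)

// opts stands in for the subset of connection options being parsed; the
// field names are illustrative only.
type opts struct {
	PoolSize    int
	PoolFIFO    bool
	DialTimeout time.Duration
}

func main() {
	query := "pool_size=32;pool_fifo=true&dial_timeout=2s" // hypothetical URL tail
	// Accept either ';' or '&' as the separator, as extractQueryArgsFromURI does.
	pairs := strings.FieldsFunc(query, func(r rune) bool { return r == ';' || r == '&' })

	var o opts
	for _, p := range pairs {
		kv := strings.SplitN(p, "=", 2)
		if len(kv) != 2 {
			continue
		}
		switch strings.ToLower(kv[0]) {
		case "pool_size":
			o.PoolSize, _ = strconv.Atoi(kv[1])
		case "pool_fifo":
			o.PoolFIFO, _ = strconv.ParseBool(kv[1])
		case "dial_timeout":
			o.DialTimeout, _ = time.ParseDuration(kv[1])
		}
	}
	fmt.Printf("%+v\n", o) // {PoolSize:32 PoolFIFO:true DialTimeout:2s}
}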
redis.go
// Copyright 2020 gorse Project Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package cache import ( "context" "fmt" "github.com/go-redis/redis/v9" "github.com/juju/errors" "github.com/zhenghaoz/gorse/storage" "net/url" "strconv" "strings" "time" ) func ParseRedisClusterURL(redisURL string) (*redis.ClusterOptions, error) { options := &redis.ClusterOptions{} uri := redisURL var err error if strings.HasPrefix(redisURL, storage.RedisClusterPrefix) { uri = uri[len(storage.RedisClusterPrefix):] } else { return nil, fmt.Errorf("scheme must be \"redis+cluster\"") } if idx := strings.Index(uri, "@"); idx != -1 { userInfo := uri[:idx] uri = uri[idx+1:] username := userInfo var password string if idx := strings.Index(userInfo, ":"); idx != -1 { username = userInfo[:idx] password = userInfo[idx+1:] } // Validate and process the username. if strings.Contains(username, "/") { return nil, fmt.Errorf("unescaped slash in username") } options.Username, err = url.PathUnescape(username) if err != nil { return nil, errors.Wrap(err, fmt.Errorf("invalid username")) } // Validate and process the password. if strings.Contains(password, ":") { return nil, fmt.Errorf("unescaped colon in password") } if strings.Contains(password, "/") { return nil, fmt.Errorf("unescaped slash in password") } options.Password, err = url.PathUnescape(password) if err != nil { return nil, errors.Wrap(err, fmt.Errorf("invalid password")) } } // fetch the hosts field hosts := uri if idx := strings.IndexAny(uri, "/?@"); idx != -1 { if uri[idx] == '@' { return nil, fmt.Errorf("unescaped @ sign in user info") } hosts = uri[:idx] } options.Addrs = strings.Split(hosts, ",") uri = uri[len(hosts):] if len(uri) > 0 && uri[0] == '/' { uri = uri[1:] } // grab connection arguments from URI connectionArgsFromQueryString, err := extractQueryArgsFromURI(uri) if err != nil { return nil, err } for _, pair := range connectionArgsFromQueryString { err = addOption(options, pair) if err != nil { return nil, err } } return options, nil } func extractQueryArgsFromURI(uri string) ([]string, error) { if len(uri) == 0 { return nil, nil } if uri[0] != '?' { return nil, errors.New("must have a ? 
redis.go
// Copyright 2020 gorse Project Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package cache import ( "context" "fmt" "github.com/go-redis/redis/v9" "github.com/juju/errors" "github.com/zhenghaoz/gorse/storage" "net/url" "strconv" "strings" "time" ) func ParseRedisClusterURL(redisURL string) (*redis.ClusterOptions, error) { options := &redis.ClusterOptions{} uri := redisURL var err error if strings.HasPrefix(redisURL, storage.RedisClusterPrefix) { uri = uri[len(storage.RedisClusterPrefix):] } else { return nil, fmt.Errorf("scheme must be \"redis+cluster\"") } if idx := strings.Index(uri, "@"); idx != -1 { userInfo := uri[:idx] uri = uri[idx+1:] username := userInfo var password string if idx := strings.Index(userInfo, ":"); idx != -1 { username = userInfo[:idx] password = userInfo[idx+1:] } // Validate and process the username. if strings.Contains(username, "/") { return nil, fmt.Errorf("unescaped slash in username") } options.Username, err = url.PathUnescape(username) if err != nil { return nil, errors.Wrap(err, fmt.Errorf("invalid username")) } // Validate and process the password. if strings.Contains(password, ":") { return nil, fmt.Errorf("unescaped colon in password") } if strings.Contains(password, "/") { return nil, fmt.Errorf("unescaped slash in password") } options.Password, err = url.PathUnescape(password) if err != nil { return nil, errors.Wrap(err, fmt.Errorf("invalid password")) } } // fetch the hosts field hosts := uri if idx := strings.IndexAny(uri, "/?@"); idx != -1 { if uri[idx] == '@' { return nil, fmt.Errorf("unescaped @ sign in user info") } hosts = uri[:idx] } options.Addrs = strings.Split(hosts, ",") uri = uri[len(hosts):] if len(uri) > 0 && uri[0] == '/' { uri = uri[1:] } // grab connection arguments from URI connectionArgsFromQueryString, err := extractQueryArgsFromURI(uri) if err != nil { return nil, err } for _, pair := range connectionArgsFromQueryString { err = addOption(options, pair) if err != nil { return nil, err } } return options, nil } func extractQueryArgsFromURI(uri string) ([]string, error) { if len(uri) == 0 { return nil, nil } if uri[0] != '?' { return nil, errors.New("must have a ? 
separator between path and query") } uri = uri[1:] if len(uri) == 0 { return nil, nil } return strings.FieldsFunc(uri, func(r rune) bool { return r == ';' || r == '&' }), nil } type optionHandler struct { int *int bool *bool duration *time.Duration } func addOption(options *redis.ClusterOptions, pair string) error { kv := strings.SplitN(pair, "=", 2) if len(kv) != 2 || kv[0] == "" { return fmt.Errorf("invalid option") } key, err := url.QueryUnescape(kv[0]) if err != nil { return errors.Wrap(err, errors.Errorf("invalid option key %q", kv[0])) } value, err := url.QueryUnescape(kv[1]) if err != nil { return errors.Wrap(err, errors.Errorf("invalid option value %q", kv[1])) } handlers := map[string]optionHandler{ "max_retries": {int: &options.MaxRetries}, "min_retry_backoff": {duration: &options.MinRetryBackoff}, "max_retry_backoff": {duration: &options.MaxRetryBackoff}, "dial_timeout": {duration: &options.DialTimeout}, "read_timeout": {duration: &options.ReadTimeout}, "write_timeout": {duration: &options.WriteTimeout}, "pool_fifo": {bool: &options.PoolFIFO}, "pool_size": {int: &options.PoolSize}, "pool_timeout": {duration: &options.PoolTimeout}, "min_idle_conns": {int: &options.MinIdleConns}, "max_idle_conns": {int: &options.MaxIdleConns}, "conn_max_idle_time": {duration: &options.ConnMaxIdleTime}, "conn_max_lifetime": {duration: &options.ConnMaxLifetime}, } lowerKey := strings.ToLower(key) if handler, ok := handlers[lowerKey]; ok { if handler.int != nil { *handler.int, err = strconv.Atoi(value) if err != nil { return errors.Wrap(err, fmt.Errorf("invalid '%s' value: %q", key, value)) } } else if handler.duration != nil { *handler.duration, err = time.ParseDuration(value) if err != nil { return errors.Wrap(err, fmt.Errorf("invalid '%s' value: %q", key, value)) } } else if handler.bool != nil { *handler.bool, err = strconv.ParseBool(value) if err != nil { return errors.Wrap(err, fmt.Errorf("invalid '%s' value: %q", key, value)) } } else { return fmt.Errorf("redis: unexpected option: %s", key) } } else { return fmt.Errorf("redis: unexpected option: %s", key) } return nil } // Redis cache storage. type Redis struct { storage.TablePrefix client redis.UniversalClient } // Close redis connection. func (r *Redis) Close() error { return r.client.Close() } func (r *Redis) Ping() error { return r.client.Ping(context.Background()).Err() } // Init nothing. 
// Init nothing. func (r *Redis) Init() error { return nil } func (r *Redis) Scan(work func(string) error) error { ctx := context.Background() if clusterClient, isCluster := r.client.(*redis.ClusterClient); isCluster { return clusterClient.ForEachMaster(ctx, func(ctx context.Context, client *redis.Client) error { return r.scan(ctx, client, work) }) } else { return r.scan(ctx, r.client, work) } } func (r *Redis) scan(ctx context.Context, client redis.UniversalClient, work func(string) error) error { var ( result []string cursor uint64 err error ) for { result, cursor, err = client.Scan(ctx, cursor, string(r.TablePrefix)+"*", 0).Result() if err != nil { return errors.Trace(err) } for _, key := range result { if err = work(key[len(r.TablePrefix):]); err != nil { return errors.Trace(err) } } if cursor == 0 { return nil } } } func (r *Redis) Purge() error { ctx := context.Background() if clusterClient, isCluster := r.client.(*redis.ClusterClient); isCluster { return clusterClient.ForEachMaster(ctx, func(ctx context.Context, client *redis.Client) error { return r.purge(ctx, client, isCluster) }) } else { return r.purge(ctx, r.client, isCluster) } } func (r *Redis) purge(ctx context.Context, client redis.UniversalClient, isCluster bool) error { var ( result []string cursor uint64 err error ) for { result, cursor, err = client.Scan(ctx, cursor, string(r.TablePrefix)+"*", 0).Result() if err != nil { return errors.Trace(err) } if len(result) > 0 { if isCluster { p := client.Pipeline() for _, key := range result { if err = p.Del(ctx, key).Err(); err != nil { return errors.Trace(err) } } if _, err = p.Exec(ctx); err != nil { return errors.Trace(err) } } else { if err = client.Del(ctx, result...).Err(); err != nil { return errors.Trace(err) } } } if cursor == 0 { return nil } } } func (r *Redis) Set(ctx context.Context, values ...Value) error { p := r.client.Pipeline() for _, v := range values { if err := p.Set(ctx, r.Key(v.name), v.value, 0).Err(); err != nil { return errors.Trace(err) } } _, err := p.Exec(ctx) return errors.Trace(err) } // Get returns a value from Redis. func (r *Redis) Get(ctx context.Context, key string) *ReturnValue { val, err := r.client.Get(ctx, r.Key(key)).Result() if err != nil { if err == redis.Nil { return &ReturnValue{err: errors.Annotate(ErrObjectNotExist, key)} } return &ReturnValue{err: err} } return &ReturnValue{value: val} } // Delete object from Redis. func (r *Redis) Delete(ctx context.Context, key string) error { return r.client.Del(ctx, r.Key(key)).Err() } // GetSet returns members of a set from Redis. func (r *Redis) GetSet(ctx context.Context, key string) ([]string, error) { return r.client.SMembers(ctx, r.Key(key)).Result() } // SetSet overrides a set with members in Redis. func (r *Redis) SetSet(ctx context.Context, key string, members ...string) error { if len(members) == 0 { return nil } // convert strings to interfaces values := make([]interface{}, 0, len(members)) for _, member := range members { values = append(values, member) } // push set pipeline := r.client.Pipeline() pipeline.Del(ctx, r.Key(key)) pipeline.SAdd(ctx, r.Key(key), values...) _, err := pipeline.Exec(ctx) return err } // AddSet adds members to a set in Redis. 
func (r *Redis) AddSet(ctx context.Context, key string, members ...string) error { if len(members) == 0 { return nil } // convert strings to interfaces values := make([]interface{}, 0, len(members)) for _, member := range members { values = append(values, member) } // push set return r.client.SAdd(ctx, r.Key(key), values...).Err() } // RemSet removes members from a set in Redis. func (r *Redis) RemSet(ctx context.Context, key string, members ...string) error { if len(members) == 0 { return nil } return r.client.SRem(ctx, r.Key(key), members).Err() } // GetSorted get scores from sorted set. func (r *Redis) GetSorted(ctx context.Context, key string, begin, end int) ([]Scored, error) { members, err := r.client.ZRevRangeWithScores(ctx, r.Key(key), int64(begin), int64(end)).Result() if err != nil { return nil, err } results := make([]Scored, 0, len(members)) for _, member := range members { results = append(results, Scored{Id: member.Member.(string), Score: member.Score}) } return results, nil } func (r *Redis) GetSortedByScore(ctx context.Context, key string, begin, end float64) ([]Scored, error) { members, err := r.client.ZRangeByScoreWithScores(ctx, r.Key(key), &redis.ZRangeBy{ Min: strconv.FormatFloat(begin, 'g', -1, 64), Max: strconv.FormatFloat(end, 'g', -1, 64), Offset: 0, Count: -1, }).Result() if err != nil { return nil, err } results := make([]Scored, 0, len(members)) for _, member := range members { results = append(results, Scored{Id: member.Member.(string), Score: member.Score}) } return results, nil } func (r *Redis) RemSortedByScore(ctx context.Context, key string, begin, end float64) error { return r.client.ZRemRangeByScore(ctx, r.Key(key), strconv.FormatFloat(begin, 'g', -1, 64), strconv.FormatFloat(end, 'g', -1, 64)). Err() } // AddSorted add scores to sorted set. func (r *Redis) AddSorted(ctx context.Context, sortedSets ...SortedSet) error { p := r.client.Pipeline() for _, sorted := range sortedSets { if len(sorted.scores) > 0 { members := make([]redis.Z, 0, len(sorted.scores)) for _, score := range sorted.scores { members = append(members, redis.Z{Member: score.Id, Score: score.Score}) } p.ZAdd(ctx, r.Key(sorted.name), members...) } } _, err := p.Exec(ctx) return err } // SetSorted set scores in sorted set and clear previous scores. func (r *Redis) SetSorted(ctx context.Context, key string, scores []Scored) error { members := make([]redis.Z, 0, len(scores)) for _, score := range scores { members = append(members, redis.Z{Member: score.Id, Score: float64(score.Score)}) } pipeline := r.client.Pipeline() pipeline.Del(ctx, r.Key(key)) if len(scores) > 0 { pipeline.ZAdd(ctx, r.Key(key), members...) } _, err := pipeline.Exec(ctx) return err } // RemSorted method of NoDatabase returns ErrNoDatabase. func (r *Redis) RemSorted(ctx context.Context, members ...SetMember) error { if len(members) == 0 { return nil } pipe := r.client.Pipeline() for _, member := range members { pipe.ZRem(ctx, r.Key(member.name), member.member) } _, err := pipe.Exec(ctx) return errors.Trace(err) }
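A minimal usage sketch for ParseRedisClusterURL, assuming the storage.RedisClusterPrefix constant is the literal "redis+cluster://" scheme and that the package is importable as github.com/zhenghaoz/gorse/storage/cache; both are inferred from context rather than confirmed by this file. The query options map onto go-redis ClusterOptions exactly as addOption parses them.

package main

import (
	"fmt"

	"github.com/zhenghaoz/gorse/storage/cache" // assumed import path for the cache package above
)

func main() {
	// Hypothetical cluster URL: user info, two seed nodes, then options as query arguments.
	u := "redis+cluster://gorse:secret@10.0.0.1:6379,10.0.0.2:6379/?dial_timeout=3s&pool_size=32&conn_max_lifetime=1h"

	opts, err := cache.ParseRedisClusterURL(u)
	if err != nil {
		panic(err)
	}

	fmt.Println(opts.Addrs)           // [10.0.0.1:6379 10.0.0.2:6379]
	fmt.Println(opts.Username)        // gorse
	fmt.Println(opts.DialTimeout)     // 3s (parsed by time.ParseDuration)
	fmt.Println(opts.PoolSize)        // 32 (parsed by strconv.Atoi)
	fmt.Println(opts.ConnMaxLifetime) // 1h0m0s
}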
input.go
package tui import ( "context" "fmt" "sync" "time" "unicode/utf8" "golang.org/x/crypto/ssh/terminal" ) const ( // Basic keycodes, map directly to their ANSI numbering // order strictly matters Null rune = iota // ctrl-space CtrlA CtrlB CtrlC CtrlD // aka EOF CtrlE CtrlF CtrlG CtrlH // \b backsp But actual backspace is 127. CtrlBackspace is 8 Tab // \t aka Ctrl-I CtrlJ // \n, sometimes enter CtrlK CtrlL CtrlM // aka \r, also sometimes enter CtrlN CtrlO CtrlP CtrlQ CtrlR CtrlS CtrlT CtrlU CtrlV CtrlW CtrlX CtrlY CtrlZ ESC // ctrl-[ CtrlFwdSlash // ctrl-\ CtrlBackBracket // ctrl-] CtrlCaret // ctrl-6 (^) CtrlUnderscore // shift-ctrl - /* 32-126 are the printable keyboard keys For regular pressing of these keys, the following table list isn't used. The keys are returned as KeyPrintable with their rune value. Where KeySpecial for the same rune value, it is equivalent to Alt[key]. */ AltSpace // [ESC, 32] = Alt + space AltBang // [ESC, 33] i.e. shift-alt-1 AltDQuo AltHash AltDollar AltPercent AltAmpersand AltSQuo AltOpenParen AltCloseParen AltStar AltPlus AltComma AltMinus AltPeriod AltSlash Alt0 Alt1 Alt2 Alt3 Alt4 Alt5 Alt6 Alt7 Alt8 Alt9 AltColon AltSemicolon AltLT AltEqual AltGT AltQuestion AltAt AltA AltB AltC AltD AltE AltF AltG AltH AltI AltJ AltK AltL AltM AltN AltO AltP AltQ AltR AltS AltT AltU AltV AltW AltX AltY AltZ AltOpenBracket AltFwdSlash AltCloseBracket AltCaret AltUnderscore AltGrave Alta Altb Altc Altd Alte Altf Altg Alth Alti Altj Altk Altl Altm Altn Alto Altp Altq Altr Alts Altt Altu Altv Altw Altx Alty Altz AltOpenCurly AltPipe AltCloseCurly AltTilde AltBS //End printable // actual number values below are arbitrary, are not in order. // Extended keyCodes Del AltDel BTab BSpace PgUp PgDn Up Down Right Left Home End // /relative/ order of the next 8 matter SUp // Shift SDown SRight SLeft CtrlUp CtrlDown CtrlRight CtrlLeft // don't actually need to be in order F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 //sequential calculated. Keep in relative order CtrlAlta CtrlAltb CtrlAltc CtrlAltd CtrlAlte CtrlAltf CtrlAltg CtrlAlth CtrlAlti CtrlAltj CtrlAltk CtrlAltl CtrlAltm CtrlAltn CtrlAlto CtrlAltp CtrlAltq CtrlAltr CtrlAlts CtrlAltt CtrlAltu CtrlAltv CtrlAltw CtrlAltx CtrlAlty CtrlAltz ) type EvType uint8 const ( EventInvalid EvType = iota KeySpecial KeyPrint Mouse ) /* How to determine mouse action: Mousedown: Type=Mouse && Btn != 3 && !Motion Mouseup: Type=Mouse && Btn == 3 && !Motion Mousedrag: Type=Mouse && Btn != 3 && Motion Mousemove: Type=Mouse && Btn == 3 && Motion ScrollUp: Type=Mouse && Btn=4 ScrollDn: Type=Mouse && Btn=5 */ type MouseEvent struct { Y int X int Btn int // 0=Primary, 1=Middle, 2=Right, 3=Release, 4=ScrUp, 5=ScrDown Shift bool Meta bool Ctrl bool Motion bool buf []byte } type Event struct { Type EvType Key rune M *MouseEvent } // returns true if a specific character int(rune) is a printable character (alphanumeric, punctuation) func Printable(i int) bool { return i >= 32 && i <= 126 } /* Grabs Stdin(or whatever passed fd) to listen for keyboard input. Returns 3 things: - a channel to listen for key events on - a terminal restore function. 
Always safe to call, especially when error is set - error condition This is the primary use of the top-level tui package, if you intend to capture input, or mouse events */ func GetInput(ctx context.Context, fd int) (<-chan Event, func() error, error) { ch := make(chan Event, 1000) st, err := terminal.GetState(fd) if err != nil { return nil, func() error { return nil }, err } restore := func() error { return terminal.Restore(fd, st) } _, err = terminal.MakeRaw(fd) if err != nil { return nil, restore, err } ib := inputBuf{b: make([]byte, 0, 9)} go func() { for { select { case <-ctx.Done(): return case ev := <-ib.readEvent(fd): ch <- ev } } }() return ch, restore, nil } type inputBuf struct { b []byte mu sync.Mutex } func (ib *inputBuf) readEvent(fd int) <-chan Event { ch := make(chan Event) go func() { ib.mu.Lock() defer func() { ib.mu.Unlock() }() for ; len(ib.b) == 0; ib.b = fillBuf(fd, ib.b) { } if len(ib.b) == 0 { close(ch) return } sz := 1 defer func() { ib.b = ib.b[sz:] }() switch ib.b[0] { case byte(CtrlC), byte(CtrlG), byte(CtrlQ): ch <- Event{KeySpecial, rune(ib.b[0]), nil} return case 127: ch <- Event{KeySpecial, BSpace, nil} return case 0: ch <- Event{KeySpecial, Null, nil} // Ctrl-space? return case byte(ESC): ch <- ib.escSequence(&sz) return } if ib.b[0] < 32 { // Ctrl-A_Z ch <- Event{KeySpecial, rune(ib.b[0]), nil} return } char, rsz := utf8.DecodeRune(ib.b) if char == utf8.RuneError { ch <- Event{KeySpecial, ESC, nil} return } sz = rsz ch <- Event{KeyPrint, char, nil} }() return ch } /* * Gets first byte, blocking to do so. * Tries to get any extra bytes within a 100ms timespan * like esc key sequences (arrows, etc) * */ func fillBuf(fd int, buf []byte) []byte { const pollInt = 5 //ms const span = 100 //ms -- reflected via retries*pollInt c, ok := getchar(fd, false) if !ok { return buf } buf = append(buf, byte(c)) retries := 0 if c == int(ESC) { retries = span / pollInt // 20 } pc := c for { c, ok := getchar(fd, true) if !ok { if retries > 0 { retries-- time.Sleep(pollInt * time.Millisecond) continue } break } else if c == int(ESC) && pc != c { retries = span / pollInt // got the next char, keep going } else { retries = 0 } buf = append(buf, byte(c)) pc = c } return buf } func
(fd int, nonblock bool) (int, bool) { b := make([]byte, 1) err := setNonBlock(fd, nonblock) if err != nil { return 0, false } if n, err := sysRead(fd, b); err != nil || n < 1 { return 0, false } return int(b[0]), true } //@todo: more shift/ctrl/alt of extended keys like Home, F#, PgUp //http://www.manmrk.net/tutorials/ISPF/XE/xehelp/html/HID00000594.htm //this is the ugliest, code ever. to check the seemingly most random //assignment of codes to meaningful keys func (ib *inputBuf) escSequence(sz *int) Event { if len(ib.b) < 2 { return Event{KeySpecial, ESC, nil} } *sz = 2 switch ib.b[1] { case byte(ESC): return Event{KeySpecial, ESC, nil} case 127: return Event{KeySpecial, AltBS, nil} case 91, 79: // [, O if len(ib.b) < 3 { if ib.b[1] == '[' { return Event{KeySpecial, AltOpenBracket, nil} } else if ib.b[1] == 'O' { return Event{KeySpecial, AltO, nil} } return debugEv(ib.b) } *sz = 3 switch ib.b[2] { case 65: return Event{KeySpecial, Up, nil} case 66: return Event{KeySpecial, Down, nil} case 67: return Event{KeySpecial, Right, nil} case 68: return Event{KeySpecial, Left, nil} case 90: return Event{KeySpecial, BTab, nil} case 72: return Event{KeySpecial, Home, nil} case 70: return Event{KeySpecial, End, nil} case 77: return ib.mouseSequence(sz) case 80: return Event{KeySpecial, F1, nil} case 81: return Event{KeySpecial, F2, nil} case 82: return Event{KeySpecial, F3, nil} case 49, 50, 51, 52, 53, 54: if len(ib.b) < 4 { return debugEv(ib.b) } *sz = 4 switch ib.b[2] { case 50: if len(ib.b) == 5 && ib.b[4] == 126 { *sz = 5 switch ib.b[3] { case 48: return Event{KeySpecial, F9, nil} case 49: return Event{KeySpecial, F10, nil} // @todo: WTF does 50 mean? case 51: return Event{KeySpecial, F11, nil} case 52: return Event{KeySpecial, F12, nil} } } else if ib.b[3] == '0' && (ib.b[4] == '0' || ib.b[4] == '1') && ib.b[5] == '~' { // bracketed paste mode. \e[200~ .. 
\e[201~ //discard seq from buffer ib.b = ib.b[6:] *sz = 0 return Event{KeySpecial, Null, nil} } return debugEv(ib.b) case 51: if len(ib.b) >= 6 && ib.b[3] == 59 && ib.b[4] == 51 && ib.b[5] == '~' { *sz = 6 return Event{KeySpecial, AltDel, nil} } return Event{KeySpecial, Del, nil} case 52: return Event{KeySpecial, End, nil} case 53: return Event{KeySpecial, PgUp, nil} case 54: return Event{KeySpecial, PgDn, nil} case 49: //'1' switch ib.b[3] { case 126: return Event{KeySpecial, Home, nil} case 53, 55, 56, 57: if len(ib.b) == 5 && ib.b[4] == 126 { *sz = 5 switch ib.b[3] { case 53: return Event{KeySpecial, F5, nil} case 55: return Event{KeySpecial, F6, nil} case 56: return Event{KeySpecial, F7, nil} case 57: return Event{KeySpecial, F8, nil} } } return debugEv(ib.b) case ';': //59 if len(ib.b) != 6 { return debugEv(ib.b) } *sz = 6 if ib.b[4] != '2' && ib.b[4] != '5' { return debugEv(ib.b) } if ib.b[5] < 'A' || ib.b[5] > 'D' { return debugEv(ib.b) } //ESC[1;2A == shift-up //ESC[1;5A == ctrl-up k := SUp if ib.b[4] == '5' { // move to up Ctrl* k += 4 } k += rune(int(ib.b[5]) - int('A')) // set arrow direction return Event{KeySpecial, k, nil} } } } } // ESC-0 ~ ESC-26 == ctrl-alt-[key] if ib.b[1] >= 1 && ib.b[1] <= 'z'-'a'+1 { return Event{KeySpecial, rune(int(CtrlAlta) + int(ib.b[1]) - 1), nil} } // ESC-32 ~ ESC-126 == alt-[key] if Printable(int(ib.b[1])) { return Event{KeySpecial, rune(ib.b[1]), nil} } return debugEv(ib.b) } // mouse stuff func debugEv(buf []byte) Event { b := make([]byte, len(buf)) copy(b, buf) return Event{EventInvalid, 0, &MouseEvent{0, 0, 0, false, false, false, false, b}} } // https://www.xfree86.org/current/ctlseqs.html#Mouse%20Tracking // \x1b[M<button><x+33><y+33> func (ib *inputBuf) mouseSequence(sz *int) Event { b := make([]byte, len(ib.b)) copy(b, ib.b) if len(ib.b) < 6 { fmt.Printf("short mouse seq: %v\n", ib.b) return debugEv(ib.b) } *sz = 6 evCode := int(b[3] - 32) bNum := evCode & 0x3 // low two bits, 00=MB1, 01=MB2, 10=MB3, 11=Release if evCode&(1<<6) != 0 { bNum += 4 // scroll buttons set a high bit (+32) } shift := evCode&(1<<2) != 0 // 4 meta := evCode&(1<<3) != 0 // 8 ctrl := evCode&(1<<4) != 0 // 16 motion := evCode&(1<<5) != 0 //32, motion indicator x := int(ib.b[4] - 33) y := int(ib.b[5] - 33) // - yoffset if any return Event{Mouse, 0, &MouseEvent{y, x, bNum, shift, meta, ctrl, motion, b}} }
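A minimal usage sketch for GetInput: put the terminal into raw mode, print decoded events, and quit on Ctrl-C or a bare ESC. The import path is hypothetical (it is not shown in this file), and \r\n is used in the output because the terminal stays in raw mode while reading.

package main

import (
	"context"
	"fmt"
	"os"

	"example.com/yourapp/tui" // hypothetical import path for the tui package above
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	events, restore, err := tui.GetInput(ctx, int(os.Stdin.Fd()))
	defer restore() // always safe to call, even when err is non-nil
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}

	for ev := range events {
		switch ev.Type {
		case tui.KeyPrint:
			fmt.Printf("printable: %q\r\n", ev.Key)
		case tui.KeySpecial:
			if ev.Key == tui.CtrlC || ev.Key == tui.ESC {
				return // quit on Ctrl-C or a bare ESC press
			}
			fmt.Printf("special key code: %d\r\n", ev.Key)
		case tui.Mouse:
			// Mouse events only arrive if the terminal has mouse reporting enabled elsewhere.
			fmt.Printf("mouse: btn=%d x=%d y=%d motion=%v\r\n", ev.M.Btn, ev.M.X, ev.M.Y, ev.M.Motion)
		}
	}
}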
package tui import ( "context" "fmt" "sync" "time" "unicode/utf8" "golang.org/x/crypto/ssh/terminal" ) const ( // Basic keycodes, map directly to their ANSI numbering // order strictly matters Null rune = iota // ctrl-space CtrlA CtrlB CtrlC CtrlD // aka EOF CtrlE CtrlF CtrlG CtrlH // \b backsp But actual backspace is 127. CtrlBackspace is 8 Tab // \t aka Ctrl-I CtrlJ // \n, sometimes enter CtrlK CtrlL CtrlM // aka \r, also sometimes enter CtrlN CtrlO CtrlP CtrlQ CtrlR CtrlS CtrlT CtrlU CtrlV CtrlW CtrlX CtrlY CtrlZ ESC // ctrl-[ CtrlFwdSlash // ctrl-\ CtrlBackBracket // ctrl-] CtrlCaret // ctrl-6 (^) CtrlUnderscore // shift-ctrl - /* 32-126 are the printable keyboard keys For regular pressing of these keys, the following table list isn't used. The keys are returned as KeyPrintable with their rune value. Where KeySpecial for the same rune value, it is equivalent to Alt[key]. */ AltSpace // [ESC, 32] = Alt + space AltBang // [ESC, 33] i.e. shift-alt-1 AltDQuo AltHash AltDollar AltPercent AltAmpersand AltSQuo AltOpenParen AltCloseParen AltStar AltPlus AltComma AltMinus AltPeriod AltSlash Alt0 Alt1 Alt2 Alt3 Alt4 Alt5 Alt6 Alt7 Alt8 Alt9 AltColon AltSemicolon AltLT AltEqual AltGT AltQuestion AltAt AltA AltB AltC AltD AltE AltF AltG AltH AltI AltJ AltK AltL AltM AltN AltO AltP AltQ AltR AltS AltT AltU AltV AltW AltX AltY AltZ AltOpenBracket AltFwdSlash AltCloseBracket AltCaret AltUnderscore AltGrave Alta Altb Altc Altd Alte Altf Altg Alth Alti Altj Altk Altl Altm Altn Alto Altp Altq Altr Alts Altt Altu Altv Altw Altx Alty Altz AltOpenCurly AltPipe AltCloseCurly AltTilde AltBS //End printable // actual number values below are arbitrary, are not in order. // Extended keyCodes Del AltDel BTab BSpace PgUp PgDn Up Down Right Left Home End // /relative/ order of the next 8 matter SUp // Shift SDown SRight SLeft CtrlUp CtrlDown CtrlRight CtrlLeft // don't actually need to be in order F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 //sequential calculated. Keep in relative order CtrlAlta CtrlAltb CtrlAltc CtrlAltd CtrlAlte CtrlAltf CtrlAltg CtrlAlth CtrlAlti CtrlAltj CtrlAltk CtrlAltl CtrlAltm CtrlAltn CtrlAlto CtrlAltp CtrlAltq CtrlAltr CtrlAlts CtrlAltt CtrlAltu CtrlAltv CtrlAltw CtrlAltx CtrlAlty CtrlAltz ) type EvType uint8 const ( EventInvalid EvType = iota KeySpecial KeyPrint Mouse ) /* How to determine mouse action: Mousedown: Type=Mouse && Btn != 3 && !Motion Mouseup: Type=Mouse && Btn == 3 && !Motion Mousedrag: Type=Mouse && Btn != 3 && Motion Mousemove: Type=Mouse && Btn == 3 && Motion ScrollUp: Type=Mouse && Btn=4 ScrollDn: Type=Mouse && Btn=5 */ type MouseEvent struct { Y int X int Btn int // 0=Primary, 1=Middle, 2=Right, 3=Release, 4=ScrUp, 5=ScrDown Shift bool Meta bool Ctrl bool Motion bool buf []byte } type Event struct { Type EvType Key rune M *MouseEvent } // returns true if a specific character int(rune) is a printable character (alphanumeric, punctuation) func Printable(i int) bool { return i >= 32 && i <= 126 } /* Grabs Stdin(or whatever passed fd) to listen for keyboard input. Returns 3 things: - a channel to listen for key events on - a terminal restore function. 
Always safe to call, especially when error is set - error condition This is the primary use of the top-level tui package, if you intend to capture input, or mouse events */ func GetInput(ctx context.Context, fd int) (<-chan Event, func() error, error) { ch := make(chan Event, 1000) st, err := terminal.GetState(fd) if err != nil { return nil, func() error { return nil }, err } restore := func() error { return terminal.Restore(fd, st) } _, err = terminal.MakeRaw(fd) if err != nil
ib := inputBuf{b: make([]byte, 0, 9)} go func() { for { select { case <-ctx.Done(): return case ev := <-ib.readEvent(fd): ch <- ev } } }() return ch, restore, nil } type inputBuf struct { b []byte mu sync.Mutex } func (ib *inputBuf) readEvent(fd int) <-chan Event { ch := make(chan Event) go func() { ib.mu.Lock() defer func() { ib.mu.Unlock() }() for ; len(ib.b) == 0; ib.b = fillBuf(fd, ib.b) { } if len(ib.b) == 0 { close(ch) return } sz := 1 defer func() { ib.b = ib.b[sz:] }() switch ib.b[0] { case byte(CtrlC), byte(CtrlG), byte(CtrlQ): ch <- Event{KeySpecial, rune(ib.b[0]), nil} return case 127: ch <- Event{KeySpecial, BSpace, nil} return case 0: ch <- Event{KeySpecial, Null, nil} // Ctrl-space? return case byte(ESC): ch <- ib.escSequence(&sz) return } if ib.b[0] < 32 { // Ctrl-A_Z ch <- Event{KeySpecial, rune(ib.b[0]), nil} return } char, rsz := utf8.DecodeRune(ib.b) if char == utf8.RuneError { ch <- Event{KeySpecial, ESC, nil} return } sz = rsz ch <- Event{KeyPrint, char, nil} }() return ch } /* * Gets first byte, blocking to do so. * Tries to get any extra bytes within a 100ms timespan * like esc key sequences (arrows, etc) * */ func fillBuf(fd int, buf []byte) []byte { const pollInt = 5 //ms const span = 100 //ms -- reflected via retries*pollInt c, ok := getchar(fd, false) if !ok { return buf } buf = append(buf, byte(c)) retries := 0 if c == int(ESC) { retries = span / pollInt // 20 } pc := c for { c, ok := getchar(fd, true) if !ok { if retries > 0 { retries-- time.Sleep(pollInt * time.Millisecond) continue } break } else if c == int(ESC) && pc != c { retries = span / pollInt // got the next char, keep going } else { retries = 0 } buf = append(buf, byte(c)) pc = c } return buf } func getchar(fd int, nonblock bool) (int, bool) { b := make([]byte, 1) err := setNonBlock(fd, nonblock) if err != nil { return 0, false } if n, err := sysRead(fd, b); err != nil || n < 1 { return 0, false } return int(b[0]), true } //@todo: more shift/ctrl/alt of extended keys like Home, F#, PgUp //http://www.manmrk.net/tutorials/ISPF/XE/xehelp/html/HID00000594.htm //this is the ugliest, code ever. to check the seemingly most random //assignment of codes to meaningful keys func (ib *inputBuf) escSequence(sz *int) Event { if len(ib.b) < 2 { return Event{KeySpecial, ESC, nil} } *sz = 2 switch ib.b[1] { case byte(ESC): return Event{KeySpecial, ESC, nil} case 127: return Event{KeySpecial, AltBS, nil} case 91, 79: // [, O if len(ib.b) < 3 { if ib.b[1] == '[' { return Event{KeySpecial, AltOpenBracket, nil} } else if ib.b[1] == 'O' { return Event{KeySpecial, AltO, nil} } return debugEv(ib.b) } *sz = 3 switch ib.b[2] { case 65: return Event{KeySpecial, Up, nil} case 66: return Event{KeySpecial, Down, nil} case 67: return Event{KeySpecial, Right, nil} case 68: return Event{KeySpecial, Left, nil} case 90: return Event{KeySpecial, BTab, nil} case 72: return Event{KeySpecial, Home, nil} case 70: return Event{KeySpecial, End, nil} case 77: return ib.mouseSequence(sz) case 80: return Event{KeySpecial, F1, nil} case 81: return Event{KeySpecial, F2, nil} case 82: return Event{KeySpecial, F3, nil} case 49, 50, 51, 52, 53, 54: if len(ib.b) < 4 { return debugEv(ib.b) } *sz = 4 switch ib.b[2] { case 50: if len(ib.b) == 5 && ib.b[4] == 126 { *sz = 5 switch ib.b[3] { case 48: return Event{KeySpecial, F9, nil} case 49: return Event{KeySpecial, F10, nil} // @todo: WTF does 50 mean? 
case 51: return Event{KeySpecial, F11, nil} case 52: return Event{KeySpecial, F12, nil} } } else if ib.b[3] == '0' && (ib.b[4] == '0' || ib.b[4] == '1') && ib.b[5] == '~' { // bracketed paste mode. \e[200~ .. \e[201~ //discard seq from buffer ib.b = ib.b[6:] *sz = 0 return Event{KeySpecial, Null, nil} } return debugEv(ib.b) case 51: if len(ib.b) >= 6 && ib.b[3] == 59 && ib.b[4] == 51 && ib.b[5] == '~' { *sz = 6 return Event{KeySpecial, AltDel, nil} } return Event{KeySpecial, Del, nil} case 52: return Event{KeySpecial, End, nil} case 53: return Event{KeySpecial, PgUp, nil} case 54: return Event{KeySpecial, PgDn, nil} case 49: //'1' switch ib.b[3] { case 126: return Event{KeySpecial, Home, nil} case 53, 55, 56, 57: if len(ib.b) == 5 && ib.b[4] == 126 { *sz = 5 switch ib.b[3] { case 53: return Event{KeySpecial, F5, nil} case 55: return Event{KeySpecial, F6, nil} case 56: return Event{KeySpecial, F7, nil} case 57: return Event{KeySpecial, F8, nil} } } return debugEv(ib.b) case ';': //59 if len(ib.b) != 6 { return debugEv(ib.b) } *sz = 6 if ib.b[4] != '2' && ib.b[4] != '5' { return debugEv(ib.b) } if ib.b[5] < 'A' || ib.b[5] > 'D' { return debugEv(ib.b) } //ESC[1;2A == shift-up //ESC[1;5A == ctrl-up k := SUp if ib.b[4] == '5' { // move to up Ctrl* k += 4 } k += rune(int(ib.b[5]) - int('A')) // set arrow direction return Event{KeySpecial, k, nil} } } } } // ESC-0 ~ ESC-26 == ctrl-alt-[key] if ib.b[1] >= 1 && ib.b[1] <= 'z'-'a'+1 { return Event{KeySpecial, rune(int(CtrlAlta) + int(ib.b[1]) - 1), nil} } // ESC-32 ~ ESC-126 == alt-[key] if Printable(int(ib.b[1])) { return Event{KeySpecial, rune(ib.b[1]), nil} } return debugEv(ib.b) } // mouse stuff func debugEv(buf []byte) Event { b := make([]byte, len(buf)) copy(b, buf) return Event{EventInvalid, 0, &MouseEvent{0, 0, 0, false, false, false, false, b}} } // https://www.xfree86.org/current/ctlseqs.html#Mouse%20Tracking // \x1b[M<button><x+33><y+33> func (ib *inputBuf) mouseSequence(sz *int) Event { b := make([]byte, len(ib.b)) copy(b, ib.b) if len(ib.b) < 6 { fmt.Printf("short mouse seq: %v\n", ib.b) return debugEv(ib.b) } *sz = 6 evCode := int(b[3] - 32) bNum := evCode & 0x3 // low two bits, 00=MB1, 01=MB2, 10=MB3, 11=Release if evCode&(1<<6) != 0 { bNum += 4 // scroll buttons set a high bit (+32) } shift := evCode&(1<<2) != 0 // 4 meta := evCode&(1<<3) != 0 // 8 ctrl := evCode&(1<<4) != 0 // 16 motion := evCode&(1<<5) != 0 //32, motion indicator x := int(ib.b[4] - 33) y := int(ib.b[5] - 33) // - yoffset if any return Event{Mouse, 0, &MouseEvent{y, x, bNum, shift, meta, ctrl, motion, b}} }
{ return nil, restore, err }
conditional_block
input.go
package tui import ( "context" "fmt" "sync" "time" "unicode/utf8" "golang.org/x/crypto/ssh/terminal" ) const ( // Basic keycodes, map directly to their ANSI numbering // order strictly matters Null rune = iota // ctrl-space CtrlA CtrlB CtrlC CtrlD // aka EOF CtrlE CtrlF CtrlG CtrlH // \b backsp But actual backspace is 127. CtrlBackspace is 8 Tab // \t aka Ctrl-I CtrlJ // \n, sometimes enter CtrlK CtrlL CtrlM // aka \r, also sometimes enter CtrlN CtrlO CtrlP CtrlQ CtrlR CtrlS CtrlT CtrlU CtrlV CtrlW CtrlX CtrlY CtrlZ ESC // ctrl-[ CtrlFwdSlash // ctrl-\ CtrlBackBracket // ctrl-] CtrlCaret // ctrl-6 (^) CtrlUnderscore // shift-ctrl - /* 32-126 are the printable keyboard keys For regular pressing of these keys, the following table list isn't used. The keys are returned as KeyPrintable with their rune value. Where KeySpecial for the same rune value, it is equivalent to Alt[key]. */ AltSpace // [ESC, 32] = Alt + space AltBang // [ESC, 33] i.e. shift-alt-1 AltDQuo AltHash AltDollar AltPercent AltAmpersand AltSQuo AltOpenParen AltCloseParen AltStar AltPlus AltComma AltMinus AltPeriod AltSlash Alt0 Alt1 Alt2 Alt3 Alt4 Alt5 Alt6 Alt7 Alt8 Alt9 AltColon AltSemicolon AltLT AltEqual AltGT AltQuestion AltAt AltA AltB AltC AltD AltE AltF AltG AltH AltI AltJ AltK AltL AltM AltN AltO AltP AltQ AltR AltS AltT AltU AltV AltW AltX AltY AltZ AltOpenBracket AltFwdSlash AltCloseBracket AltCaret AltUnderscore AltGrave Alta Altb Altc Altd Alte Altf Altg Alth Alti Altj Altk Altl Altm Altn Alto Altp Altq Altr Alts Altt Altu Altv Altw Altx Alty Altz AltOpenCurly AltPipe AltCloseCurly AltTilde AltBS //End printable // actual number values below are arbitrary, are not in order. // Extended keyCodes Del AltDel BTab BSpace PgUp PgDn Up Down Right Left Home End // /relative/ order of the next 8 matter SUp // Shift SDown SRight SLeft CtrlUp CtrlDown CtrlRight CtrlLeft // don't actually need to be in order F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 //sequential calculated. Keep in relative order CtrlAlta CtrlAltb CtrlAltc CtrlAltd CtrlAlte CtrlAltf CtrlAltg CtrlAlth CtrlAlti CtrlAltj CtrlAltk CtrlAltl CtrlAltm CtrlAltn CtrlAlto CtrlAltp CtrlAltq CtrlAltr CtrlAlts CtrlAltt CtrlAltu CtrlAltv CtrlAltw CtrlAltx CtrlAlty CtrlAltz ) type EvType uint8 const ( EventInvalid EvType = iota KeySpecial KeyPrint Mouse ) /* How to determine mouse action: Mousedown: Type=Mouse && Btn != 3 && !Motion Mouseup: Type=Mouse && Btn == 3 && !Motion Mousedrag: Type=Mouse && Btn != 3 && Motion Mousemove: Type=Mouse && Btn == 3 && Motion ScrollUp: Type=Mouse && Btn=4 ScrollDn: Type=Mouse && Btn=5 */ type MouseEvent struct { Y int X int Btn int // 0=Primary, 1=Middle, 2=Right, 3=Release, 4=ScrUp, 5=ScrDown Shift bool Meta bool Ctrl bool Motion bool buf []byte } type Event struct { Type EvType Key rune M *MouseEvent } // returns true if a specific character int(rune) is a printable character (alphanumeric, punctuation) func Printable(i int) bool { return i >= 32 && i <= 126 } /* Grabs Stdin(or whatever passed fd) to listen for keyboard input. Returns 3 things: - a channel to listen for key events on - a terminal restore function. 
Always safe to call, especially when error is set - error condition This is the primary use of the top-level tui package, if you intend to capture input, or mouse events */ func GetInput(ctx context.Context, fd int) (<-chan Event, func() error, error) { ch := make(chan Event, 1000) st, err := terminal.GetState(fd) if err != nil { return nil, func() error { return nil }, err } restore := func() error { return terminal.Restore(fd, st) } _, err = terminal.MakeRaw(fd) if err != nil { return nil, restore, err } ib := inputBuf{b: make([]byte, 0, 9)} go func() { for { select { case <-ctx.Done(): return case ev := <-ib.readEvent(fd): ch <- ev } } }() return ch, restore, nil } type inputBuf struct { b []byte mu sync.Mutex } func (ib *inputBuf) readEvent(fd int) <-chan Event
/* * Gets first byte, blocking to do so. * Tries to get any extra bytes within a 100ms timespan * like esc key sequences (arrows, etc) * */ func fillBuf(fd int, buf []byte) []byte { const pollInt = 5 //ms const span = 100 //ms -- reflected via retries*pollInt c, ok := getchar(fd, false) if !ok { return buf } buf = append(buf, byte(c)) retries := 0 if c == int(ESC) { retries = span / pollInt // 20 } pc := c for { c, ok := getchar(fd, true) if !ok { if retries > 0 { retries-- time.Sleep(pollInt * time.Millisecond) continue } break } else if c == int(ESC) && pc != c { retries = span / pollInt // got the next char, keep going } else { retries = 0 } buf = append(buf, byte(c)) pc = c } return buf } func getchar(fd int, nonblock bool) (int, bool) { b := make([]byte, 1) err := setNonBlock(fd, nonblock) if err != nil { return 0, false } if n, err := sysRead(fd, b); err != nil || n < 1 { return 0, false } return int(b[0]), true } //@todo: more shift/ctrl/alt of extended keys like Home, F#, PgUp //http://www.manmrk.net/tutorials/ISPF/XE/xehelp/html/HID00000594.htm //this is the ugliest, code ever. to check the seemingly most random //assignment of codes to meaningful keys func (ib *inputBuf) escSequence(sz *int) Event { if len(ib.b) < 2 { return Event{KeySpecial, ESC, nil} } *sz = 2 switch ib.b[1] { case byte(ESC): return Event{KeySpecial, ESC, nil} case 127: return Event{KeySpecial, AltBS, nil} case 91, 79: // [, O if len(ib.b) < 3 { if ib.b[1] == '[' { return Event{KeySpecial, AltOpenBracket, nil} } else if ib.b[1] == 'O' { return Event{KeySpecial, AltO, nil} } return debugEv(ib.b) } *sz = 3 switch ib.b[2] { case 65: return Event{KeySpecial, Up, nil} case 66: return Event{KeySpecial, Down, nil} case 67: return Event{KeySpecial, Right, nil} case 68: return Event{KeySpecial, Left, nil} case 90: return Event{KeySpecial, BTab, nil} case 72: return Event{KeySpecial, Home, nil} case 70: return Event{KeySpecial, End, nil} case 77: return ib.mouseSequence(sz) case 80: return Event{KeySpecial, F1, nil} case 81: return Event{KeySpecial, F2, nil} case 82: return Event{KeySpecial, F3, nil} case 49, 50, 51, 52, 53, 54: if len(ib.b) < 4 { return debugEv(ib.b) } *sz = 4 switch ib.b[2] { case 50: if len(ib.b) == 5 && ib.b[4] == 126 { *sz = 5 switch ib.b[3] { case 48: return Event{KeySpecial, F9, nil} case 49: return Event{KeySpecial, F10, nil} // @todo: WTF does 50 mean? case 51: return Event{KeySpecial, F11, nil} case 52: return Event{KeySpecial, F12, nil} } } else if ib.b[3] == '0' && (ib.b[4] == '0' || ib.b[4] == '1') && ib.b[5] == '~' { // bracketed paste mode. \e[200~ .. 
\e[201~ //discard seq from buffer ib.b = ib.b[6:] *sz = 0 return Event{KeySpecial, Null, nil} } return debugEv(ib.b) case 51: if len(ib.b) >= 6 && ib.b[3] == 59 && ib.b[4] == 51 && ib.b[5] == '~' { *sz = 6 return Event{KeySpecial, AltDel, nil} } return Event{KeySpecial, Del, nil} case 52: return Event{KeySpecial, End, nil} case 53: return Event{KeySpecial, PgUp, nil} case 54: return Event{KeySpecial, PgDn, nil} case 49: //'1' switch ib.b[3] { case 126: return Event{KeySpecial, Home, nil} case 53, 55, 56, 57: if len(ib.b) == 5 && ib.b[4] == 126 { *sz = 5 switch ib.b[3] { case 53: return Event{KeySpecial, F5, nil} case 55: return Event{KeySpecial, F6, nil} case 56: return Event{KeySpecial, F7, nil} case 57: return Event{KeySpecial, F8, nil} } } return debugEv(ib.b) case ';': //59 if len(ib.b) != 6 { return debugEv(ib.b) } *sz = 6 if ib.b[4] != '2' && ib.b[4] != '5' { return debugEv(ib.b) } if ib.b[5] < 'A' || ib.b[5] > 'D' { return debugEv(ib.b) } //ESC[1;2A == shift-up //ESC[1;5A == ctrl-up k := SUp if ib.b[4] == '5' { // move to up Ctrl* k += 4 } k += rune(int(ib.b[5]) - int('A')) // set arrow direction return Event{KeySpecial, k, nil} } } } } // ESC-0 ~ ESC-26 == ctrl-alt-[key] if ib.b[1] >= 1 && ib.b[1] <= 'z'-'a'+1 { return Event{KeySpecial, rune(int(CtrlAlta) + int(ib.b[1]) - 1), nil} } // ESC-32 ~ ESC-126 == alt-[key] if Printable(int(ib.b[1])) { return Event{KeySpecial, rune(ib.b[1]), nil} } return debugEv(ib.b) } // mouse stuff func debugEv(buf []byte) Event { b := make([]byte, len(buf)) copy(b, buf) return Event{EventInvalid, 0, &MouseEvent{0, 0, 0, false, false, false, false, b}} } // https://www.xfree86.org/current/ctlseqs.html#Mouse%20Tracking // \x1b[M<button><x+33><y+33> func (ib *inputBuf) mouseSequence(sz *int) Event { b := make([]byte, len(ib.b)) copy(b, ib.b) if len(ib.b) < 6 { fmt.Printf("short mouse seq: %v\n", ib.b) return debugEv(ib.b) } *sz = 6 evCode := int(b[3] - 32) bNum := evCode & 0x3 // low two bits, 00=MB1, 01=MB2, 10=MB3, 11=Release if evCode&(1<<6) != 0 { bNum += 4 // scroll buttons set a high bit (+32) } shift := evCode&(1<<2) != 0 // 4 meta := evCode&(1<<3) != 0 // 8 ctrl := evCode&(1<<4) != 0 // 16 motion := evCode&(1<<5) != 0 //32, motion indicator x := int(ib.b[4] - 33) y := int(ib.b[5] - 33) // - yoffset if any return Event{Mouse, 0, &MouseEvent{y, x, bNum, shift, meta, ctrl, motion, b}} }
{ ch := make(chan Event) go func() { ib.mu.Lock() defer func() { ib.mu.Unlock() }() for ; len(ib.b) == 0; ib.b = fillBuf(fd, ib.b) { } if len(ib.b) == 0 { close(ch) return } sz := 1 defer func() { ib.b = ib.b[sz:] }() switch ib.b[0] { case byte(CtrlC), byte(CtrlG), byte(CtrlQ): ch <- Event{KeySpecial, rune(ib.b[0]), nil} return case 127: ch <- Event{KeySpecial, BSpace, nil} return case 0: ch <- Event{KeySpecial, Null, nil} // Ctrl-space? return case byte(ESC): ch <- ib.escSequence(&sz) return } if ib.b[0] < 32 { // Ctrl-A_Z ch <- Event{KeySpecial, rune(ib.b[0]), nil} return } char, rsz := utf8.DecodeRune(ib.b) if char == utf8.RuneError { ch <- Event{KeySpecial, ESC, nil} return } sz = rsz ch <- Event{KeyPrint, char, nil} }() return ch }
identifier_body
input.go
package tui import ( "context" "fmt" "sync" "time" "unicode/utf8" "golang.org/x/crypto/ssh/terminal" ) const ( // Basic keycodes, map directly to their ANSI numbering // order strictly matters Null rune = iota // ctrl-space CtrlA CtrlB CtrlC CtrlD // aka EOF CtrlE CtrlF CtrlG CtrlH // \b backsp But actual backspace is 127. CtrlBackspace is 8 Tab // \t aka Ctrl-I CtrlJ // \n, sometimes enter CtrlK CtrlL CtrlM // aka \r, also sometimes enter CtrlN CtrlO CtrlP CtrlQ CtrlR CtrlS CtrlT CtrlU CtrlV CtrlW CtrlX CtrlY CtrlZ ESC // ctrl-[ CtrlFwdSlash // ctrl-\ CtrlBackBracket // ctrl-] CtrlCaret // ctrl-6 (^) CtrlUnderscore // shift-ctrl - /* 32-126 are the printable keyboard keys For regular pressing of these keys, the following table list isn't used. The keys are returned as KeyPrintable with their rune value. Where KeySpecial for the same rune value, it is equivalent to Alt[key]. */ AltSpace // [ESC, 32] = Alt + space AltBang // [ESC, 33] i.e. shift-alt-1 AltDQuo AltHash AltDollar AltPercent AltAmpersand AltSQuo AltOpenParen AltCloseParen AltStar AltPlus AltComma AltMinus AltPeriod AltSlash Alt0 Alt1 Alt2 Alt3 Alt4 Alt5 Alt6 Alt7 Alt8 Alt9 AltColon AltSemicolon AltLT AltEqual AltGT AltQuestion AltAt AltA AltB AltC AltD AltE AltF AltG AltH AltI AltJ AltK AltL AltM AltN AltO AltP AltQ AltR AltS AltT AltU AltV AltW AltX AltY AltZ AltOpenBracket AltFwdSlash AltCloseBracket AltCaret AltUnderscore AltGrave Alta Altb Altc Altd Alte Altf Altg Alth Alti Altj Altk Altl Altm Altn Alto Altp Altq Altr Alts Altt Altu Altv Altw Altx Alty Altz AltOpenCurly AltPipe AltCloseCurly AltTilde AltBS //End printable // actual number values below are arbitrary, are not in order. // Extended keyCodes Del AltDel BTab BSpace PgUp PgDn Up Down Right Left Home End // /relative/ order of the next 8 matter SUp // Shift SDown SRight SLeft CtrlUp CtrlDown CtrlRight CtrlLeft // don't actually need to be in order F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 //sequential calculated. Keep in relative order CtrlAlta CtrlAltb CtrlAltc CtrlAltd CtrlAlte CtrlAltf CtrlAltg CtrlAlth CtrlAlti CtrlAltj CtrlAltk CtrlAltl CtrlAltm CtrlAltn CtrlAlto CtrlAltp CtrlAltq CtrlAltr CtrlAlts CtrlAltt CtrlAltu CtrlAltv CtrlAltw CtrlAltx CtrlAlty
type EvType uint8 const ( EventInvalid EvType = iota KeySpecial KeyPrint Mouse ) /* How to determine mouse action: Mousedown: Type=Mouse && Btn != 3 && !Motion Mouseup: Type=Mouse && Btn == 3 && !Motion Mousedrag: Type=Mouse && Btn != 3 && Motion Mousemove: Type=Mouse && Btn == 3 && Motion ScrollUp: Type=Mouse && Btn=4 ScrollDn: Type=Mouse && Btn=5 */ type MouseEvent struct { Y int X int Btn int // 0=Primary, 1=Middle, 2=Right, 3=Release, 4=ScrUp, 5=ScrDown Shift bool Meta bool Ctrl bool Motion bool buf []byte } type Event struct { Type EvType Key rune M *MouseEvent } // returns true if a specific character int(rune) is a printable character (alphanumeric, punctuation) func Printable(i int) bool { return i >= 32 && i <= 126 } /* Grabs Stdin(or whatever passed fd) to listen for keyboard input. Returns 3 things: - a channel to listen for key events on - a terminal restore function. Always safe to call, especially when error is set - error condition This is the primary use of the top-level tui package, if you intend to capture input, or mouse events */ func GetInput(ctx context.Context, fd int) (<-chan Event, func() error, error) { ch := make(chan Event, 1000) st, err := terminal.GetState(fd) if err != nil { return nil, func() error { return nil }, err } restore := func() error { return terminal.Restore(fd, st) } _, err = terminal.MakeRaw(fd) if err != nil { return nil, restore, err } ib := inputBuf{b: make([]byte, 0, 9)} go func() { for { select { case <-ctx.Done(): return case ev := <-ib.readEvent(fd): ch <- ev } } }() return ch, restore, nil } type inputBuf struct { b []byte mu sync.Mutex } func (ib *inputBuf) readEvent(fd int) <-chan Event { ch := make(chan Event) go func() { ib.mu.Lock() defer func() { ib.mu.Unlock() }() for ; len(ib.b) == 0; ib.b = fillBuf(fd, ib.b) { } if len(ib.b) == 0 { close(ch) return } sz := 1 defer func() { ib.b = ib.b[sz:] }() switch ib.b[0] { case byte(CtrlC), byte(CtrlG), byte(CtrlQ): ch <- Event{KeySpecial, rune(ib.b[0]), nil} return case 127: ch <- Event{KeySpecial, BSpace, nil} return case 0: ch <- Event{KeySpecial, Null, nil} // Ctrl-space? return case byte(ESC): ch <- ib.escSequence(&sz) return } if ib.b[0] < 32 { // Ctrl-A_Z ch <- Event{KeySpecial, rune(ib.b[0]), nil} return } char, rsz := utf8.DecodeRune(ib.b) if char == utf8.RuneError { ch <- Event{KeySpecial, ESC, nil} return } sz = rsz ch <- Event{KeyPrint, char, nil} }() return ch } /* * Gets first byte, blocking to do so. * Tries to get any extra bytes within a 100ms timespan * like esc key sequences (arrows, etc) * */ func fillBuf(fd int, buf []byte) []byte { const pollInt = 5 //ms const span = 100 //ms -- reflected via retries*pollInt c, ok := getchar(fd, false) if !ok { return buf } buf = append(buf, byte(c)) retries := 0 if c == int(ESC) { retries = span / pollInt // 20 } pc := c for { c, ok := getchar(fd, true) if !ok { if retries > 0 { retries-- time.Sleep(pollInt * time.Millisecond) continue } break } else if c == int(ESC) && pc != c { retries = span / pollInt // got the next char, keep going } else { retries = 0 } buf = append(buf, byte(c)) pc = c } return buf } func getchar(fd int, nonblock bool) (int, bool) { b := make([]byte, 1) err := setNonBlock(fd, nonblock) if err != nil { return 0, false } if n, err := sysRead(fd, b); err != nil || n < 1 { return 0, false } return int(b[0]), true } //@todo: more shift/ctrl/alt of extended keys like Home, F#, PgUp //http://www.manmrk.net/tutorials/ISPF/XE/xehelp/html/HID00000594.htm //this is the ugliest, code ever. 
// to check the seemingly most random assignment of codes to meaningful keys.
func (ib *inputBuf) escSequence(sz *int) Event {
    if len(ib.b) < 2 {
        return Event{KeySpecial, ESC, nil}
    }
    *sz = 2
    switch ib.b[1] {
    case byte(ESC):
        return Event{KeySpecial, ESC, nil}
    case 127:
        return Event{KeySpecial, AltBS, nil}
    case 91, 79: // [, O
        if len(ib.b) < 3 {
            if ib.b[1] == '[' {
                return Event{KeySpecial, AltOpenBracket, nil}
            } else if ib.b[1] == 'O' {
                return Event{KeySpecial, AltO, nil}
            }
            return debugEv(ib.b)
        }
        *sz = 3
        switch ib.b[2] {
        case 65:
            return Event{KeySpecial, Up, nil}
        case 66:
            return Event{KeySpecial, Down, nil}
        case 67:
            return Event{KeySpecial, Right, nil}
        case 68:
            return Event{KeySpecial, Left, nil}
        case 90:
            return Event{KeySpecial, BTab, nil}
        case 72:
            return Event{KeySpecial, Home, nil}
        case 70:
            return Event{KeySpecial, End, nil}
        case 77:
            return ib.mouseSequence(sz)
        case 80:
            return Event{KeySpecial, F1, nil}
        case 81:
            return Event{KeySpecial, F2, nil}
        case 82:
            return Event{KeySpecial, F3, nil}
        case 49, 50, 51, 52, 53, 54:
            if len(ib.b) < 4 {
                return debugEv(ib.b)
            }
            *sz = 4
            switch ib.b[2] {
            case 50:
                if len(ib.b) == 5 && ib.b[4] == 126 {
                    *sz = 5
                    switch ib.b[3] {
                    case 48:
                        return Event{KeySpecial, F9, nil}
                    case 49:
                        return Event{KeySpecial, F10, nil}
                    // @todo: WTF does 50 mean?
                    case 51:
                        return Event{KeySpecial, F11, nil}
                    case 52:
                        return Event{KeySpecial, F12, nil}
                    }
                } else if len(ib.b) >= 6 && ib.b[3] == '0' && (ib.b[4] == '0' || ib.b[4] == '1') && ib.b[5] == '~' {
                    // bracketed paste mode: \e[200~ .. \e[201~
                    // discard the sequence from the buffer
                    ib.b = ib.b[6:]
                    *sz = 0
                    return Event{KeySpecial, Null, nil}
                }
                return debugEv(ib.b)
            case 51:
                if len(ib.b) >= 6 && ib.b[3] == 59 && ib.b[4] == 51 && ib.b[5] == '~' {
                    *sz = 6
                    return Event{KeySpecial, AltDel, nil}
                }
                return Event{KeySpecial, Del, nil}
            case 52:
                return Event{KeySpecial, End, nil}
            case 53:
                return Event{KeySpecial, PgUp, nil}
            case 54:
                return Event{KeySpecial, PgDn, nil}
            case 49: // '1'
                switch ib.b[3] {
                case 126:
                    return Event{KeySpecial, Home, nil}
                case 53, 55, 56, 57:
                    if len(ib.b) == 5 && ib.b[4] == 126 {
                        *sz = 5
                        switch ib.b[3] {
                        case 53:
                            return Event{KeySpecial, F5, nil}
                        case 55:
                            return Event{KeySpecial, F6, nil}
                        case 56:
                            return Event{KeySpecial, F7, nil}
                        case 57:
                            return Event{KeySpecial, F8, nil}
                        }
                    }
                    return debugEv(ib.b)
                case ';': // 59
                    if len(ib.b) != 6 {
                        return debugEv(ib.b)
                    }
                    *sz = 6
                    if ib.b[4] != '2' && ib.b[4] != '5' {
                        return debugEv(ib.b)
                    }
                    if ib.b[5] < 'A' || ib.b[5] > 'D' {
                        return debugEv(ib.b)
                    }
                    // ESC[1;2A == shift-up
                    // ESC[1;5A == ctrl-up (worked examples after mouseSequence below)
                    k := SUp
                    if ib.b[4] == '5' {
                        // move up to the Ctrl* block
                        k += 4
                    }
                    k += rune(int(ib.b[5]) - int('A')) // set arrow direction
                    return Event{KeySpecial, k, nil}
                }
            }
        }
    }
    // ESC-1 ~ ESC-26 == ctrl-alt-[key]
    if ib.b[1] >= 1 && ib.b[1] <= 'z'-'a'+1 {
        return Event{KeySpecial, rune(int(CtrlAlta) + int(ib.b[1]) - 1), nil}
    }
    // ESC-32 ~ ESC-126 == alt-[key]
    if Printable(int(ib.b[1])) {
        return Event{KeySpecial, rune(ib.b[1]), nil}
    }
    return debugEv(ib.b)
}

// mouse stuff

func debugEv(buf []byte) Event {
    b := make([]byte, len(buf))
    copy(b, buf)
    return Event{EventInvalid, 0, &MouseEvent{0, 0, 0, false, false, false, false, b}}
}

// https://www.xfree86.org/current/ctlseqs.html#Mouse%20Tracking
// \x1b[M<button><x+33><y+33>
func (ib *inputBuf) mouseSequence(sz *int) Event {
    b := make([]byte, len(ib.b))
    copy(b, ib.b)
    if len(ib.b) < 6 {
        fmt.Printf("short mouse seq: %v\n", ib.b)
        return debugEv(ib.b)
    }
    *sz = 6
    evCode := int(b[3] - 32)
    bNum := evCode & 0x3 // low two bits: 00=MB1, 01=MB2, 10=MB3, 11=Release
    if evCode&(1<<6) != 0 {
        bNum += 4 // scroll buttons set a high bit (bit 6) in the button code
    }
    shift := evCode&(1<<2) != 0  // 4
    meta := evCode&(1<<3) != 0   // 8
    ctrl := evCode&(1<<4) != 0   // 16
    motion := evCode&(1<<5) != 0 // 32, motion indicator
    x := int(ib.b[4] - 33)
    y := int(ib.b[5] - 33) // minus a y-offset, if any
    return Event{Mouse, 0, &MouseEvent{y, x, bNum, shift, meta, ctrl, motion, b}}
}
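// Hedged usage sketch (not part of the original source): how a caller might
// consume GetInput and classify events using the rules documented above the
// MouseEvent type. The import path "example.com/yourmod/tui" is a placeholder
// assumption for wherever this package actually lives, and the sketch assumes
// mouse reporting has already been enabled on the terminal elsewhere.
package main

import (
    "context"
    "fmt"
    "os"

    "example.com/yourmod/tui" // hypothetical import path
)

func main() {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    events, restore, err := tui.GetInput(ctx, int(os.Stdin.Fd()))
    defer restore() // always safe to call, even when err is set
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        return
    }

    for ev := range events {
        switch ev.Type {
        case tui.KeyPrint:
            fmt.Printf("typed %q\r\n", ev.Key) // raw mode: use \r\n for clean newlines
        case tui.KeySpecial:
            if ev.Key == tui.CtrlC {
                return // terminal is restored by the deferred restore()
            }
            fmt.Printf("special key code %d\r\n", ev.Key)
        case tui.Mouse:
            m := ev.M
            switch {
            case m.Btn == 4 || m.Btn == 5:
                fmt.Printf("scroll at %d,%d\r\n", m.X, m.Y)
            case m.Btn != 3 && !m.Motion:
                fmt.Printf("button %d down at %d,%d\r\n", m.Btn, m.X, m.Y)
            case m.Btn == 3 && !m.Motion:
                fmt.Printf("button release at %d,%d\r\n", m.X, m.Y)
            case m.Btn != 3 && m.Motion:
                fmt.Printf("drag at %d,%d\r\n", m.X, m.Y)
            default:
                fmt.Printf("move at %d,%d\r\n", m.X, m.Y)
            }
        }
    }
}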
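// A package-internal sketch (assumed to live in a _test.go file of this
// package; not part of the original source) working through the
// escape-sequence arithmetic above on two concrete byte sequences.
package tui

import "testing"

func TestEscSequenceSketch(t *testing.T) {
    // ESC [ 1 ; 5 C  ->  Ctrl-Right: k starts at SUp, the '5' modifier adds 4
    // (jumping from the shift-arrow block to the ctrl-arrow block), and
    // 'C'-'A' adds 2 for the arrow direction, yielding CtrlRight.
    ib := inputBuf{b: []byte{27, '[', '1', ';', '5', 'C'}}
    sz := 1
    if ev := ib.escSequence(&sz); ev.Key != CtrlRight {
        t.Fatalf("want CtrlRight, got key %d", ev.Key)
    }

    // ESC [ M 32 43 38  ->  MB1 press at x=10, y=5: button byte 32 maps to
    // code 0 (MB1, no modifiers, no motion), x = 43-33 = 10, y = 38-33 = 5.
    ib2 := inputBuf{b: []byte{27, '[', 'M', 32, 43, 38}}
    sz = 1
    ev := ib2.escSequence(&sz)
    if ev.Type != Mouse || ev.M.Btn != 0 || ev.M.X != 10 || ev.M.Y != 5 {
        t.Fatalf("unexpected mouse event: %+v", ev.M)
    }
}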
    CtrlAltz
)
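// A small sketch (assumed, not in the original source) of what the
// "order strictly matters" / "keep in relative order" notes in the const
// block rely on: escSequence computes ctrl-arrow and ctrl-alt keycodes by
// offset arithmetic, so these runs must stay contiguous. It would live in a
// _test.go file of this package.
package tui

import "testing"

func TestKeycodeOrdering(t *testing.T) {
    if CtrlRight != SUp+6 {
        t.Fatal("ctrl-arrow block must sit 4 places after the shift-arrow block")
    }
    if CtrlAltz != CtrlAlta+25 {
        t.Fatal("ctrl-alt block must be contiguous a..z")
    }
}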
random_line_split