Dataset columns (type and value statistics):

  file_name   large_string   lengths 4–69
  prefix      large_string   lengths 0–26.7k
  suffix      large_string   lengths 0–24.8k
  middle      large_string   lengths 0–2.12k
  fim_type    large_string   4 classes (4 values)
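Each row below is one fill-in-the-middle (FIM) example: prefix, middle, and suffix are consecutive slices of the file named in file_name, and fim_type labels the kind of span that was cut out as the middle; the four values occurring in this dump are identifier_body, conditional_block, identifier_name, and random_line_split. A minimal sketch of how a row is assumed to fit back together; the struct, function, and sample values are illustrative, not part of the dataset:

struct FimRow {
    file_name: String,
    prefix: String,
    middle: String,
    suffix: String,
    fim_type: String, // identifier_body | conditional_block | identifier_name | random_line_split
}

fn reconstruct(row: &FimRow) -> String {
    // Splicing the masked span back between prefix and suffix should
    // reproduce the original contents of `file_name`.
    format!("{}{}{}", row.prefix, row.middle, row.suffix)
}

fn main() {
    // Tiny made-up row in the style of the `identifier_body` examples below.
    let row = FimRow {
        file_name: "example.rs".into(),
        prefix: "pub fn answer() -> i32 ".into(),
        middle: "{ 42 }".into(),
        suffix: "\n".into(),
        fim_type: "identifier_body".into(),
    };
    assert_eq!(row.fim_type, "identifier_body");
    assert_eq!(reconstruct(&row), "pub fn answer() -> i32 { 42 }\n");
}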
path.rs
//! File path utilities. //! //! Some of the functions are similar to [`std::path::Path`] ones, but here they //! work directly upon [`&str`](str) instead of [`&OsStr`](std::ffi::OsStr). use crate::co; use crate::decl::*; use crate::guard::*; use crate::prelude::*; /// Returns an iterator over the files and folders within a directory. /// Optionally, a wildcard can be specified to filter files by name. /// /// This is a high-level abstraction over [`HFINDFILE`](crate::HFINDFILE) /// iteration functions. /// /// # Examples /// /// Listing all text files in a directory: /// /// ```no_run /// use winsafe::{self as w, prelude::*}; /// /// for file_path in w::path::dir_list("C:\\temp", Some("*.txt")) { /// let file_path = file_path?; /// println!("{}", file_path); /// } /// # Ok::<_, winsafe::co::ERROR>(()) /// ``` #[must_use] pub fn dir_list<'a>( dir_path: &'a str, filter: Option<&'a str>, ) -> impl Iterator<Item = SysResult<String>> + 'a { DirListIter::new(dir_path.to_owned(), filter) } /// Returns an interator over the files within a directory, and all its /// subdirectories, recursively. /// /// This is a high-level abstraction over [`HFINDFILE`](crate::HFINDFILE) /// iteration functions. /// /// # Examples /// /// ```no_run /// use winsafe::{self as w, prelude::*}; /// /// // Ordinary for loop /// for file_path in w::path::dir_walk("C:\\Temp") { /// let file_path = file_path?; /// println!("{}", file_path); /// } /// /// // Closure with try_for_each /// w::path::dir_walk("C:\\Temp") /// .try_for_each(|file_path| { /// let file_path = file_path?; /// println!("{}", file_path); /// Ok(()) /// })?; /// /// // Collecting into a Vec /// let all = w::path::dir_walk("C:\\Temp") /// .collect::<w::SysResult<Vec<_>>>()?; /// /// // Transforming and collecting into a Vec /// let all = w::path::dir_walk("C:\\Temp") /// .map(|file_path| { /// let file_path = file_path?; /// Ok(format!("PATH: {}", file_path)) /// }) /// .collect::<w::SysResult<Vec<_>>>()?; /// # Ok::<_, winsafe::co::ERROR>(()) /// ``` #[must_use] pub fn dir_walk<'a>( dir_path: &'a str, ) -> impl Iterator<Item = SysResult<String>> + 'a { DirWalkIter::new(dir_path.to_owned()) } /// Returns the path of the current EXE file, without the EXE filename, and /// without a trailing backslash. /// /// In a debug build, the `target\debug` folders will be suppressed. #[cfg(debug_assertions)] #[must_use] pub fn exe_path() -> SysResult<String> { let dbg = HINSTANCE::NULL.GetModuleFileName()?; Ok( get_path( // target get_path( // debug get_path(&dbg).unwrap(), // exe name ).unwrap(), ).unwrap() .to_owned(), ) } /// Returns the path of the current EXE file, without the EXE filename, and /// without a trailing backslash. /// /// In a debug build, the `target\debug` folders will be suppressed. #[cfg(not(debug_assertions))] #[must_use] pub fn exe_path() -> SysResult<String> { Ok( get_path(&HINSTANCE::NULL.GetModuleFileName()?) .unwrap().to_owned(), ) } /// Returns true if the path exists. #[must_use] pub fn exists(full_path: &str) -> bool { GetFileAttributes(full_path).is_ok() } /// Extracts the file name from a full path, if any. /// /// # Examples /// /// ```no_run /// use winsafe::{self as w, prelude::*}; /// /// let f = w::path::get_file_name("C:\\Temp\\foo.txt"); // foo.txt /// ``` #[must_use] pub fn get_file_name(full_path: &str) -> Option<&str>
/// Extracts the full path, but the last part. /// /// # Examples /// /// ```no_run /// use winsafe::{self as w, prelude::*}; /// /// let p = w::path::get_path("C:\\Temp\\xx\\a.txt"); // C:\Temp\xx /// let q = w::path::get_path("C:\\Temp\\xx\\"); // C:\Temp\xx /// let r = w::path::get_path("C:\\Temp\\xx"); // C:\Temp" /// ``` #[must_use] pub fn get_path(full_path: &str) -> Option<&str> { full_path.rfind('\\') // if no backslash, the whole string is the file name, so no path .map(|idx| &full_path[0..idx]) } /// Tells whether the full path ends in one of the given extensions, /// case-insensitive. /// /// # Examples /// /// ```no_run /// use winsafe::{self as w, prelude::*}; /// /// println!("{}", /// w::path::has_extension("file.txt", &[".txt", ".bat"])); /// ``` #[must_use] pub fn has_extension(full_path: &str, extensions: &[impl AsRef<str>]) -> bool { let full_path_u = full_path.to_uppercase(); extensions.iter() .find(|ext| { let ext_u = ext.as_ref().to_uppercase(); full_path_u.ends_with(&ext_u) }) .is_some() } /// Returns true if the path is a directory. /// /// # Panics /// /// Panics if the path does not exist. #[must_use] pub fn is_directory(full_path: &str) -> bool { let flags = GetFileAttributes(full_path).unwrap(); flags.has(co::FILE_ATTRIBUTE::DIRECTORY) } /// Returns true if the path is hidden. /// /// # Panics /// /// Panics if the path does not exist. #[must_use] pub fn is_hidden(full_path: &str) -> bool { let flags = GetFileAttributes(full_path).unwrap(); flags.has(co::FILE_ATTRIBUTE::HIDDEN) } /// Replaces the extension by the given one. /// /// # Examples /// /// ```no_run /// use winsafe::{self as w, prelude::*}; /// /// let p = w::path::replace_extension( /// "C:\\Temp\\something.txt", ".sh"); // C:\Temp\something.sh /// ``` #[must_use] pub fn replace_extension(full_path: &str, new_extension: &str) -> String { if let Some(last) = full_path.chars().last() { if last == '\\' { // full_path is a directory, do nothing return rtrim_backslash(full_path).to_owned(); } } let new_has_dot = new_extension.chars().next() == Some('.'); match full_path.rfind('.') { None => format!("{}{}{}", // file name without extension, just append it full_path, if new_has_dot { "" } else { "." }, new_extension, ), Some(idx) => format!("{}{}{}", &full_path[0..idx], if new_has_dot { "" } else { "." }, new_extension, ), } } /// Replaces the file name by the given one. #[must_use] pub fn replace_file_name(full_path: &str, new_file: &str) -> String { match get_path(full_path) { None => new_file.to_owned(), Some(path) => format!("{}\\{}", path, new_file), } } /// Keeps the file name and replaces the path by the given one. /// /// # Examples /// /// ```no_run /// use winsafe::{self as w, prelude::*}; /// /// let p = w::path::replace_path( // C:\another\foo.txt /// "C:\\Temp\\foo.txt", /// "C:\\another", /// ); /// ``` #[must_use] pub fn replace_path(full_path: &str, new_path: &str) -> String { let file_name = get_file_name(full_path); format!("{}{}{}", rtrim_backslash(new_path), if file_name.is_some() { "\\" } else { "" }, file_name.unwrap_or("")) } /// Removes a trailing backslash, if any. 
/// /// # Examples /// /// ```no_run /// use winsafe::{self as w, prelude::*}; /// /// let p = w::path::rtrim_backslash("C:\\Temp\\"); // C:\Temp /// ``` #[must_use] pub fn rtrim_backslash(full_path: &str) -> &str { match full_path.chars().last() { None => full_path, // empty string Some(last_ch) => if last_ch == '\\' { let mut chars = full_path.chars(); chars.next_back(); // remove last char chars.as_str() } else { full_path // no trailing backslash }, } } /// Returns a `Vec` with each part of the full path. #[must_use] pub fn split_parts(full_path: &str) -> Vec<&str> { let no_bs = rtrim_backslash(full_path); no_bs.split('\\').collect() } //------------------------------------------------------------------------------ pub(in crate::kernel) struct DirListIter<'a> { dir_path: String, filter: Option<&'a str>, hfind: Option<FindCloseGuard>, wfd: WIN32_FIND_DATA, no_more: bool, } impl<'a> Iterator for DirListIter<'a> { type Item = SysResult<String>; fn next(&mut self) -> Option<Self::Item> { if self.no_more { return None; } let found = match &self.hfind { None => { // first pass let dir_final = match self.filter { None => format!("{}\\*", self.dir_path), Some(filter) => format!("{}\\{}", self.dir_path, filter), }; let found = match HFINDFILE::FindFirstFile(&dir_final, &mut self.wfd) { Err(e) => { self.no_more = true; // prevent further iterations return Some(Err(e)); }, Ok((hfind, found)) => { self.hfind = Some(hfind); // store our find handle found }, }; found }, Some(hfind) => { // subsequent passes match hfind.FindNextFile(&mut self.wfd) { Err(e) => { self.no_more = true; // prevent further iterations return Some(Err(e)); }, Ok(found) => found, } }, }; if found { let file_name = self.wfd.cFileName(); if file_name == "." || file_name == ".." { // skip these self.next() } else { Some(Ok(format!("{}\\{}", self.dir_path, self.wfd.cFileName()))) } } else { None } } } impl<'a> DirListIter<'a> { pub(in crate::kernel) fn new( dir_path: String, filter: Option<&'a str>, ) -> Self { Self { dir_path: rtrim_backslash(&dir_path).to_owned(), filter, hfind: None, wfd: WIN32_FIND_DATA::default(), no_more: false, } } } //------------------------------------------------------------------------------ pub(in crate::kernel) struct DirWalkIter<'a> { runner: DirListIter<'a>, subdir_runner: Option<Box<DirWalkIter<'a>>>, no_more: bool, } impl<'a> Iterator for DirWalkIter<'a> { type Item = SysResult<String>; fn next(&mut self) -> Option<Self::Item> { if self.no_more { return None; } match &mut self.subdir_runner { None => { let cur_file = self.runner.next(); match cur_file { None => None, Some(cur_file) => { match cur_file { Err(e) => { self.no_more = true; // prevent further iterations Some(Err(e)) }, Ok(cur_file) => { if is_directory(&cur_file) { self.subdir_runner = Some(Box::new(Self::new(cur_file))); // recursively self.next() } else { Some(Ok(cur_file)) } }, } }, } }, Some(subdir_runner) => { let inner_file = subdir_runner.next(); match inner_file { None => { // subdir_runner finished his work self.subdir_runner = None; self.next() }, Some(inner_file) => { Some(inner_file) }, } }, } } } impl<'a> DirWalkIter<'a> { pub(in crate::kernel) fn new(dir_path: String) -> Self { Self { runner: DirListIter::new(dir_path, None), subdir_runner: None, no_more: false, } } }
{ match full_path.rfind('\\') { None => Some(full_path), // if no backslash, the whole string is the file name Some(idx) => if idx == full_path.chars().count() - 1 { None // last char is '\\', no file name } else { Some(&full_path[idx + 1..]) }, } }
identifier_body
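In the row above (fim_type = identifier_body) the masked middle is the entire { ... } body of path::get_file_name, which is what the identifier_body label denotes here. A small standalone sketch of equivalent logic (not the winsafe crate's code), checked against the case documented in the prefix's doc comment:

// Equivalent logic to the masked body: everything after the last '\',
// `None` when the path ends in a backslash, the whole string when there is none.
fn get_file_name(full_path: &str) -> Option<&str> {
    match full_path.rfind('\\') {
        None => Some(full_path),
        Some(idx) if idx == full_path.len() - 1 => None,
        Some(idx) => Some(&full_path[idx + 1..]),
    }
}

fn main() {
    assert_eq!(get_file_name("C:\\Temp\\foo.txt"), Some("foo.txt")); // case from the doc comment
    assert_eq!(get_file_name("C:\\Temp\\"), None);
    assert_eq!(get_file_name("foo.txt"), Some("foo.txt"));
}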
path.rs
//! File path utilities. //! //! Some of the functions are similar to [`std::path::Path`] ones, but here they //! work directly upon [`&str`](str) instead of [`&OsStr`](std::ffi::OsStr). use crate::co; use crate::decl::*; use crate::guard::*; use crate::prelude::*; /// Returns an iterator over the files and folders within a directory. /// Optionally, a wildcard can be specified to filter files by name. /// /// This is a high-level abstraction over [`HFINDFILE`](crate::HFINDFILE) /// iteration functions. /// /// # Examples /// /// Listing all text files in a directory: /// /// ```no_run /// use winsafe::{self as w, prelude::*}; /// /// for file_path in w::path::dir_list("C:\\temp", Some("*.txt")) { /// let file_path = file_path?; /// println!("{}", file_path); /// } /// # Ok::<_, winsafe::co::ERROR>(()) /// ``` #[must_use] pub fn dir_list<'a>( dir_path: &'a str, filter: Option<&'a str>, ) -> impl Iterator<Item = SysResult<String>> + 'a { DirListIter::new(dir_path.to_owned(), filter) } /// Returns an interator over the files within a directory, and all its /// subdirectories, recursively. /// /// This is a high-level abstraction over [`HFINDFILE`](crate::HFINDFILE) /// iteration functions. /// /// # Examples /// /// ```no_run /// use winsafe::{self as w, prelude::*}; /// /// // Ordinary for loop /// for file_path in w::path::dir_walk("C:\\Temp") { /// let file_path = file_path?; /// println!("{}", file_path); /// } /// /// // Closure with try_for_each /// w::path::dir_walk("C:\\Temp") /// .try_for_each(|file_path| { /// let file_path = file_path?; /// println!("{}", file_path); /// Ok(()) /// })?; /// /// // Collecting into a Vec /// let all = w::path::dir_walk("C:\\Temp") /// .collect::<w::SysResult<Vec<_>>>()?; /// /// // Transforming and collecting into a Vec /// let all = w::path::dir_walk("C:\\Temp") /// .map(|file_path| { /// let file_path = file_path?; /// Ok(format!("PATH: {}", file_path)) /// }) /// .collect::<w::SysResult<Vec<_>>>()?; /// # Ok::<_, winsafe::co::ERROR>(()) /// ``` #[must_use] pub fn dir_walk<'a>( dir_path: &'a str, ) -> impl Iterator<Item = SysResult<String>> + 'a { DirWalkIter::new(dir_path.to_owned()) } /// Returns the path of the current EXE file, without the EXE filename, and /// without a trailing backslash. /// /// In a debug build, the `target\debug` folders will be suppressed. #[cfg(debug_assertions)] #[must_use] pub fn exe_path() -> SysResult<String> { let dbg = HINSTANCE::NULL.GetModuleFileName()?; Ok( get_path( // target get_path( // debug get_path(&dbg).unwrap(), // exe name ).unwrap(), ).unwrap() .to_owned(), ) } /// Returns the path of the current EXE file, without the EXE filename, and /// without a trailing backslash. /// /// In a debug build, the `target\debug` folders will be suppressed. #[cfg(not(debug_assertions))] #[must_use] pub fn exe_path() -> SysResult<String> { Ok( get_path(&HINSTANCE::NULL.GetModuleFileName()?) .unwrap().to_owned(), ) } /// Returns true if the path exists. #[must_use] pub fn exists(full_path: &str) -> bool { GetFileAttributes(full_path).is_ok() } /// Extracts the file name from a full path, if any. 
/// /// # Examples /// /// ```no_run /// use winsafe::{self as w, prelude::*}; /// /// let f = w::path::get_file_name("C:\\Temp\\foo.txt"); // foo.txt /// ``` #[must_use] pub fn get_file_name(full_path: &str) -> Option<&str> { match full_path.rfind('\\') { None => Some(full_path), // if no backslash, the whole string is the file name Some(idx) => if idx == full_path.chars().count() - 1 { None // last char is '\\', no file name } else { Some(&full_path[idx + 1..]) }, } } /// Extracts the full path, but the last part. /// /// # Examples /// /// ```no_run /// use winsafe::{self as w, prelude::*}; /// /// let p = w::path::get_path("C:\\Temp\\xx\\a.txt"); // C:\Temp\xx /// let q = w::path::get_path("C:\\Temp\\xx\\"); // C:\Temp\xx /// let r = w::path::get_path("C:\\Temp\\xx"); // C:\Temp" /// ``` #[must_use] pub fn get_path(full_path: &str) -> Option<&str> { full_path.rfind('\\') // if no backslash, the whole string is the file name, so no path .map(|idx| &full_path[0..idx]) } /// Tells whether the full path ends in one of the given extensions, /// case-insensitive. /// /// # Examples /// /// ```no_run /// use winsafe::{self as w, prelude::*}; /// /// println!("{}", /// w::path::has_extension("file.txt", &[".txt", ".bat"])); /// ``` #[must_use] pub fn has_extension(full_path: &str, extensions: &[impl AsRef<str>]) -> bool { let full_path_u = full_path.to_uppercase(); extensions.iter() .find(|ext| { let ext_u = ext.as_ref().to_uppercase(); full_path_u.ends_with(&ext_u) }) .is_some() } /// Returns true if the path is a directory. /// /// # Panics /// /// Panics if the path does not exist. #[must_use] pub fn is_directory(full_path: &str) -> bool { let flags = GetFileAttributes(full_path).unwrap(); flags.has(co::FILE_ATTRIBUTE::DIRECTORY) } /// Returns true if the path is hidden. /// /// # Panics /// /// Panics if the path does not exist. #[must_use] pub fn is_hidden(full_path: &str) -> bool { let flags = GetFileAttributes(full_path).unwrap(); flags.has(co::FILE_ATTRIBUTE::HIDDEN) } /// Replaces the extension by the given one. /// /// # Examples /// /// ```no_run /// use winsafe::{self as w, prelude::*}; /// /// let p = w::path::replace_extension( /// "C:\\Temp\\something.txt", ".sh"); // C:\Temp\something.sh /// ``` #[must_use] pub fn replace_extension(full_path: &str, new_extension: &str) -> String { if let Some(last) = full_path.chars().last() { if last == '\\' { // full_path is a directory, do nothing return rtrim_backslash(full_path).to_owned(); } } let new_has_dot = new_extension.chars().next() == Some('.'); match full_path.rfind('.') { None => format!("{}{}{}", // file name without extension, just append it full_path, if new_has_dot { "" } else { "." }, new_extension, ), Some(idx) => format!("{}{}{}", &full_path[0..idx], if new_has_dot { "" } else { "." }, new_extension, ), } } /// Replaces the file name by the given one. #[must_use] pub fn replace_file_name(full_path: &str, new_file: &str) -> String { match get_path(full_path) { None => new_file.to_owned(), Some(path) => format!("{}\\{}", path, new_file), } } /// Keeps the file name and replaces the path by the given one. 
/// /// # Examples /// /// ```no_run /// use winsafe::{self as w, prelude::*}; /// /// let p = w::path::replace_path( // C:\another\foo.txt /// "C:\\Temp\\foo.txt", /// "C:\\another", /// ); /// ``` #[must_use] pub fn replace_path(full_path: &str, new_path: &str) -> String { let file_name = get_file_name(full_path); format!("{}{}{}", rtrim_backslash(new_path), if file_name.is_some() { "\\" } else { "" }, file_name.unwrap_or("")) } /// Removes a trailing backslash, if any. /// /// # Examples /// /// ```no_run /// use winsafe::{self as w, prelude::*}; /// /// let p = w::path::rtrim_backslash("C:\\Temp\\"); // C:\Temp /// ``` #[must_use] pub fn rtrim_backslash(full_path: &str) -> &str { match full_path.chars().last() { None => full_path, // empty string Some(last_ch) => if last_ch == '\\' { let mut chars = full_path.chars(); chars.next_back(); // remove last char chars.as_str() } else { full_path // no trailing backslash }, } } /// Returns a `Vec` with each part of the full path. #[must_use] pub fn split_parts(full_path: &str) -> Vec<&str> { let no_bs = rtrim_backslash(full_path); no_bs.split('\\').collect() } //------------------------------------------------------------------------------ pub(in crate::kernel) struct DirListIter<'a> { dir_path: String, filter: Option<&'a str>, hfind: Option<FindCloseGuard>, wfd: WIN32_FIND_DATA, no_more: bool, } impl<'a> Iterator for DirListIter<'a> { type Item = SysResult<String>; fn next(&mut self) -> Option<Self::Item> { if self.no_more { return None; } let found = match &self.hfind { None => { // first pass let dir_final = match self.filter { None => format!("{}\\*", self.dir_path), Some(filter) => format!("{}\\{}", self.dir_path, filter), }; let found = match HFINDFILE::FindFirstFile(&dir_final, &mut self.wfd) { Err(e) => { self.no_more = true; // prevent further iterations return Some(Err(e)); }, Ok((hfind, found)) => { self.hfind = Some(hfind); // store our find handle found }, }; found }, Some(hfind) => { // subsequent passes match hfind.FindNextFile(&mut self.wfd) { Err(e) => { self.no_more = true; // prevent further iterations return Some(Err(e)); }, Ok(found) => found, } }, }; if found { let file_name = self.wfd.cFileName(); if file_name == "." || file_name == ".." { // skip these self.next() } else { Some(Ok(format!("{}\\{}", self.dir_path, self.wfd.cFileName()))) } } else { None } } } impl<'a> DirListIter<'a> { pub(in crate::kernel) fn new( dir_path: String, filter: Option<&'a str>, ) -> Self { Self { dir_path: rtrim_backslash(&dir_path).to_owned(), filter, hfind: None, wfd: WIN32_FIND_DATA::default(), no_more: false, } } } //------------------------------------------------------------------------------ pub(in crate::kernel) struct DirWalkIter<'a> { runner: DirListIter<'a>, subdir_runner: Option<Box<DirWalkIter<'a>>>, no_more: bool, } impl<'a> Iterator for DirWalkIter<'a> { type Item = SysResult<String>; fn next(&mut self) -> Option<Self::Item> { if self.no_more
match &mut self.subdir_runner { None => { let cur_file = self.runner.next(); match cur_file { None => None, Some(cur_file) => { match cur_file { Err(e) => { self.no_more = true; // prevent further iterations Some(Err(e)) }, Ok(cur_file) => { if is_directory(&cur_file) { self.subdir_runner = Some(Box::new(Self::new(cur_file))); // recursively self.next() } else { Some(Ok(cur_file)) } }, } }, } }, Some(subdir_runner) => { let inner_file = subdir_runner.next(); match inner_file { None => { // subdir_runner finished his work self.subdir_runner = None; self.next() }, Some(inner_file) => { Some(inner_file) }, } }, } } } impl<'a> DirWalkIter<'a> { pub(in crate::kernel) fn new(dir_path: String) -> Self { Self { runner: DirListIter::new(dir_path, None), subdir_runner: None, no_more: false, } } }
{ return None; }
conditional_block
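In this row the masked middle is only the block attached to a conditional: the { return None; } that follows if self.no_more in DirWalkIter::next, which is what conditional_block appears to label. The same guard pattern in a trimmed, standalone form (types simplified; the crate sets no_more after yielding an error, while this sketch fuses after the first exhausted call):

// The early-return guard isolated by this row's split: once `no_more`
// is set, every later call to `next` short-circuits to `None`.
struct Fused<I: Iterator> {
    inner: I,
    no_more: bool,
}

impl<I: Iterator> Iterator for Fused<I> {
    type Item = I::Item;

    fn next(&mut self) -> Option<Self::Item> {
        if self.no_more {
            return None; // <- the span masked as `middle` in this row
        }
        let item = self.inner.next();
        if item.is_none() {
            self.no_more = true; // never poll the inner iterator again
        }
        item
    }
}

fn main() {
    let mut it = Fused { inner: vec![1, 2].into_iter(), no_more: false };
    assert_eq!(it.next(), Some(1));
    assert_eq!(it.next(), Some(2));
    assert_eq!(it.next(), None);
    assert_eq!(it.next(), None); // stays fused
}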
path.rs
//! File path utilities. //! //! Some of the functions are similar to [`std::path::Path`] ones, but here they //! work directly upon [`&str`](str) instead of [`&OsStr`](std::ffi::OsStr). use crate::co; use crate::decl::*; use crate::guard::*; use crate::prelude::*; /// Returns an iterator over the files and folders within a directory. /// Optionally, a wildcard can be specified to filter files by name. /// /// This is a high-level abstraction over [`HFINDFILE`](crate::HFINDFILE) /// iteration functions. /// /// # Examples /// /// Listing all text files in a directory: /// /// ```no_run /// use winsafe::{self as w, prelude::*}; /// /// for file_path in w::path::dir_list("C:\\temp", Some("*.txt")) { /// let file_path = file_path?; /// println!("{}", file_path); /// } /// # Ok::<_, winsafe::co::ERROR>(()) /// ``` #[must_use] pub fn dir_list<'a>( dir_path: &'a str, filter: Option<&'a str>, ) -> impl Iterator<Item = SysResult<String>> + 'a { DirListIter::new(dir_path.to_owned(), filter) } /// Returns an interator over the files within a directory, and all its /// subdirectories, recursively. /// /// This is a high-level abstraction over [`HFINDFILE`](crate::HFINDFILE) /// iteration functions. /// /// # Examples /// /// ```no_run /// use winsafe::{self as w, prelude::*}; /// /// // Ordinary for loop /// for file_path in w::path::dir_walk("C:\\Temp") { /// let file_path = file_path?; /// println!("{}", file_path); /// } /// /// // Closure with try_for_each /// w::path::dir_walk("C:\\Temp") /// .try_for_each(|file_path| { /// let file_path = file_path?; /// println!("{}", file_path); /// Ok(()) /// })?; /// /// // Collecting into a Vec /// let all = w::path::dir_walk("C:\\Temp") /// .collect::<w::SysResult<Vec<_>>>()?; /// /// // Transforming and collecting into a Vec /// let all = w::path::dir_walk("C:\\Temp") /// .map(|file_path| { /// let file_path = file_path?; /// Ok(format!("PATH: {}", file_path)) /// }) /// .collect::<w::SysResult<Vec<_>>>()?; /// # Ok::<_, winsafe::co::ERROR>(()) /// ``` #[must_use] pub fn dir_walk<'a>( dir_path: &'a str, ) -> impl Iterator<Item = SysResult<String>> + 'a { DirWalkIter::new(dir_path.to_owned()) } /// Returns the path of the current EXE file, without the EXE filename, and /// without a trailing backslash. /// /// In a debug build, the `target\debug` folders will be suppressed. #[cfg(debug_assertions)] #[must_use] pub fn exe_path() -> SysResult<String> { let dbg = HINSTANCE::NULL.GetModuleFileName()?; Ok( get_path( // target get_path( // debug get_path(&dbg).unwrap(), // exe name ).unwrap(), ).unwrap() .to_owned(), ) } /// Returns the path of the current EXE file, without the EXE filename, and /// without a trailing backslash. /// /// In a debug build, the `target\debug` folders will be suppressed. #[cfg(not(debug_assertions))] #[must_use] pub fn exe_path() -> SysResult<String> { Ok( get_path(&HINSTANCE::NULL.GetModuleFileName()?) .unwrap().to_owned(), ) } /// Returns true if the path exists. #[must_use] pub fn exists(full_path: &str) -> bool { GetFileAttributes(full_path).is_ok() } /// Extracts the file name from a full path, if any. 
/// /// # Examples /// /// ```no_run /// use winsafe::{self as w, prelude::*}; /// /// let f = w::path::get_file_name("C:\\Temp\\foo.txt"); // foo.txt /// ``` #[must_use] pub fn get_file_name(full_path: &str) -> Option<&str> { match full_path.rfind('\\') { None => Some(full_path), // if no backslash, the whole string is the file name Some(idx) => if idx == full_path.chars().count() - 1 { None // last char is '\\', no file name } else { Some(&full_path[idx + 1..]) }, } } /// Extracts the full path, but the last part. /// /// # Examples /// /// ```no_run /// use winsafe::{self as w, prelude::*}; /// /// let p = w::path::get_path("C:\\Temp\\xx\\a.txt"); // C:\Temp\xx /// let q = w::path::get_path("C:\\Temp\\xx\\"); // C:\Temp\xx /// let r = w::path::get_path("C:\\Temp\\xx"); // C:\Temp" /// ``` #[must_use] pub fn get_path(full_path: &str) -> Option<&str> { full_path.rfind('\\') // if no backslash, the whole string is the file name, so no path .map(|idx| &full_path[0..idx]) } /// Tells whether the full path ends in one of the given extensions, /// case-insensitive. /// /// # Examples /// /// ```no_run /// use winsafe::{self as w, prelude::*}; /// /// println!("{}", /// w::path::has_extension("file.txt", &[".txt", ".bat"])); /// ``` #[must_use] pub fn has_extension(full_path: &str, extensions: &[impl AsRef<str>]) -> bool { let full_path_u = full_path.to_uppercase(); extensions.iter() .find(|ext| { let ext_u = ext.as_ref().to_uppercase(); full_path_u.ends_with(&ext_u) }) .is_some() } /// Returns true if the path is a directory. /// /// # Panics /// /// Panics if the path does not exist. #[must_use] pub fn is_directory(full_path: &str) -> bool { let flags = GetFileAttributes(full_path).unwrap(); flags.has(co::FILE_ATTRIBUTE::DIRECTORY) } /// Returns true if the path is hidden. /// /// # Panics /// /// Panics if the path does not exist. #[must_use] pub fn is_hidden(full_path: &str) -> bool { let flags = GetFileAttributes(full_path).unwrap(); flags.has(co::FILE_ATTRIBUTE::HIDDEN) } /// Replaces the extension by the given one. /// /// # Examples /// /// ```no_run /// use winsafe::{self as w, prelude::*}; /// /// let p = w::path::replace_extension( /// "C:\\Temp\\something.txt", ".sh"); // C:\Temp\something.sh /// ``` #[must_use] pub fn
(full_path: &str, new_extension: &str) -> String { if let Some(last) = full_path.chars().last() { if last == '\\' { // full_path is a directory, do nothing return rtrim_backslash(full_path).to_owned(); } } let new_has_dot = new_extension.chars().next() == Some('.'); match full_path.rfind('.') { None => format!("{}{}{}", // file name without extension, just append it full_path, if new_has_dot { "" } else { "." }, new_extension, ), Some(idx) => format!("{}{}{}", &full_path[0..idx], if new_has_dot { "" } else { "." }, new_extension, ), } } /// Replaces the file name by the given one. #[must_use] pub fn replace_file_name(full_path: &str, new_file: &str) -> String { match get_path(full_path) { None => new_file.to_owned(), Some(path) => format!("{}\\{}", path, new_file), } } /// Keeps the file name and replaces the path by the given one. /// /// # Examples /// /// ```no_run /// use winsafe::{self as w, prelude::*}; /// /// let p = w::path::replace_path( // C:\another\foo.txt /// "C:\\Temp\\foo.txt", /// "C:\\another", /// ); /// ``` #[must_use] pub fn replace_path(full_path: &str, new_path: &str) -> String { let file_name = get_file_name(full_path); format!("{}{}{}", rtrim_backslash(new_path), if file_name.is_some() { "\\" } else { "" }, file_name.unwrap_or("")) } /// Removes a trailing backslash, if any. /// /// # Examples /// /// ```no_run /// use winsafe::{self as w, prelude::*}; /// /// let p = w::path::rtrim_backslash("C:\\Temp\\"); // C:\Temp /// ``` #[must_use] pub fn rtrim_backslash(full_path: &str) -> &str { match full_path.chars().last() { None => full_path, // empty string Some(last_ch) => if last_ch == '\\' { let mut chars = full_path.chars(); chars.next_back(); // remove last char chars.as_str() } else { full_path // no trailing backslash }, } } /// Returns a `Vec` with each part of the full path. #[must_use] pub fn split_parts(full_path: &str) -> Vec<&str> { let no_bs = rtrim_backslash(full_path); no_bs.split('\\').collect() } //------------------------------------------------------------------------------ pub(in crate::kernel) struct DirListIter<'a> { dir_path: String, filter: Option<&'a str>, hfind: Option<FindCloseGuard>, wfd: WIN32_FIND_DATA, no_more: bool, } impl<'a> Iterator for DirListIter<'a> { type Item = SysResult<String>; fn next(&mut self) -> Option<Self::Item> { if self.no_more { return None; } let found = match &self.hfind { None => { // first pass let dir_final = match self.filter { None => format!("{}\\*", self.dir_path), Some(filter) => format!("{}\\{}", self.dir_path, filter), }; let found = match HFINDFILE::FindFirstFile(&dir_final, &mut self.wfd) { Err(e) => { self.no_more = true; // prevent further iterations return Some(Err(e)); }, Ok((hfind, found)) => { self.hfind = Some(hfind); // store our find handle found }, }; found }, Some(hfind) => { // subsequent passes match hfind.FindNextFile(&mut self.wfd) { Err(e) => { self.no_more = true; // prevent further iterations return Some(Err(e)); }, Ok(found) => found, } }, }; if found { let file_name = self.wfd.cFileName(); if file_name == "." || file_name == ".." 
{ // skip these self.next() } else { Some(Ok(format!("{}\\{}", self.dir_path, self.wfd.cFileName()))) } } else { None } } } impl<'a> DirListIter<'a> { pub(in crate::kernel) fn new( dir_path: String, filter: Option<&'a str>, ) -> Self { Self { dir_path: rtrim_backslash(&dir_path).to_owned(), filter, hfind: None, wfd: WIN32_FIND_DATA::default(), no_more: false, } } } //------------------------------------------------------------------------------ pub(in crate::kernel) struct DirWalkIter<'a> { runner: DirListIter<'a>, subdir_runner: Option<Box<DirWalkIter<'a>>>, no_more: bool, } impl<'a> Iterator for DirWalkIter<'a> { type Item = SysResult<String>; fn next(&mut self) -> Option<Self::Item> { if self.no_more { return None; } match &mut self.subdir_runner { None => { let cur_file = self.runner.next(); match cur_file { None => None, Some(cur_file) => { match cur_file { Err(e) => { self.no_more = true; // prevent further iterations Some(Err(e)) }, Ok(cur_file) => { if is_directory(&cur_file) { self.subdir_runner = Some(Box::new(Self::new(cur_file))); // recursively self.next() } else { Some(Ok(cur_file)) } }, } }, } }, Some(subdir_runner) => { let inner_file = subdir_runner.next(); match inner_file { None => { // subdir_runner finished his work self.subdir_runner = None; self.next() }, Some(inner_file) => { Some(inner_file) }, } }, } } } impl<'a> DirWalkIter<'a> { pub(in crate::kernel) fn new(dir_path: String) -> Self { Self { runner: DirListIter::new(dir_path, None), subdir_runner: None, no_more: false, } } }
replace_extension
identifier_name
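Here the masked middle is just the function's name, replace_extension: the prefix ends right after pub fn and the suffix resumes at the parameter list, which is the identifier_name case. A heavily abbreviated sketch of how the three fields of this row line up (the string literals are shortened stand-ins, not the full dataset values):

fn main() {
    let prefix = "#[must_use] pub fn ";                                // end of this row's prefix (abbreviated)
    let middle = "replace_extension";                                  // the masked identifier name
    let suffix = "(full_path: &str, new_extension: &str) -> String";   // start of the suffix (abbreviated)
    let rebuilt = format!("{prefix}{middle}{suffix}");
    assert!(rebuilt.starts_with("#[must_use] pub fn replace_extension("));
}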
lib.rs
//! [![GitHub time-rs/time](https://img.shields.io/badge/GitHub-time--rs%2Ftime-9b88bb?logo=github&style=for-the-badge)](https://github.com/time-rs/time) //!![license MIT or Apache-2.0](https://img.shields.io/badge/license-MIT%20or%20Apache--2.0-779a6b?style=for-the-badge) //! [![minimum rustc 1.40.0](https://img.shields.io/badge/minimum%20rustc-1.40.0-c18170?logo=rust&style=for-the-badge)](https://www.whatrustisit.com) //! //! # Feature flags //! //! This crate exposes a number of features. These can be enabled or disabled as //! shown [in Cargo's documentation](https://doc.rust-lang.org/cargo/reference/features.html). //! Features are _disabled_ by default unless otherwise noted. //! //! Reliance on a given feature is always indicated alongside the item //! definition. //! //! - `std` (_enabled by default, implicitly enables `alloc`_) //! //! This enables a number of features that depend on the standard library. //! [`Instant`] is the primary item that requires this feature, though some //! others methods may rely on [`Instant`] internally. //! //! - `alloc` (_enabled by default via `std`_) //! //! Enables a number of features that require the ability to dynamically //! allocate memory. //! //! - `macros` //! //! Enables macros that provide compile-time verification of values and //! intuitive syntax. //! //! - `local-offset` (_implicitly enables `std`_) //! //! This feature enables a number of methods that allow obtaining the system's //! UTC offset. //! //! - `large-dates` //! //! By default, only years within the ±9999 range (inclusive) are supported. //! If you need support for years outside this range, consider enabling this //! feature; the supported range will be increased to ±999,999. //! //! Note that enabling this feature has some costs, as it means forgoing some //! optimizations. Ambiguities may be introduced when parsing that would not //! otherwise exist. //! //! - `serde` //! //! Enables [serde](https://docs.rs/serde) support for all types. //! //! - `rand` //! //! Enables [rand](https://docs.rs/rand) support for all types. //! //! - `quickcheck` (_implicitly enables `rand`_) //! //! Enables [quickcheck](https://docs.rs/quickcheck) support for all types except [`Instant`]. 
#![cfg_attr(__time_03_docs, feature(doc_cfg))] #![cfg_attr(__time_03_docs, deny(broken_intra_doc_links))] #![cfg_attr(not(feature = "std"), no_std)] #![deny( anonymous_parameters, clippy::all, const_err, illegal_floating_point_literal_pattern, late_bound_lifetime_arguments, path_statements, patterns_in_fns_without_body, rust_2018_idioms, trivial_casts, trivial_numeric_casts, unreachable_pub, unsafe_code, unused_extern_crates )] #![warn( clippy::dbg_macro, clippy::decimal_literal_representation, clippy::get_unwrap, clippy::missing_docs_in_private_items, clippy::nursery, clippy::pedantic, clippy::print_stdout, clippy::todo, clippy::unimplemented, clippy::unwrap_used, clippy::use_debug, missing_copy_implementations, missing_debug_implementations, unused_qualifications, variant_size_differences )] #![allow( clippy::cast_lossless, clippy::cast_possible_truncation, clippy::cast_possible_wrap, clippy::cast_precision_loss, clippy::cast_sign_loss, clippy::enum_glob_use, clippy::map_err_ignore, clippy::missing_errors_doc, clippy::must_use_candidate, clippy::redundant_pub_crate, clippy::wildcard_imports )] #![doc(html_favicon_url = "https://avatars0.githubusercontent.com/u/55999857")] #![doc(html_logo_url = "https://avatars0.githubusercontent.com/u/55999857")] #![doc(test(attr(deny(warnings))))] #[cfg(feature = "alloc")] extern crate alloc; /// Returns `Err(error::ComponentRange)` if the value is not in range. macro_rules! ensure_value_in_range { ($value:ident in $start:expr => $end:expr) => {{ #![allow(trivial_numeric_casts, unused_comparisons)] if $value < $start || $value > $end { return Err(crate::error::ComponentRange { name: stringify!($value), minimum: $start as _, maximum: $end as _, value: $value as _, conditional_range: false, }); } }}; ($value:ident conditionally in $start:expr => $end:expr) => {{ #![allow(trivial_numeric_casts, unused_comparisons)] if $value < $start || $value > $end { return Err(crate::error::ComponentRange { name: stringify!($value), minimum: $start as _, maximum: $end as _, value: $value as _, conditional_range: true, }); } }}; } /// Try to unwrap an expression, returning if not possible. /// /// This is similar to the `?` operator, but does not perform `.into()`. Because /// of this, it is usable in `const` contexts.
Err(error) => return Err(error), } }; } /// Try to unwrap an expression, returning if not possible. /// /// This is similar to the `?` operator, but is usable in `const` contexts. macro_rules! const_try_opt { ($e:expr) => { match $e { Some(value) => value, None => return None, } }; } /// The [`Date`] struct and its associated `impl`s. mod date; /// The [`Duration`] struct and its associated `impl`s. mod duration; /// Various error types returned by methods in the time crate. pub mod error; /// Extension traits. pub mod ext; pub mod format_description; mod formatting; mod hack; /// The [`Instant`] struct and its associated `impl`s. #[cfg(feature = "std")] #[cfg_attr(__time_03_docs, doc(cfg(feature = "std")))] mod instant; /// The [`OffsetDateTime`] struct and its associated `impl`s. mod offset_date_time; /// The [`PrimitiveDateTime`] struct and its associated `impl`s. mod primitive_date_time; #[cfg(feature = "quickcheck")] #[cfg_attr(__time_03_docs, doc(cfg(feature = "quickcheck")))] mod quickcheck; #[cfg(feature = "rand")] #[cfg_attr(__time_03_docs, doc(cfg(feature = "rand")))] mod rand; #[cfg(feature = "serde")] #[cfg_attr(__time_03_docs, doc(cfg(feature = "serde")))] #[allow(missing_copy_implementations, missing_debug_implementations)] pub mod serde; /// The [`Time`] struct and its associated `impl`s. mod time; /// The [`UtcOffset`] struct and its associated `impl`s. mod utc_offset; pub mod util; /// Days of the week. mod weekday; /// Macros to construct statically known values. #[cfg(feature = "macros")] #[cfg_attr(__time_03_docs, doc(cfg(feature = "macros")))] pub mod macros { /// Construct a [`Date`](crate::Date) with a statically known value. /// /// The resulting expression can be used in `const` or `static` declarations. /// /// Three formats are supported: year-week-weekday, year-ordinal, and /// year-month-day. /// /// ```rust /// # use time::{Date, Weekday::*}; /// # use time_macros::date; /// assert_eq!( /// date!("2020-W01-3"), /// Date::from_iso_week_date(2020, 1, Wednesday)? /// ); /// assert_eq!(date!("2020-001"), Date::from_ordinal_date(2020, 1)?); /// assert_eq!(date!("2020-01-01"), Date::from_calendar_date(2020, 1, 1)?); /// # Ok::<_, time::Error>(()) /// ``` pub use time_macros::date; /// Construct a [`PrimitiveDateTime`] or [`OffsetDateTime`] with a /// statically known value. /// /// The resulting expression can be used in `const` or `static` declarations. /// /// The syntax accepted by this macro is the same as [`date!`] and /// [`time!`], with an optional [`offset!`], all space-separated. If an /// [`offset!`] is provided, the resulting value will be an /// [`OffsetDateTime`]; otherwise it will be a [`PrimitiveDateTime`]. /// /// [`date!`]: crate::macros::date /// [`time!`]: crate::macros::time /// [`offset!`]: crate::macros::offset /// [`OffsetDateTime`]: crate::OffsetDateTime /// [`PrimitiveDateTime`]: crate::PrimitiveDateTime pub use time_macros::datetime; /// Construct a [`UtcOffset`](crate::UtcOffset) with a statically known value. /// /// The resulting expression can be used in `const` or `static` declarations. /// /// A sign and the hour must be provided; minutes and seconds default to zero. /// `UTC` (both uppercase and lowercase) is also allowed. 
/// /// ```rust /// # use time::UtcOffset; /// # use time_macros::offset; /// assert_eq!(offset!("UTC"), UtcOffset::from_hms(0, 0, 0)?); /// assert_eq!(offset!("utc"), UtcOffset::from_hms(0, 0, 0)?); /// assert_eq!(offset!("+0"), UtcOffset::from_hms(0, 0, 0)?); /// assert_eq!(offset!("+1"), UtcOffset::from_hms(1, 0, 0)?); /// assert_eq!(offset!("-1"), UtcOffset::from_hms(-1, 0, 0)?); /// assert_eq!(offset!("+1:30"), UtcOffset::from_hms(1, 30, 0)?); /// assert_eq!(offset!("-1:30"), UtcOffset::from_hms(-1, -30, 0)?); /// assert_eq!(offset!("+1:30:59"), UtcOffset::from_hms(1, 30, 59)?); /// assert_eq!(offset!("-1:30:59"), UtcOffset::from_hms(-1, -30, -59)?); /// assert_eq!(offset!("+23:59:59"), UtcOffset::from_hms(23, 59, 59)?); /// assert_eq!(offset!("-23:59:59"), UtcOffset::from_hms(-23, -59, -59)?); /// # Ok::<_, time::Error>(()) /// ``` pub use time_macros::offset; /// Construct a [`Time`](crate::Time) with a statically known value. /// /// The resulting expression can be used in `const` or `static` declarations. /// /// Hours and minutes must be provided, while seconds defaults to zero. AM/PM is /// allowed (either uppercase or lowercase). Any number of subsecond digits may /// be provided (though any past nine will be discarded). /// /// All components are validated at compile-time. An error will be raised if any /// value is invalid. /// /// ```rust /// # use time::Time; /// # use time_macros::time; /// assert_eq!(time!("0:00"), Time::from_hms(0, 0, 0)?); /// assert_eq!(time!("1:02:03"), Time::from_hms(1, 2, 3)?); /// assert_eq!( /// time!("1:02:03.004_005_006"), /// Time::from_hms_nano(1, 2, 3, 4_005_006)? /// ); /// assert_eq!(time!("12:00 am"), Time::from_hms(0, 0, 0)?); /// assert_eq!(time!("1:02:03 am"), Time::from_hms(1, 2, 3)?); /// assert_eq!( /// time!("1:02:03.004_005_006 am"), /// Time::from_hms_nano(1, 2, 3, 4_005_006)? /// ); /// assert_eq!(time!("12:00 pm"), Time::from_hms(12, 0, 0)?); /// assert_eq!(time!("1:02:03 pm"), Time::from_hms(13, 2, 3)?); /// assert_eq!( /// time!("1:02:03.004_005_006 pm"), /// Time::from_hms_nano(13, 2, 3, 4_005_006)? /// ); /// # Ok::<_, time::Error>(()) /// ``` pub use time_macros::time; } pub use crate::time::Time; pub use date::Date; pub use duration::Duration; pub use error::Error; #[cfg(feature = "std")] pub use instant::Instant; pub use offset_date_time::OffsetDateTime; pub use primitive_date_time::PrimitiveDateTime; pub use utc_offset::UtcOffset; pub use weekday::Weekday; /// An alias for [`std::result::Result`] with a generic error from the time /// crate. pub type Result<T> = core::result::Result<T, Error>;
macro_rules! const_try { ($e:expr) => { match $e { Ok(value) => value,
random_line_split
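random_line_split rows cut at an arbitrary line rather than at a syntactic unit: here the middle is the opening of the const_try! macro and the suffix picks up at the Err(error) arm of its match. Reassembled from this row's middle and suffix, plus a hedged usage sketch (the const fn below is illustrative, not from the time crate):

/// Reassembled from this row: try-unwrap that works in `const` contexts,
/// unlike `?`, which also performs `.into()` on the error.
macro_rules! const_try {
    ($e:expr) => {
        match $e {
            Ok(value) => value,
            Err(error) => return Err(error),
        }
    };
}

// Illustrative use in a const fn (not part of the crate).
const fn double_or_fail(input: Result<i32, ()>) -> Result<i32, ()> {
    Ok(const_try!(input) * 2)
}

fn main() {
    assert_eq!(double_or_fail(Ok(21)), Ok(42));
    assert_eq!(double_or_fail(Err(())), Err(()));
}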
play15old2.rs
use ndarray::Array2; use rand::Rng; use std::borrow::Borrow; use std::cmp::Ordering; use std::collections::{BinaryHeap, HashMap, HashSet, VecDeque}; use std::fmt::{Display, Formatter}; pub const WIDTH: usize = 4; pub const HEIGHT: usize = 4; #[derive(Debug, Clone, Copy, Hash, Eq, PartialEq)] pub struct Board([[u8; WIDTH]; HEIGHT]); #[derive(Debug, Copy, Clone, Default, Eq, PartialEq)] pub struct
; impl Board { pub fn new() -> Self { let mut arr = [[0u8; WIDTH]; HEIGHT]; for y in 0..WIDTH { for x in 0..HEIGHT { arr[y][x] = ((y * WIDTH + x + 1) % (WIDTH * HEIGHT)) as u8 } } Board(arr) } pub fn from_array(arr: [[u8; WIDTH]; HEIGHT]) -> Result<Self, BoardCreateError> { let w = WIDTH; let h = HEIGHT; let mut tile_count = vec![0; w * h]; for y in 0..HEIGHT { for x in 0..WIDTH { tile_count.get_mut(arr[y][x] as usize).map(|x| *x += 1); } } let has_one_of_all = tile_count.iter().all(|x| *x == 1); if has_one_of_all { Ok(Board(arr)) } else { Err(BoardCreateError) } } pub fn size(&self) -> (usize, usize) { (WIDTH, HEIGHT) } pub fn empty_at(&self) -> (usize, usize) { for y in 0..HEIGHT { for x in 0..WIDTH { if self.0[y][x] == 0 { return (x, y); } } } panic!() } #[inline(always)] pub fn swap(&mut self, p1: (usize, usize), p2: (usize, usize)) { let arr = &mut self.0; let t1 = arr[p1.1][p1.0]; let t2 = arr[p2.1][p2.0]; arr[p1.1][p1.0] = t2; arr[p2.1][p2.0] = t1; } pub fn apply(&mut self, dir: Dir) -> Result<(), ()> { let (zx, zy) = self.empty_at(); let (w, h) = self.size(); match dir { Dir::Right if zx < w - 1 => { self.swap((zx, zy), (zx + 1, zy)); Ok(()) } Dir::Down if zy < h - 1 => { self.swap((zx, zy), (zx, zy + 1)); Ok(()) } Dir::Left if zx > 0 => { self.swap((zx, zy), (zx - 1, zy)); Ok(()) } Dir::Up if zy > 0 => { self.swap((zx, zy), (zx, zy - 1)); Ok(()) } _ => Err(()), } } // // pub fn possible_steps_with<F: FnMut(Board, u8)>(&self, mut f: F) { // let (zx, zy) = self.empty_at(); // let w = self.0.shape()[0]; // let h = self.0.shape()[1]; // if zx < w - 1 { // // Направо // let mut b = self.clone(); // b.0.swap((zx, zy), (zx + 1, zy)); // let moved = b.0[(zx, zy)]; // f(b, moved) // } // if zy < h - 1 { // // Вниз // let mut b = self.clone(); // b.0.swap((zx, zy), (zx, zy + 1)); // let moved = b.0[(zx, zy)]; // f(b, moved) // } // if zx > 0 { // // Налево // let mut b = self.clone(); // b.0.swap((zx, zy), (zx - 1, zy)); // let moved = b.0[(zx, zy)]; // f(b, moved) // } // if zy > 0 { // // Вверх // let mut b = self.clone(); // b.0.swap((zx, zy), (zx, zy - 1)); // let moved = b.0[(zx, zy)]; // f(b, moved) // } // } pub fn is_solved(&self) -> bool { let (w, h) = self.size(); for y in 0..h { for x in 0..w { if ((y * w + x + 1) % (w * h)) as u8!= self.0[y][x] { return false; } } } true } pub fn can_solve(&self) -> bool { // let (w, h) = self.size(); // let mut flat = Vec::<u8>::with_capacity(w * h); // for y in 0..h { // for x in 0..w { // flat.push(self.0[(x, y)]); // } // } // let (_zx, zy) = self.empty_at(); // let sum: usize = (0..flat.len()) // .map(|i| { // let c = flat[i] as usize; // let c = if c == 0 { w * h } else { c }; // let k = flat[i..] 
// .iter() // .map(|x| if *x == 0 { (w * h) } else { *x as usize }) // .filter(|x| *x < c) // .count(); // k // }) // .sum(); // let n = sum + zy; // n % 2 == 0 true } pub fn wrong_tiles(&self) -> usize { let (w, h) = self.size(); let mut c = 0; for y in 0..h { for x in 0..w { if ((y * w + x + 1) % (w * h)) as u8!= self.0[y][x] { c += 1; } } } c } pub fn solve(&self) -> Result<Path, ()> { if!self.can_solve() { return Err(()); } let mut checked_position_length = HashMap::new(); let mut heap = BinaryHeap::with_capacity(1000); heap.push(QPath(Path::new(self.clone()))); let mut i = 0; loop { i += 1; let current = heap.pop().unwrap(); let last = checked_position_length.get_mut(&current.0.current_board); let remove_longer = |heap: &mut BinaryHeap<QPath>, to_remove: Board| { heap.retain(|qpath| qpath.0.current_board!= to_remove); }; if i % 10_000 == 0 { println!( "iter = {}e4, path len = {}, euristic = {}, in heap {} el", i / 10_000, current.0.path().len(), current.0.current_board().wrong_tiles(), heap.len() ); } match last { Some(last) if *last <= current.0.path.len() => continue, Some(last) => { *last = current.0.path.len(); //remove_longer(&mut heap, current.0.current_board); } _ => { checked_position_length.insert(current.0.current_board, current.0.path.len()); //remove_longer(&mut heap, current.0.current_board); } } // println!("Current board with {}", current.cost()); if current.0.current_board().is_solved() { return Ok(current.0); } let mut push_or_ignore = |dir| { // Oh... Remove? if heap.len() > 1_000_000 { let mut replacement = BinaryHeap::with_capacity(1_000_005); for _i in 0..10_000 { replacement.push(heap.pop().unwrap()); } heap = replacement; } // ^^^^^^^ let mut c = &current; let path = c.0.push_step_cloned(dir); if let Ok(path) = path { if!checked_position_length.contains_key(path.current_board()) { heap.push(QPath::new(path)); } } }; // 15 2 1 12 8 5 6 11 4 9 10 7 3 14 13 0 push_or_ignore(Dir::Up); push_or_ignore(Dir::Right); push_or_ignore(Dir::Down); push_or_ignore(Dir::Left); } } pub fn inner(&self) -> &[[u8; WIDTH]; HEIGHT] { &self.0 } } impl Display for Board { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { let (w, h) = self.size(); for y in 0..h { for x in 0..w { match w * h { 0..=9 => write!(f, "{:1} ", self.0[y][x])?, 10..=99 => write!(f, "{:2} ", self.0[y][x])?, 100..=999 => write!(f, "{:3} ", self.0[y][x])?, _ => panic!(""), }; } writeln!(f)?; } Ok(()) } } #[derive(Copy, Clone, Eq, PartialEq, Debug, Hash)] pub enum Dir { Up, Right, Down, Left, } #[derive(Clone, Debug, Hash, Eq, PartialEq)] pub struct Path { current_board: Board, path: Vec<Dir>, } impl Path { pub fn current_board(&self) -> &Board { &self.current_board } pub fn path(&self) -> &Vec<Dir> { &self.path } pub fn len(&self) -> usize { self.path.len() } } impl Path { pub fn new(start_board: Board) -> Self { Self { current_board: start_board, path: Vec::new(), } } pub fn push_step(&mut self, dir: Dir) -> Result<(), ()> { self.current_board.apply(dir).map(|_| self.path.push(dir)) } pub fn push_step_cloned(&self, dir: Dir) -> Result<Self, ()> { let mut board_clone = self.current_board.clone(); board_clone.apply(dir)?; let mut path_clone = self.path.clone(); path_clone.push(dir); Ok(Self { current_board: board_clone, path: path_clone, }) } } #[derive(Clone)] struct QPath(Path); impl QPath { fn new(p: Path) -> Self { Self(p) } pub fn cost(&self) -> usize { let g = self.0.len(); let f = self.0.current_board.wrong_tiles(); // let f: usize = self // .0 // .current_board() // .inner() // .indexed_iter() // 
.map(|((x, y), v)| { // let (w, h) = self.0.current_board().size(); // let (ox, oy) = if *v == 0 { // (w - 1, h - 1) // } else { // let v = (*v - 1) as usize; // (v % w, v / h) // }; // (ox.max(x) - ox.min(x)) + (oy.max(y) - oy.min(y)) // }) // .sum(); g + f } } impl Ord for QPath { fn cmp(&self, other: &Self) -> Ordering { (other.cost()).cmp(&self.cost()) } } impl PartialOrd for QPath { fn partial_cmp(&self, other: &Self) -> Option<Ordering> { Some(self.cmp(other)) } } impl PartialEq for QPath { fn eq(&self, other: &Self) -> bool { self.cmp(other) == Ordering::Equal } } impl Eq for QPath {}
BoardCreateError
identifier_name
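Another identifier_name row: the masked middle is the unit struct's name, BoardCreateError, with the prefix stopping after pub struct and the suffix resuming at the trailing semicolon. For context, a miniature standalone version of the duplicate-tile check in Board::from_array that produces this error (the validate helper and its signature are made up for illustration):

// Every tile value in 0..side*side must occur exactly once,
// otherwise board construction fails with `BoardCreateError`.
#[derive(Debug, PartialEq)]
struct BoardCreateError;

fn validate(tiles: &[u8], side: usize) -> Result<(), BoardCreateError> {
    let mut count = vec![0usize; side * side];
    for &t in tiles {
        if let Some(c) = count.get_mut(t as usize) {
            *c += 1;
        }
    }
    if count.iter().all(|&c| c == 1) { Ok(()) } else { Err(BoardCreateError) }
}

fn main() {
    assert_eq!(validate(&[1, 2, 3, 0], 2), Ok(()));                    // 2x2 board, each tile once
    assert_eq!(validate(&[1, 1, 3, 0], 2), Err(BoardCreateError));     // duplicate tile
}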
play15old2.rs
use ndarray::Array2; use rand::Rng; use std::borrow::Borrow; use std::cmp::Ordering; use std::collections::{BinaryHeap, HashMap, HashSet, VecDeque}; use std::fmt::{Display, Formatter}; pub const WIDTH: usize = 4; pub const HEIGHT: usize = 4; #[derive(Debug, Clone, Copy, Hash, Eq, PartialEq)] pub struct Board([[u8; WIDTH]; HEIGHT]); #[derive(Debug, Copy, Clone, Default, Eq, PartialEq)] pub struct BoardCreateError; impl Board { pub fn new() -> Self { let mut arr = [[0u8; WIDTH]; HEIGHT]; for y in 0..WIDTH { for x in 0..HEIGHT { arr[y][x] = ((y * WIDTH + x + 1) % (WIDTH * HEIGHT)) as u8 } } Board(arr) } pub fn from_array(arr: [[u8; WIDTH]; HEIGHT]) -> Result<Self, BoardCreateError> { let w = WIDTH; let h = HEIGHT; let mut tile_count = vec![0; w * h]; for y in 0..HEIGHT { for x in 0..WIDTH { tile_count.get_mut(arr[y][x] as usize).map(|x| *x += 1); } } let has_one_of_all = tile_count.iter().all(|x| *x == 1); if has_one_of_all { Ok(Board(arr)) } else { Err(BoardCreateError) } } pub fn size(&self) -> (usize, usize) { (WIDTH, HEIGHT) } pub fn empty_at(&self) -> (usize, usize)
#[inline(always)] pub fn swap(&mut self, p1: (usize, usize), p2: (usize, usize)) { let arr = &mut self.0; let t1 = arr[p1.1][p1.0]; let t2 = arr[p2.1][p2.0]; arr[p1.1][p1.0] = t2; arr[p2.1][p2.0] = t1; } pub fn apply(&mut self, dir: Dir) -> Result<(), ()> { let (zx, zy) = self.empty_at(); let (w, h) = self.size(); match dir { Dir::Right if zx < w - 1 => { self.swap((zx, zy), (zx + 1, zy)); Ok(()) } Dir::Down if zy < h - 1 => { self.swap((zx, zy), (zx, zy + 1)); Ok(()) } Dir::Left if zx > 0 => { self.swap((zx, zy), (zx - 1, zy)); Ok(()) } Dir::Up if zy > 0 => { self.swap((zx, zy), (zx, zy - 1)); Ok(()) } _ => Err(()), } } // // pub fn possible_steps_with<F: FnMut(Board, u8)>(&self, mut f: F) { // let (zx, zy) = self.empty_at(); // let w = self.0.shape()[0]; // let h = self.0.shape()[1]; // if zx < w - 1 { // // Направо // let mut b = self.clone(); // b.0.swap((zx, zy), (zx + 1, zy)); // let moved = b.0[(zx, zy)]; // f(b, moved) // } // if zy < h - 1 { // // Вниз // let mut b = self.clone(); // b.0.swap((zx, zy), (zx, zy + 1)); // let moved = b.0[(zx, zy)]; // f(b, moved) // } // if zx > 0 { // // Налево // let mut b = self.clone(); // b.0.swap((zx, zy), (zx - 1, zy)); // let moved = b.0[(zx, zy)]; // f(b, moved) // } // if zy > 0 { // // Вверх // let mut b = self.clone(); // b.0.swap((zx, zy), (zx, zy - 1)); // let moved = b.0[(zx, zy)]; // f(b, moved) // } // } pub fn is_solved(&self) -> bool { let (w, h) = self.size(); for y in 0..h { for x in 0..w { if ((y * w + x + 1) % (w * h)) as u8!= self.0[y][x] { return false; } } } true } pub fn can_solve(&self) -> bool { // let (w, h) = self.size(); // let mut flat = Vec::<u8>::with_capacity(w * h); // for y in 0..h { // for x in 0..w { // flat.push(self.0[(x, y)]); // } // } // let (_zx, zy) = self.empty_at(); // let sum: usize = (0..flat.len()) // .map(|i| { // let c = flat[i] as usize; // let c = if c == 0 { w * h } else { c }; // let k = flat[i..] // .iter() // .map(|x| if *x == 0 { (w * h) } else { *x as usize }) // .filter(|x| *x < c) // .count(); // k // }) // .sum(); // let n = sum + zy; // n % 2 == 0 true } pub fn wrong_tiles(&self) -> usize { let (w, h) = self.size(); let mut c = 0; for y in 0..h { for x in 0..w { if ((y * w + x + 1) % (w * h)) as u8!= self.0[y][x] { c += 1; } } } c } pub fn solve(&self) -> Result<Path, ()> { if!self.can_solve() { return Err(()); } let mut checked_position_length = HashMap::new(); let mut heap = BinaryHeap::with_capacity(1000); heap.push(QPath(Path::new(self.clone()))); let mut i = 0; loop { i += 1; let current = heap.pop().unwrap(); let last = checked_position_length.get_mut(&current.0.current_board); let remove_longer = |heap: &mut BinaryHeap<QPath>, to_remove: Board| { heap.retain(|qpath| qpath.0.current_board!= to_remove); }; if i % 10_000 == 0 { println!( "iter = {}e4, path len = {}, euristic = {}, in heap {} el", i / 10_000, current.0.path().len(), current.0.current_board().wrong_tiles(), heap.len() ); } match last { Some(last) if *last <= current.0.path.len() => continue, Some(last) => { *last = current.0.path.len(); //remove_longer(&mut heap, current.0.current_board); } _ => { checked_position_length.insert(current.0.current_board, current.0.path.len()); //remove_longer(&mut heap, current.0.current_board); } } // println!("Current board with {}", current.cost()); if current.0.current_board().is_solved() { return Ok(current.0); } let mut push_or_ignore = |dir| { // Oh... Remove? 
if heap.len() > 1_000_000 { let mut replacement = BinaryHeap::with_capacity(1_000_005); for _i in 0..10_000 { replacement.push(heap.pop().unwrap()); } heap = replacement; } // ^^^^^^^ let mut c = &current; let path = c.0.push_step_cloned(dir); if let Ok(path) = path { if!checked_position_length.contains_key(path.current_board()) { heap.push(QPath::new(path)); } } }; // 15 2 1 12 8 5 6 11 4 9 10 7 3 14 13 0 push_or_ignore(Dir::Up); push_or_ignore(Dir::Right); push_or_ignore(Dir::Down); push_or_ignore(Dir::Left); } } pub fn inner(&self) -> &[[u8; WIDTH]; HEIGHT] { &self.0 } } impl Display for Board { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { let (w, h) = self.size(); for y in 0..h { for x in 0..w { match w * h { 0..=9 => write!(f, "{:1} ", self.0[y][x])?, 10..=99 => write!(f, "{:2} ", self.0[y][x])?, 100..=999 => write!(f, "{:3} ", self.0[y][x])?, _ => panic!(""), }; } writeln!(f)?; } Ok(()) } } #[derive(Copy, Clone, Eq, PartialEq, Debug, Hash)] pub enum Dir { Up, Right, Down, Left, } #[derive(Clone, Debug, Hash, Eq, PartialEq)] pub struct Path { current_board: Board, path: Vec<Dir>, } impl Path { pub fn current_board(&self) -> &Board { &self.current_board } pub fn path(&self) -> &Vec<Dir> { &self.path } pub fn len(&self) -> usize { self.path.len() } } impl Path { pub fn new(start_board: Board) -> Self { Self { current_board: start_board, path: Vec::new(), } } pub fn push_step(&mut self, dir: Dir) -> Result<(), ()> { self.current_board.apply(dir).map(|_| self.path.push(dir)) } pub fn push_step_cloned(&self, dir: Dir) -> Result<Self, ()> { let mut board_clone = self.current_board.clone(); board_clone.apply(dir)?; let mut path_clone = self.path.clone(); path_clone.push(dir); Ok(Self { current_board: board_clone, path: path_clone, }) } } #[derive(Clone)] struct QPath(Path); impl QPath { fn new(p: Path) -> Self { Self(p) } pub fn cost(&self) -> usize { let g = self.0.len(); let f = self.0.current_board.wrong_tiles(); // let f: usize = self // .0 // .current_board() // .inner() // .indexed_iter() // .map(|((x, y), v)| { // let (w, h) = self.0.current_board().size(); // let (ox, oy) = if *v == 0 { // (w - 1, h - 1) // } else { // let v = (*v - 1) as usize; // (v % w, v / h) // }; // (ox.max(x) - ox.min(x)) + (oy.max(y) - oy.min(y)) // }) // .sum(); g + f } } impl Ord for QPath { fn cmp(&self, other: &Self) -> Ordering { (other.cost()).cmp(&self.cost()) } } impl PartialOrd for QPath { fn partial_cmp(&self, other: &Self) -> Option<Ordering> { Some(self.cmp(other)) } } impl PartialEq for QPath { fn eq(&self, other: &Self) -> bool { self.cmp(other) == Ordering::Equal } } impl Eq for QPath {}
{ for y in 0..HEIGHT { for x in 0..WIDTH { if self.0[y][x] == 0 { return (x, y); } } } panic!() }
identifier_body
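In this row the masked middle is the body of Board::empty_at, which scans the grid for the tile holding 0, returns its (x, y) position, and panics if no empty tile exists. The same scan over a plain 4x4 array (a standalone sketch, not the crate's code; the solved-board layout matches what Board::new builds):

// Same scan as the masked body: find the zero tile, return (x, y).
fn empty_at(board: &[[u8; 4]; 4]) -> (usize, usize) {
    for (y, row) in board.iter().enumerate() {
        for (x, &tile) in row.iter().enumerate() {
            if tile == 0 {
                return (x, y);
            }
        }
    }
    panic!("board has no empty tile")
}

fn main() {
    // Solved 4x4 board as built by `Board::new`: 1..=15 then 0 in the last cell.
    let solved = [
        [1, 2, 3, 4],
        [5, 6, 7, 8],
        [9, 10, 11, 12],
        [13, 14, 15, 0],
    ];
    assert_eq!(empty_at(&solved), (3, 3));
}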
play15old2.rs
use ndarray::Array2; use rand::Rng; use std::borrow::Borrow; use std::cmp::Ordering; use std::collections::{BinaryHeap, HashMap, HashSet, VecDeque}; use std::fmt::{Display, Formatter}; pub const WIDTH: usize = 4; pub const HEIGHT: usize = 4; #[derive(Debug, Clone, Copy, Hash, Eq, PartialEq)] pub struct Board([[u8; WIDTH]; HEIGHT]); #[derive(Debug, Copy, Clone, Default, Eq, PartialEq)] pub struct BoardCreateError; impl Board { pub fn new() -> Self { let mut arr = [[0u8; WIDTH]; HEIGHT]; for y in 0..WIDTH { for x in 0..HEIGHT { arr[y][x] = ((y * WIDTH + x + 1) % (WIDTH * HEIGHT)) as u8 } } Board(arr) } pub fn from_array(arr: [[u8; WIDTH]; HEIGHT]) -> Result<Self, BoardCreateError> { let w = WIDTH; let h = HEIGHT; let mut tile_count = vec![0; w * h]; for y in 0..HEIGHT { for x in 0..WIDTH { tile_count.get_mut(arr[y][x] as usize).map(|x| *x += 1); } } let has_one_of_all = tile_count.iter().all(|x| *x == 1); if has_one_of_all { Ok(Board(arr)) } else { Err(BoardCreateError) } } pub fn size(&self) -> (usize, usize) { (WIDTH, HEIGHT) } pub fn empty_at(&self) -> (usize, usize) { for y in 0..HEIGHT { for x in 0..WIDTH { if self.0[y][x] == 0 { return (x, y); } } } panic!() } #[inline(always)] pub fn swap(&mut self, p1: (usize, usize), p2: (usize, usize)) { let arr = &mut self.0; let t1 = arr[p1.1][p1.0]; let t2 = arr[p2.1][p2.0]; arr[p1.1][p1.0] = t2; arr[p2.1][p2.0] = t1; } pub fn apply(&mut self, dir: Dir) -> Result<(), ()> { let (zx, zy) = self.empty_at(); let (w, h) = self.size(); match dir { Dir::Right if zx < w - 1 => { self.swap((zx, zy), (zx + 1, zy)); Ok(()) } Dir::Down if zy < h - 1 => { self.swap((zx, zy), (zx, zy + 1)); Ok(()) } Dir::Left if zx > 0 => { self.swap((zx, zy), (zx - 1, zy)); Ok(()) } Dir::Up if zy > 0 => { self.swap((zx, zy), (zx, zy - 1)); Ok(()) } _ => Err(()), } } // // pub fn possible_steps_with<F: FnMut(Board, u8)>(&self, mut f: F) { // let (zx, zy) = self.empty_at(); // let w = self.0.shape()[0]; // let h = self.0.shape()[1]; // if zx < w - 1 { // // Направо // let mut b = self.clone(); // b.0.swap((zx, zy), (zx + 1, zy)); // let moved = b.0[(zx, zy)]; // f(b, moved) // } // if zy < h - 1 { // // Вниз // let mut b = self.clone(); // b.0.swap((zx, zy), (zx, zy + 1)); // let moved = b.0[(zx, zy)]; // f(b, moved) // } // if zx > 0 { // // Налево // let mut b = self.clone(); // b.0.swap((zx, zy), (zx - 1, zy)); // let moved = b.0[(zx, zy)]; // f(b, moved) // } // if zy > 0 { // // Вверх // let mut b = self.clone(); // b.0.swap((zx, zy), (zx, zy - 1)); // let moved = b.0[(zx, zy)]; // f(b, moved) // } // } pub fn is_solved(&self) -> bool { let (w, h) = self.size(); for y in 0..h { for x in 0..w { if ((y * w + x + 1) % (w * h)) as u8!= self.0[y][x] { return false; } } } true } pub fn can_solve(&self) -> bool { // let (w, h) = self.size(); // let mut flat = Vec::<u8>::with_capacity(w * h); // for y in 0..h { // for x in 0..w { // flat.push(self.0[(x, y)]); // } // } // let (_zx, zy) = self.empty_at(); // let sum: usize = (0..flat.len()) // .map(|i| { // let c = flat[i] as usize; // let c = if c == 0 { w * h } else { c }; // let k = flat[i..] 
// .iter() // .map(|x| if *x == 0 { (w * h) } else { *x as usize }) // .filter(|x| *x < c) // .count(); // k // }) // .sum(); // let n = sum + zy; // n % 2 == 0 true } pub fn wrong_tiles(&self) -> usize { let (w, h) = self.size(); let mut c = 0; for y in 0..h { for x in 0..w { if ((y * w + x + 1) % (w * h)) as u8!= self.0[y][x] { c += 1; } } } c } pub fn solve(&self) -> Result<Path, ()> { if!self.can_solve() { return Err(()); } let mut checked_position_length = HashMap::new(); let mut heap = BinaryHeap::with_capacity(1000); heap.push(QPath(Path::new(self.clone()))); let mut i = 0; loop { i += 1; let current = heap.pop().unwrap(); let last = checked_position_length.get_mut(&current.0.current_board); let remove_longer = |heap: &mut BinaryHeap<QPath>, to_remove: Board| { heap.retain(|qpath| qpath.0.current_board!= to_remove); }; if i % 10_000 == 0 { println!( "iter = {}e4, path len = {}, euristic = {}, in heap {} el", i / 10_000, current.0.path().len(), current.0.current_board().wrong_tiles(), heap.len() ); } match last { Some(last) if *last <= current.0.path.len() => continue, Some(last) => { *last = current.0.path.len(); //remove_longer(&mut heap, current.0.current_board); } _ => { checked_position_length.insert(current.0.current_board, current.0.path.len()); //remove_longer(&mut heap, current.0.current_board); } } // println!("Current board with {}", current.cost()); if current.0.current_board().is_solved() { return Ok(current.0); } let mut push_or_ignore = |dir| { // Oh... Remove? if heap.len() > 1_000_000 { let mut replacement = BinaryHeap::with_capacity(1_000_005); for _i in 0..10_000 { replacement.push(heap.pop().unwrap()); } heap = replacement; } // ^^^^^^^ let mut c = &current; let path = c.0.push_step_cloned(dir); if let Ok(path) = path { if!checked_position_length.contains_key(path.current_board()) { heap.push(QPath::new(path)); } } }; // 15 2 1 12 8 5 6 11 4 9 10 7 3 14 13 0 push_or_ignore(Dir::Up); push_or_ignore(Dir::Right); push_or_ignore(Dir::Down); push_or_ignore(Dir::Left); } } pub fn inner(&self) -> &[[u8; WIDTH]; HEIGHT] { &self.0 } } impl Display for Board { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { let (w, h) = self.size(); for y in 0..h { for x in 0..w { match w * h { 0..=9 => write!(f, "{:1} ", self.0[y][x])?, 10..=99 => write!(f, "{:2} ", self.0[y][x])?, 100..=999 => write!(f, "{:3} ", self.0[y][x])?, _ => panic!(""), }; } writeln!(f)?; } Ok(()) } } #[derive(Copy, Clone, Eq, PartialEq, Debug, Hash)] pub enum Dir { Up, Right, Down, Left, } #[derive(Clone, Debug, Hash, Eq, PartialEq)] pub struct Path { current_board: Board, path: Vec<Dir>, } impl Path { pub fn current_board(&self) -> &Board { &self.current_board } pub fn path(&self) -> &Vec<Dir> { &self.path } pub fn len(&self) -> usize { self.path.len() } } impl Path { pub fn new(start_board: Board) -> Self { Self { current_board: start_board, path: Vec::new(), } } pub fn push_step(&mut self, dir: Dir) -> Result<(), ()> { self.current_board.apply(dir).map(|_| self.path.push(dir)) } pub fn push_step_cloned(&self, dir: Dir) -> Result<Self, ()> { let mut board_clone = self.current_board.clone(); board_clone.apply(dir)?; let mut path_clone = self.path.clone(); path_clone.push(dir); Ok(Self { current_board: board_clone, path: path_clone, }) } } #[derive(Clone)] struct QPath(Path); impl QPath { fn new(p: Path) -> Self { Self(p) } pub fn cost(&self) -> usize { let g = self.0.len(); let f = self.0.current_board.wrong_tiles(); // let f: usize = self // .0 // .current_board() // .inner()
// .indexed_iter() // .map(|((x, y), v)| { // let (w, h) = self.0.current_board().size(); // let (ox, oy) = if *v == 0 { // (w - 1, h - 1) // } else { // let v = (*v - 1) as usize; // (v % w, v / h) // }; // (ox.max(x) - ox.min(x)) + (oy.max(y) - oy.min(y)) // }) // .sum(); g + f } } impl Ord for QPath { fn cmp(&self, other: &Self) -> Ordering { (other.cost()).cmp(&self.cost()) } } impl PartialOrd for QPath { fn partial_cmp(&self, other: &Self) -> Option<Ordering> { Some(self.cmp(other)) } } impl PartialEq for QPath { fn eq(&self, other: &Self) -> bool { self.cmp(other) == Ordering::Equal } } impl Eq for QPath {}
random_line_split
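`can_solve` in this row is stubbed to return `true`, with the parity check left commented out. For reference, here is a hedged sketch of the standard solvability test for the 4x4 puzzle: the parity of (inversions among the numbered tiles, ignoring the blank) plus (the blank's row distance from its home row) is unchanged by legal moves and is even exactly for solvable positions. The `solvable_4x4` helper below is illustrative and independent of the row's `Board` type.

```rust
/// True if a 4x4 fifteen-puzzle position (tiles in reading order, 0 = blank)
/// can be rearranged into the solved state. Illustrative sketch only.
fn solvable_4x4(tiles: &[u8; 16]) -> bool {
    // Count inversions among the numbered tiles, skipping the blank.
    let mut inversions = 0usize;
    for i in 0..16 {
        for j in (i + 1)..16 {
            if tiles[i] != 0 && tiles[j] != 0 && tiles[i] > tiles[j] {
                inversions += 1;
            }
        }
    }
    // 0-based row of the blank, counted from the top; its home row is 3.
    let blank_row = tiles.iter().position(|&t| t == 0).unwrap() / 4;
    let blank_row_distance = 3 - blank_row;
    // Horizontal moves change neither term; vertical moves flip both parities,
    // so the sum's parity is invariant, and it is 0 in the solved position.
    (inversions + blank_row_distance) % 2 == 0
}

fn main() {
    let solved: [u8; 16] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0];
    let loyd_14_15: [u8; 16] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 15, 14, 0];
    assert!(solvable_4x4(&solved));
    assert!(!solvable_4x4(&loyd_14_15)); // the classic unsolvable 14-15 swap
}
```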
webhdfs.rs
use webhdfs::*; fn main() { use std::fs::File; use std::path::Path; use std::fs::create_dir_all; use commandline::*; let (mut client, op) = parse_command_line(); match op { Operation::Get(mut fs) => { match &fs[..] { &[ref input] => { let input_path = Path::new(input); let output = input_path.file_name().expect2("file name must be specified if no output file is given"); let mut out = File::create(&output).expect2("Could not create output file"); client.get_file(&input, &mut out).expect2("get error") } &[ref input, ref output] => { let mut out = File::create(&output).expect2("Could not create output file"); client.get_file(&input, &mut out).expect2("get error") } _ => { let target_dir_ = fs.pop().unwrap(); let target_dir = Path::new(&target_dir_); create_dir_all(&target_dir).expect2("Could not create output dir"); for input in fs { let input_path = Path::new(&input); let output_file = input_path.file_name().expect2("file name must be specified if no output file is given"); let output = target_dir.join(&Path::new(output_file)); let mut out = File::create(&output).expect2("Could not create output file"); client.get_file(&input, &mut out).expect2("get error") } } } } } } fn version() ->! { println!( "{} ({}) version {}", env!("CARGO_PKG_DESCRIPTION"), env!("CARGO_PKG_NAME"), env!("CARGO_PKG_VERSION") ); std::process::exit(0); } fn usage() ->! { println!("USAGE: webhdfs <options>... <command> <files>... webhdfs -h|--help webhdfs -v|--version options: -U|--uri|--url <url> API entrypoint -u|--user <string> User name -d|--doas <string> DoAs username -T|--dt <string> Delegation token -t|--timeout <unsigned> Default timeout in seconds -N|--natmap-file <filepath> Path to NAT mappings file -n|--natmap-entry <k=v> NAT mapping (multiple options are Ok) command and files: -v|--version Print version and exit -h|--help Print this thelp screen and exit --save-config <filepath> Save the effective configuration to the file -g|--get <remote-filepath> <local-path> -g|--get <remote-filepath> -g|--get <remote-filepath>.. 
<local-dirpath> Get files from HDFS "); std::process::exit(1); } enum Operation { Get(Vec<String>) } fn parse_command_line() -> (SyncHdfsClient, Operation) { use std::time::Duration; use std::collections::HashMap; use commandline::*; enum Sw { Uri, User, Doas, DToken, Timeout, NMFile, NMEntry, SaveConfig } enum Op { Get } struct S { sw: Option<Sw>, op: Option<Op>, files: Vec<String>, uri: Option<String>, user: Option<String>, doas: Option<String>, dtoken: Option<String>, timeout: Option<Duration>, natmap: Option<HashMap<String, String>>, save_config: Option<String>, } let s0 = S { sw: None, op: None, files: vec![], uri: None, user: None, doas:None, timeout: None, dtoken: None, natmap: None, save_config: None }; let result = commandline::parse_cmdln(s0, |mut s, arg| if let Some(sw) = s.sw.take() { match sw { Sw::Uri => S { uri: Some(arg.arg()),..s }, Sw::User => S { user: Some(arg.arg()),..s }, Sw::Doas => S { doas: Some(arg.arg()),..s }, Sw::DToken => S { dtoken: Some(arg.arg()),..s }, Sw::SaveConfig => S { save_config: Some(arg.arg()),..s }, Sw::Timeout => S { timeout: Some(Duration::from_secs(arg.arg().parse().expect2("Invalid timeout duration"))),..s }, Sw::NMFile => S { natmap: Some(config::read_kv_file(&arg.arg()).expect2("malformed natmap file")),..s }, Sw::NMEntry => { let mut nm = if let Some(nm) = s.natmap { nm } else { HashMap::new() }; let (k, v) = config::split_kv(arg.arg()).expect2("invalid natmap entry"); nm.insert(k, v); S { natmap: Some(nm),..s } } } } else { match arg.switch_ref() { "-v"|"--version" => version(), "-h"|"--help" => usage(), "-g"|"--get" => S { op: Some(Op::Get),..s }, "-U"|"--uri"|"--url" => S { sw: Some(Sw::Uri),..s }, "-u"|"--user" => S { sw: Some(Sw::User),..s }, "-d"|"--doas" => S { sw: Some(Sw::Doas),..s }, "-T"|"--dt" => S { sw: Some(Sw::DToken),..s }, "-t"|"--timeout" => S { sw: Some(Sw::Timeout),..s }, "-N"|"--natmap-file" => S { sw: Some(Sw::NMFile),..s }, "-n"|"--natmap-entry" => S { sw: Some(Sw::NMEntry),..s }, "--save-config" => S { sw: Some(Sw::SaveConfig),..s }, _ => { s.files.push(arg.arg()); s} } }); if result.sw.is_some() { error_exit("invalid command line at the end", "") } if let Some(f) = result.save_config { if result.op.is_some() { error_exit("--save-config must be used alone", "") } let uri = result.uri.expect2("must specify --uri when saving config"); let cfg = config::Config::new(uri.parse().expect2("Cannot parse URI")); config::write_config(&std::path::Path::new(&f), &cfg, true); std::process::exit(0); } else { let operation = if let Some(op) = result.op { op } else { error_exit("must specify operation", "") }; //build context let mut cx = if let Some(uri) = result.uri { SyncHdfsClientBuilder::new(uri.parse().expect2("Cannot parse URI")) } else { SyncHdfsClientBuilder::from_config_opt().expect2("No configuration files were found, and no mandatory options (--uri) were specified") }; if let Some(user) = result.user { cx = cx.user_name(user) } if let Some(doas) = result.doas { cx = cx.doas(doas) } if let Some(timeout) = result.timeout { cx = cx.default_timeout(timeout) } if let Some(natmap) = result.natmap { cx = cx.natmap(NatMap::new(natmap.into_iter()).expect2("Invalid natmap")) } if let Some(dtoken) = result.dtoken { cx = cx.delegation_token(dtoken) } let client = cx.build().expect2("Cannot build SyncHdfsClient"); let operation = match operation { Op::Get => if result.files.len() > 0 { Operation::Get(result.files) } else
}; (client, operation) } } //------------------------- mod commandline { /// Prints two-part message to stderr and exits pub fn error_exit(msg: &str, detail: &str) ->! { eprint!("Error: {}", msg); if detail.is_empty() { eprintln!() } else { eprintln!(" ({})", detail); } std::process::exit(1) } /// Expect2 function pub trait Expect2<T> { /// Same as Result::expect but the error message is brief and not intimidating fn expect2(self, msg: &str) -> T; } impl<T, E: std::error::Error> Expect2<T> for std::result::Result<T, E> { fn expect2(self, msg: &str) -> T { match self { Ok(v) => v, Err(e) => error_exit(msg, &e.to_string()) } } } impl<T> Expect2<T> for Option<T> { fn expect2(self, msg: &str) -> T { match self { Some(v) => v, None => error_exit(msg, "") } } } #[derive(Debug)] pub enum CmdLn { Switch(String), Arg(String), Item(String) } impl std::fmt::Display for CmdLn { fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { CmdLn::Switch(s) => write!(fmt, "Switch '{}'", s), CmdLn::Arg(s) => write!(fmt, "Arg '{}'", s), CmdLn::Item(s) => write!(fmt, "Item '{}'", s) } } } impl CmdLn { /// Splits command line argruments if needed /// - _ if bypass => Item(_) /// - '--sw=arg' => Switch('--sw') Arg('arg') /// - '-abc' => Item('-a') Item('-b') Item('-c') /// - '--' => *bypass = true; [] /// - _ => Item(_) fn convert_arg(bypass: &mut bool, v: String) -> Vec<CmdLn> { use std::iter::FromIterator; if *bypass { vec![CmdLn::Item(v)] } else if v == "--" { *bypass = true; vec![] } else if v.starts_with("--") { let mut s: Vec<String> = v.splitn(2, "=").map(|r| r.to_string()).collect(); let a = s.pop(); let b = s.pop(); match (a, b) { (Some(a), None) => vec![CmdLn::Item(a)], (Some(b), Some(a)) => vec![CmdLn::Switch(a), CmdLn::Arg(b)], _ => unreachable!() } } else if v.starts_with("-") && v!= "-" { v.chars().skip(1).map(|c| CmdLn::Item(String::from_iter(vec!['-', c]))).collect() } else { vec![CmdLn::Item(v)] } } fn raise(&self, w: &str) ->! { error_exit(&format!("we wanted {}, but got {:?}", w, self), "command line syntax error") } /*pub fn switch(self) -> String { match self { CmdLn::Switch(v) | CmdLn::Item(v) => v, other => other.raise("Switch") } }*/ pub fn switch_ref(&self) -> &str { match self { CmdLn::Switch(v) | CmdLn::Item(v) => v, other => other.raise("Switch") } } pub fn arg(self) -> String { match self { CmdLn::Arg(v) | CmdLn::Item(v) => v, other => other.raise("Arg") } } } /// Parses command line for 0- and 1-argument options. /// `f` consumes the current state and a command line item, and produces the new state. pub fn parse_cmdln<S, F>(s0: S, f: F) -> S where F: FnMut(S, CmdLn) -> S { std::env::args().skip(1).scan(false, |s, a| Some(CmdLn::convert_arg(s, a))).flatten().fold(s0, f) } /* pub fn bool_opt(s: String) -> bool { match s.as_ref() { "true"|"+"|"yes" => true, "false"|"-"|"no" => false, v => panic!("invalid bool value '{}'", v) } } */ }
{ error_exit("must specify at least one input file for --get", "") }
conditional_block
webhdfs.rs
use webhdfs::*; fn main() { use std::fs::File; use std::path::Path; use std::fs::create_dir_all; use commandline::*; let (mut client, op) = parse_command_line(); match op { Operation::Get(mut fs) => { match &fs[..] { &[ref input] => { let input_path = Path::new(input); let output = input_path.file_name().expect2("file name must be specified if no output file is given"); let mut out = File::create(&output).expect2("Could not create output file"); client.get_file(&input, &mut out).expect2("get error") } &[ref input, ref output] => { let mut out = File::create(&output).expect2("Could not create output file"); client.get_file(&input, &mut out).expect2("get error") } _ => { let target_dir_ = fs.pop().unwrap(); let target_dir = Path::new(&target_dir_); create_dir_all(&target_dir).expect2("Could not create output dir"); for input in fs { let input_path = Path::new(&input); let output_file = input_path.file_name().expect2("file name must be specified if no output file is given"); let output = target_dir.join(&Path::new(output_file)); let mut out = File::create(&output).expect2("Could not create output file"); client.get_file(&input, &mut out).expect2("get error") } } } } } } fn version() ->! { println!( "{} ({}) version {}", env!("CARGO_PKG_DESCRIPTION"), env!("CARGO_PKG_NAME"), env!("CARGO_PKG_VERSION") ); std::process::exit(0); } fn usage() ->! { println!("USAGE: webhdfs <options>... <command> <files>... webhdfs -h|--help webhdfs -v|--version options: -U|--uri|--url <url> API entrypoint -u|--user <string> User name -d|--doas <string> DoAs username -T|--dt <string> Delegation token -t|--timeout <unsigned> Default timeout in seconds -N|--natmap-file <filepath> Path to NAT mappings file -n|--natmap-entry <k=v> NAT mapping (multiple options are Ok) command and files: -v|--version Print version and exit -h|--help Print this thelp screen and exit --save-config <filepath> Save the effective configuration to the file -g|--get <remote-filepath> <local-path> -g|--get <remote-filepath> -g|--get <remote-filepath>.. 
<local-dirpath> Get files from HDFS "); std::process::exit(1); } enum Operation { Get(Vec<String>) } fn parse_command_line() -> (SyncHdfsClient, Operation) { use std::time::Duration; use std::collections::HashMap; use commandline::*; enum Sw { Uri, User, Doas, DToken, Timeout, NMFile, NMEntry, SaveConfig } enum Op { Get } struct S { sw: Option<Sw>, op: Option<Op>, files: Vec<String>, uri: Option<String>, user: Option<String>, doas: Option<String>, dtoken: Option<String>, timeout: Option<Duration>, natmap: Option<HashMap<String, String>>, save_config: Option<String>, } let s0 = S { sw: None, op: None, files: vec![], uri: None, user: None, doas:None, timeout: None, dtoken: None, natmap: None, save_config: None }; let result = commandline::parse_cmdln(s0, |mut s, arg| if let Some(sw) = s.sw.take() { match sw { Sw::Uri => S { uri: Some(arg.arg()),..s }, Sw::User => S { user: Some(arg.arg()),..s }, Sw::Doas => S { doas: Some(arg.arg()),..s }, Sw::DToken => S { dtoken: Some(arg.arg()),..s }, Sw::SaveConfig => S { save_config: Some(arg.arg()),..s }, Sw::Timeout => S { timeout: Some(Duration::from_secs(arg.arg().parse().expect2("Invalid timeout duration"))),..s }, Sw::NMFile => S { natmap: Some(config::read_kv_file(&arg.arg()).expect2("malformed natmap file")),..s }, Sw::NMEntry => { let mut nm = if let Some(nm) = s.natmap { nm } else { HashMap::new() }; let (k, v) = config::split_kv(arg.arg()).expect2("invalid natmap entry"); nm.insert(k, v); S { natmap: Some(nm),..s } } } } else { match arg.switch_ref() { "-v"|"--version" => version(), "-h"|"--help" => usage(), "-g"|"--get" => S { op: Some(Op::Get),..s }, "-U"|"--uri"|"--url" => S { sw: Some(Sw::Uri),..s }, "-u"|"--user" => S { sw: Some(Sw::User),..s }, "-d"|"--doas" => S { sw: Some(Sw::Doas),..s }, "-T"|"--dt" => S { sw: Some(Sw::DToken),..s }, "-t"|"--timeout" => S { sw: Some(Sw::Timeout),..s }, "-N"|"--natmap-file" => S { sw: Some(Sw::NMFile),..s }, "-n"|"--natmap-entry" => S { sw: Some(Sw::NMEntry),..s }, "--save-config" => S { sw: Some(Sw::SaveConfig),..s }, _ => { s.files.push(arg.arg()); s} } }); if result.sw.is_some() { error_exit("invalid command line at the end", "") } if let Some(f) = result.save_config { if result.op.is_some() { error_exit("--save-config must be used alone", "") } let uri = result.uri.expect2("must specify --uri when saving config"); let cfg = config::Config::new(uri.parse().expect2("Cannot parse URI")); config::write_config(&std::path::Path::new(&f), &cfg, true); std::process::exit(0); } else { let operation = if let Some(op) = result.op { op } else { error_exit("must specify operation", "") }; //build context let mut cx = if let Some(uri) = result.uri { SyncHdfsClientBuilder::new(uri.parse().expect2("Cannot parse URI")) } else { SyncHdfsClientBuilder::from_config_opt().expect2("No configuration files were found, and no mandatory options (--uri) were specified") }; if let Some(user) = result.user { cx = cx.user_name(user) } if let Some(doas) = result.doas { cx = cx.doas(doas) } if let Some(timeout) = result.timeout { cx = cx.default_timeout(timeout) } if let Some(natmap) = result.natmap { cx = cx.natmap(NatMap::new(natmap.into_iter()).expect2("Invalid natmap")) } if let Some(dtoken) = result.dtoken { cx = cx.delegation_token(dtoken) } let client = cx.build().expect2("Cannot build SyncHdfsClient"); let operation = match operation { Op::Get => if result.files.len() > 0 { Operation::Get(result.files) } else { error_exit("must specify at least one input file for --get", "") } }; (client, operation) } } 
//------------------------- mod commandline { /// Prints two-part message to stderr and exits pub fn error_exit(msg: &str, detail: &str) ->! { eprint!("Error: {}", msg); if detail.is_empty() { eprintln!() } else { eprintln!(" ({})", detail); } std::process::exit(1) } /// Expect2 function pub trait Expect2<T> { /// Same as Result::expect but the error message is brief and not intimidating fn expect2(self, msg: &str) -> T; } impl<T, E: std::error::Error> Expect2<T> for std::result::Result<T, E> { fn expect2(self, msg: &str) -> T { match self { Ok(v) => v, Err(e) => error_exit(msg, &e.to_string()) } } } impl<T> Expect2<T> for Option<T> { fn expect2(self, msg: &str) -> T
} #[derive(Debug)] pub enum CmdLn { Switch(String), Arg(String), Item(String) } impl std::fmt::Display for CmdLn { fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { CmdLn::Switch(s) => write!(fmt, "Switch '{}'", s), CmdLn::Arg(s) => write!(fmt, "Arg '{}'", s), CmdLn::Item(s) => write!(fmt, "Item '{}'", s) } } } impl CmdLn { /// Splits command line argruments if needed /// - _ if bypass => Item(_) /// - '--sw=arg' => Switch('--sw') Arg('arg') /// - '-abc' => Item('-a') Item('-b') Item('-c') /// - '--' => *bypass = true; [] /// - _ => Item(_) fn convert_arg(bypass: &mut bool, v: String) -> Vec<CmdLn> { use std::iter::FromIterator; if *bypass { vec![CmdLn::Item(v)] } else if v == "--" { *bypass = true; vec![] } else if v.starts_with("--") { let mut s: Vec<String> = v.splitn(2, "=").map(|r| r.to_string()).collect(); let a = s.pop(); let b = s.pop(); match (a, b) { (Some(a), None) => vec![CmdLn::Item(a)], (Some(b), Some(a)) => vec![CmdLn::Switch(a), CmdLn::Arg(b)], _ => unreachable!() } } else if v.starts_with("-") && v!= "-" { v.chars().skip(1).map(|c| CmdLn::Item(String::from_iter(vec!['-', c]))).collect() } else { vec![CmdLn::Item(v)] } } fn raise(&self, w: &str) ->! { error_exit(&format!("we wanted {}, but got {:?}", w, self), "command line syntax error") } /*pub fn switch(self) -> String { match self { CmdLn::Switch(v) | CmdLn::Item(v) => v, other => other.raise("Switch") } }*/ pub fn switch_ref(&self) -> &str { match self { CmdLn::Switch(v) | CmdLn::Item(v) => v, other => other.raise("Switch") } } pub fn arg(self) -> String { match self { CmdLn::Arg(v) | CmdLn::Item(v) => v, other => other.raise("Arg") } } } /// Parses command line for 0- and 1-argument options. /// `f` consumes the current state and a command line item, and produces the new state. pub fn parse_cmdln<S, F>(s0: S, f: F) -> S where F: FnMut(S, CmdLn) -> S { std::env::args().skip(1).scan(false, |s, a| Some(CmdLn::convert_arg(s, a))).flatten().fold(s0, f) } /* pub fn bool_opt(s: String) -> bool { match s.as_ref() { "true"|"+"|"yes" => true, "false"|"-"|"no" => false, v => panic!("invalid bool value '{}'", v) } } */ }
{ match self { Some(v) => v, None => error_exit(msg, "") } }
identifier_body
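The `Expect2` trait defined in these webhdfs rows gives `Result` and `Option` an `expect2` method that prints a brief message through `error_exit` and terminates, instead of panicking with a backtrace. Here is a small standalone sketch of the same pattern; the `parse` call and the messages are illustrative, not from the tool.

```rust
use std::process::exit;

trait Expect2<T> {
    /// Like `expect`, but prints a short error and exits instead of panicking.
    fn expect2(self, msg: &str) -> T;
}

impl<T, E: std::error::Error> Expect2<T> for Result<T, E> {
    fn expect2(self, msg: &str) -> T {
        self.unwrap_or_else(|e| {
            eprintln!("Error: {} ({})", msg, e);
            exit(1)
        })
    }
}

fn main() {
    // A malformed value ends the process with a one-line error, not a panic.
    let timeout: u64 = "30".parse().expect2("Invalid timeout duration");
    println!("timeout = {}s", timeout);
}
```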
webhdfs.rs
use webhdfs::*; fn main() { use std::fs::File; use std::path::Path; use std::fs::create_dir_all; use commandline::*; let (mut client, op) = parse_command_line(); match op { Operation::Get(mut fs) => { match &fs[..] { &[ref input] => { let input_path = Path::new(input); let output = input_path.file_name().expect2("file name must be specified if no output file is given"); let mut out = File::create(&output).expect2("Could not create output file"); client.get_file(&input, &mut out).expect2("get error") } &[ref input, ref output] => { let mut out = File::create(&output).expect2("Could not create output file"); client.get_file(&input, &mut out).expect2("get error") } _ => { let target_dir_ = fs.pop().unwrap(); let target_dir = Path::new(&target_dir_); create_dir_all(&target_dir).expect2("Could not create output dir"); for input in fs { let input_path = Path::new(&input); let output_file = input_path.file_name().expect2("file name must be specified if no output file is given"); let output = target_dir.join(&Path::new(output_file)); let mut out = File::create(&output).expect2("Could not create output file"); client.get_file(&input, &mut out).expect2("get error") } } } } } } fn version() ->! { println!( "{} ({}) version {}", env!("CARGO_PKG_DESCRIPTION"), env!("CARGO_PKG_NAME"), env!("CARGO_PKG_VERSION") ); std::process::exit(0); } fn usage() ->! { println!("USAGE: webhdfs <options>... <command> <files>... webhdfs -h|--help webhdfs -v|--version options: -U|--uri|--url <url> API entrypoint -u|--user <string> User name -d|--doas <string> DoAs username -T|--dt <string> Delegation token -t|--timeout <unsigned> Default timeout in seconds -N|--natmap-file <filepath> Path to NAT mappings file -n|--natmap-entry <k=v> NAT mapping (multiple options are Ok) command and files: -v|--version Print version and exit -h|--help Print this thelp screen and exit --save-config <filepath> Save the effective configuration to the file -g|--get <remote-filepath> <local-path> -g|--get <remote-filepath> -g|--get <remote-filepath>.. 
<local-dirpath> Get files from HDFS "); std::process::exit(1); } enum Operation { Get(Vec<String>) } fn parse_command_line() -> (SyncHdfsClient, Operation) { use std::time::Duration; use std::collections::HashMap; use commandline::*; enum Sw { Uri, User, Doas, DToken, Timeout, NMFile, NMEntry, SaveConfig } enum Op { Get } struct S { sw: Option<Sw>, op: Option<Op>, files: Vec<String>, uri: Option<String>, user: Option<String>, doas: Option<String>, dtoken: Option<String>, timeout: Option<Duration>, natmap: Option<HashMap<String, String>>, save_config: Option<String>, } let s0 = S { sw: None, op: None, files: vec![], uri: None, user: None, doas:None, timeout: None, dtoken: None, natmap: None, save_config: None }; let result = commandline::parse_cmdln(s0, |mut s, arg| if let Some(sw) = s.sw.take() { match sw { Sw::Uri => S { uri: Some(arg.arg()),..s }, Sw::User => S { user: Some(arg.arg()),..s }, Sw::Doas => S { doas: Some(arg.arg()),..s }, Sw::DToken => S { dtoken: Some(arg.arg()),..s }, Sw::SaveConfig => S { save_config: Some(arg.arg()),..s }, Sw::Timeout => S { timeout: Some(Duration::from_secs(arg.arg().parse().expect2("Invalid timeout duration"))),..s }, Sw::NMFile => S { natmap: Some(config::read_kv_file(&arg.arg()).expect2("malformed natmap file")),..s }, Sw::NMEntry => { let mut nm = if let Some(nm) = s.natmap { nm } else { HashMap::new() }; let (k, v) = config::split_kv(arg.arg()).expect2("invalid natmap entry"); nm.insert(k, v); S { natmap: Some(nm),..s } } } } else { match arg.switch_ref() { "-v"|"--version" => version(), "-h"|"--help" => usage(), "-g"|"--get" => S { op: Some(Op::Get),..s }, "-U"|"--uri"|"--url" => S { sw: Some(Sw::Uri),..s }, "-u"|"--user" => S { sw: Some(Sw::User),..s }, "-d"|"--doas" => S { sw: Some(Sw::Doas),..s }, "-T"|"--dt" => S { sw: Some(Sw::DToken),..s }, "-t"|"--timeout" => S { sw: Some(Sw::Timeout),..s }, "-N"|"--natmap-file" => S { sw: Some(Sw::NMFile),..s }, "-n"|"--natmap-entry" => S { sw: Some(Sw::NMEntry),..s }, "--save-config" => S { sw: Some(Sw::SaveConfig),..s }, _ => { s.files.push(arg.arg()); s} } }); if result.sw.is_some() { error_exit("invalid command line at the end", "") } if let Some(f) = result.save_config { if result.op.is_some() { error_exit("--save-config must be used alone", "") } let uri = result.uri.expect2("must specify --uri when saving config"); let cfg = config::Config::new(uri.parse().expect2("Cannot parse URI")); config::write_config(&std::path::Path::new(&f), &cfg, true); std::process::exit(0); } else { let operation = if let Some(op) = result.op { op } else { error_exit("must specify operation", "") }; //build context let mut cx = if let Some(uri) = result.uri { SyncHdfsClientBuilder::new(uri.parse().expect2("Cannot parse URI")) } else { SyncHdfsClientBuilder::from_config_opt().expect2("No configuration files were found, and no mandatory options (--uri) were specified") }; if let Some(user) = result.user { cx = cx.user_name(user) } if let Some(doas) = result.doas { cx = cx.doas(doas) } if let Some(timeout) = result.timeout { cx = cx.default_timeout(timeout) } if let Some(natmap) = result.natmap { cx = cx.natmap(NatMap::new(natmap.into_iter()).expect2("Invalid natmap")) } if let Some(dtoken) = result.dtoken { cx = cx.delegation_token(dtoken) } let client = cx.build().expect2("Cannot build SyncHdfsClient"); let operation = match operation { Op::Get => if result.files.len() > 0 { Operation::Get(result.files) } else { error_exit("must specify at least one input file for --get", "") } }; (client, operation) } } 
//------------------------- mod commandline { /// Prints two-part message to stderr and exits pub fn error_exit(msg: &str, detail: &str) ->! { eprint!("Error: {}", msg); if detail.is_empty() { eprintln!() } else { eprintln!(" ({})", detail); } std::process::exit(1) } /// Expect2 function pub trait Expect2<T> { /// Same as Result::expect but the error message is brief and not intimidating fn expect2(self, msg: &str) -> T; } impl<T, E: std::error::Error> Expect2<T> for std::result::Result<T, E> { fn expect2(self, msg: &str) -> T { match self { Ok(v) => v, Err(e) => error_exit(msg, &e.to_string()) } } } impl<T> Expect2<T> for Option<T> { fn expect2(self, msg: &str) -> T { match self { Some(v) => v, None => error_exit(msg, "") } } } #[derive(Debug)] pub enum CmdLn {
impl std::fmt::Display for CmdLn { fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { CmdLn::Switch(s) => write!(fmt, "Switch '{}'", s), CmdLn::Arg(s) => write!(fmt, "Arg '{}'", s), CmdLn::Item(s) => write!(fmt, "Item '{}'", s) } } } impl CmdLn { /// Splits command line argruments if needed /// - _ if bypass => Item(_) /// - '--sw=arg' => Switch('--sw') Arg('arg') /// - '-abc' => Item('-a') Item('-b') Item('-c') /// - '--' => *bypass = true; [] /// - _ => Item(_) fn convert_arg(bypass: &mut bool, v: String) -> Vec<CmdLn> { use std::iter::FromIterator; if *bypass { vec![CmdLn::Item(v)] } else if v == "--" { *bypass = true; vec![] } else if v.starts_with("--") { let mut s: Vec<String> = v.splitn(2, "=").map(|r| r.to_string()).collect(); let a = s.pop(); let b = s.pop(); match (a, b) { (Some(a), None) => vec![CmdLn::Item(a)], (Some(b), Some(a)) => vec![CmdLn::Switch(a), CmdLn::Arg(b)], _ => unreachable!() } } else if v.starts_with("-") && v!= "-" { v.chars().skip(1).map(|c| CmdLn::Item(String::from_iter(vec!['-', c]))).collect() } else { vec![CmdLn::Item(v)] } } fn raise(&self, w: &str) ->! { error_exit(&format!("we wanted {}, but got {:?}", w, self), "command line syntax error") } /*pub fn switch(self) -> String { match self { CmdLn::Switch(v) | CmdLn::Item(v) => v, other => other.raise("Switch") } }*/ pub fn switch_ref(&self) -> &str { match self { CmdLn::Switch(v) | CmdLn::Item(v) => v, other => other.raise("Switch") } } pub fn arg(self) -> String { match self { CmdLn::Arg(v) | CmdLn::Item(v) => v, other => other.raise("Arg") } } } /// Parses command line for 0- and 1-argument options. /// `f` consumes the current state and a command line item, and produces the new state. pub fn parse_cmdln<S, F>(s0: S, f: F) -> S where F: FnMut(S, CmdLn) -> S { std::env::args().skip(1).scan(false, |s, a| Some(CmdLn::convert_arg(s, a))).flatten().fold(s0, f) } /* pub fn bool_opt(s: String) -> bool { match s.as_ref() { "true"|"+"|"yes" => true, "false"|"-"|"no" => false, v => panic!("invalid bool value '{}'", v) } } */ }
Switch(String), Arg(String), Item(String) }
random_line_split
webhdfs.rs
use webhdfs::*; fn main() { use std::fs::File; use std::path::Path; use std::fs::create_dir_all; use commandline::*; let (mut client, op) = parse_command_line(); match op { Operation::Get(mut fs) => { match &fs[..] { &[ref input] => { let input_path = Path::new(input); let output = input_path.file_name().expect2("file name must be specified if no output file is given"); let mut out = File::create(&output).expect2("Could not create output file"); client.get_file(&input, &mut out).expect2("get error") } &[ref input, ref output] => { let mut out = File::create(&output).expect2("Could not create output file"); client.get_file(&input, &mut out).expect2("get error") } _ => { let target_dir_ = fs.pop().unwrap(); let target_dir = Path::new(&target_dir_); create_dir_all(&target_dir).expect2("Could not create output dir"); for input in fs { let input_path = Path::new(&input); let output_file = input_path.file_name().expect2("file name must be specified if no output file is given"); let output = target_dir.join(&Path::new(output_file)); let mut out = File::create(&output).expect2("Could not create output file"); client.get_file(&input, &mut out).expect2("get error") } } } } } } fn
() ->! { println!( "{} ({}) version {}", env!("CARGO_PKG_DESCRIPTION"), env!("CARGO_PKG_NAME"), env!("CARGO_PKG_VERSION") ); std::process::exit(0); } fn usage() ->! { println!("USAGE: webhdfs <options>... <command> <files>... webhdfs -h|--help webhdfs -v|--version options: -U|--uri|--url <url> API entrypoint -u|--user <string> User name -d|--doas <string> DoAs username -T|--dt <string> Delegation token -t|--timeout <unsigned> Default timeout in seconds -N|--natmap-file <filepath> Path to NAT mappings file -n|--natmap-entry <k=v> NAT mapping (multiple options are Ok) command and files: -v|--version Print version and exit -h|--help Print this thelp screen and exit --save-config <filepath> Save the effective configuration to the file -g|--get <remote-filepath> <local-path> -g|--get <remote-filepath> -g|--get <remote-filepath>.. <local-dirpath> Get files from HDFS "); std::process::exit(1); } enum Operation { Get(Vec<String>) } fn parse_command_line() -> (SyncHdfsClient, Operation) { use std::time::Duration; use std::collections::HashMap; use commandline::*; enum Sw { Uri, User, Doas, DToken, Timeout, NMFile, NMEntry, SaveConfig } enum Op { Get } struct S { sw: Option<Sw>, op: Option<Op>, files: Vec<String>, uri: Option<String>, user: Option<String>, doas: Option<String>, dtoken: Option<String>, timeout: Option<Duration>, natmap: Option<HashMap<String, String>>, save_config: Option<String>, } let s0 = S { sw: None, op: None, files: vec![], uri: None, user: None, doas:None, timeout: None, dtoken: None, natmap: None, save_config: None }; let result = commandline::parse_cmdln(s0, |mut s, arg| if let Some(sw) = s.sw.take() { match sw { Sw::Uri => S { uri: Some(arg.arg()),..s }, Sw::User => S { user: Some(arg.arg()),..s }, Sw::Doas => S { doas: Some(arg.arg()),..s }, Sw::DToken => S { dtoken: Some(arg.arg()),..s }, Sw::SaveConfig => S { save_config: Some(arg.arg()),..s }, Sw::Timeout => S { timeout: Some(Duration::from_secs(arg.arg().parse().expect2("Invalid timeout duration"))),..s }, Sw::NMFile => S { natmap: Some(config::read_kv_file(&arg.arg()).expect2("malformed natmap file")),..s }, Sw::NMEntry => { let mut nm = if let Some(nm) = s.natmap { nm } else { HashMap::new() }; let (k, v) = config::split_kv(arg.arg()).expect2("invalid natmap entry"); nm.insert(k, v); S { natmap: Some(nm),..s } } } } else { match arg.switch_ref() { "-v"|"--version" => version(), "-h"|"--help" => usage(), "-g"|"--get" => S { op: Some(Op::Get),..s }, "-U"|"--uri"|"--url" => S { sw: Some(Sw::Uri),..s }, "-u"|"--user" => S { sw: Some(Sw::User),..s }, "-d"|"--doas" => S { sw: Some(Sw::Doas),..s }, "-T"|"--dt" => S { sw: Some(Sw::DToken),..s }, "-t"|"--timeout" => S { sw: Some(Sw::Timeout),..s }, "-N"|"--natmap-file" => S { sw: Some(Sw::NMFile),..s }, "-n"|"--natmap-entry" => S { sw: Some(Sw::NMEntry),..s }, "--save-config" => S { sw: Some(Sw::SaveConfig),..s }, _ => { s.files.push(arg.arg()); s} } }); if result.sw.is_some() { error_exit("invalid command line at the end", "") } if let Some(f) = result.save_config { if result.op.is_some() { error_exit("--save-config must be used alone", "") } let uri = result.uri.expect2("must specify --uri when saving config"); let cfg = config::Config::new(uri.parse().expect2("Cannot parse URI")); config::write_config(&std::path::Path::new(&f), &cfg, true); std::process::exit(0); } else { let operation = if let Some(op) = result.op { op } else { error_exit("must specify operation", "") }; //build context let mut cx = if let Some(uri) = result.uri { 
SyncHdfsClientBuilder::new(uri.parse().expect2("Cannot parse URI")) } else { SyncHdfsClientBuilder::from_config_opt().expect2("No configuration files were found, and no mandatory options (--uri) were specified") }; if let Some(user) = result.user { cx = cx.user_name(user) } if let Some(doas) = result.doas { cx = cx.doas(doas) } if let Some(timeout) = result.timeout { cx = cx.default_timeout(timeout) } if let Some(natmap) = result.natmap { cx = cx.natmap(NatMap::new(natmap.into_iter()).expect2("Invalid natmap")) } if let Some(dtoken) = result.dtoken { cx = cx.delegation_token(dtoken) } let client = cx.build().expect2("Cannot build SyncHdfsClient"); let operation = match operation { Op::Get => if result.files.len() > 0 { Operation::Get(result.files) } else { error_exit("must specify at least one input file for --get", "") } }; (client, operation) } } //------------------------- mod commandline { /// Prints two-part message to stderr and exits pub fn error_exit(msg: &str, detail: &str) ->! { eprint!("Error: {}", msg); if detail.is_empty() { eprintln!() } else { eprintln!(" ({})", detail); } std::process::exit(1) } /// Expect2 function pub trait Expect2<T> { /// Same as Result::expect but the error message is brief and not intimidating fn expect2(self, msg: &str) -> T; } impl<T, E: std::error::Error> Expect2<T> for std::result::Result<T, E> { fn expect2(self, msg: &str) -> T { match self { Ok(v) => v, Err(e) => error_exit(msg, &e.to_string()) } } } impl<T> Expect2<T> for Option<T> { fn expect2(self, msg: &str) -> T { match self { Some(v) => v, None => error_exit(msg, "") } } } #[derive(Debug)] pub enum CmdLn { Switch(String), Arg(String), Item(String) } impl std::fmt::Display for CmdLn { fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { CmdLn::Switch(s) => write!(fmt, "Switch '{}'", s), CmdLn::Arg(s) => write!(fmt, "Arg '{}'", s), CmdLn::Item(s) => write!(fmt, "Item '{}'", s) } } } impl CmdLn { /// Splits command line argruments if needed /// - _ if bypass => Item(_) /// - '--sw=arg' => Switch('--sw') Arg('arg') /// - '-abc' => Item('-a') Item('-b') Item('-c') /// - '--' => *bypass = true; [] /// - _ => Item(_) fn convert_arg(bypass: &mut bool, v: String) -> Vec<CmdLn> { use std::iter::FromIterator; if *bypass { vec![CmdLn::Item(v)] } else if v == "--" { *bypass = true; vec![] } else if v.starts_with("--") { let mut s: Vec<String> = v.splitn(2, "=").map(|r| r.to_string()).collect(); let a = s.pop(); let b = s.pop(); match (a, b) { (Some(a), None) => vec![CmdLn::Item(a)], (Some(b), Some(a)) => vec![CmdLn::Switch(a), CmdLn::Arg(b)], _ => unreachable!() } } else if v.starts_with("-") && v!= "-" { v.chars().skip(1).map(|c| CmdLn::Item(String::from_iter(vec!['-', c]))).collect() } else { vec![CmdLn::Item(v)] } } fn raise(&self, w: &str) ->! { error_exit(&format!("we wanted {}, but got {:?}", w, self), "command line syntax error") } /*pub fn switch(self) -> String { match self { CmdLn::Switch(v) | CmdLn::Item(v) => v, other => other.raise("Switch") } }*/ pub fn switch_ref(&self) -> &str { match self { CmdLn::Switch(v) | CmdLn::Item(v) => v, other => other.raise("Switch") } } pub fn arg(self) -> String { match self { CmdLn::Arg(v) | CmdLn::Item(v) => v, other => other.raise("Arg") } } } /// Parses command line for 0- and 1-argument options. /// `f` consumes the current state and a command line item, and produces the new state. 
pub fn parse_cmdln<S, F>(s0: S, f: F) -> S where F: FnMut(S, CmdLn) -> S { std::env::args().skip(1).scan(false, |s, a| Some(CmdLn::convert_arg(s, a))).flatten().fold(s0, f) } /* pub fn bool_opt(s: String) -> bool { match s.as_ref() { "true"|"+"|"yes" => true, "false"|"-"|"no" => false, v => panic!("invalid bool value '{}'", v) } } */ }
version
identifier_name
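`CmdLn::convert_arg` in these rows normalizes arguments before the fold: `--switch=value` splits into a switch plus its argument, and bundled short options such as `-abc` expand to `-a -b -c`. Below is a simplified, self-contained sketch of that splitting; it omits the `--` bypass handling the real code has, and the function name and example values are made up.

```rust
fn split_arg(raw: &str) -> Vec<String> {
    if let Some(rest) = raw.strip_prefix("--") {
        // "--key=value" -> ["--key", "value"]; a bare "--key" stays whole.
        match rest.split_once('=') {
            Some((key, value)) => vec![format!("--{}", key), value.to_string()],
            None => vec![raw.to_string()],
        }
    } else if raw.starts_with('-') && raw.len() > 1 {
        // "-abc" -> ["-a", "-b", "-c"]
        raw.chars().skip(1).map(|c| format!("-{}", c)).collect()
    } else {
        // Plain items (including a lone "-") pass through untouched.
        vec![raw.to_string()]
    }
}

fn main() {
    assert_eq!(split_arg("--uri=http://host:9870"), vec!["--uri", "http://host:9870"]);
    assert_eq!(split_arg("-vg"), vec!["-v", "-g"]);
    assert_eq!(split_arg("file.txt"), vec!["file.txt"]);
}
```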
prefilter.rs
ystack: &[u8], span: Span) -> Candidate; } impl<P: PrefilterI +?Sized> PrefilterI for Arc<P> { #[inline(always)] fn find_in(&self, haystack: &[u8], span: Span) -> Candidate { (**self).find_in(haystack, span) } } /// A builder for constructing the best possible prefilter. When constructed, /// this builder will heuristically select the best prefilter it can build, /// if any, and discard the rest. #[derive(Debug)] pub(crate) struct Builder { count: usize, ascii_case_insensitive: bool, start_bytes: StartBytesBuilder, rare_bytes: RareBytesBuilder, memmem: MemmemBuilder, packed: Option<packed::Builder>, // If we run across a condition that suggests we shouldn't use a prefilter // at all (like an empty pattern), then disable prefilters entirely. enabled: bool, } impl Builder { /// Create a new builder for constructing the best possible prefilter. pub(crate) fn new(kind: MatchKind) -> Builder { let pbuilder = kind .as_packed() .map(|kind| packed::Config::new().match_kind(kind).builder()); Builder { count: 0, ascii_case_insensitive: false, start_bytes: StartBytesBuilder::new(), rare_bytes: RareBytesBuilder::new(), memmem: MemmemBuilder::default(), packed: pbuilder, enabled: true, } } /// Enable ASCII case insensitivity. When set, byte strings added to this /// builder will be interpreted without respect to ASCII case. pub(crate) fn ascii_case_insensitive(mut self, yes: bool) -> Builder { self.ascii_case_insensitive = yes; self.start_bytes = self.start_bytes.ascii_case_insensitive(yes); self.rare_bytes = self.rare_bytes.ascii_case_insensitive(yes); self } /// Return a prefilter suitable for quickly finding potential matches. /// /// All patterns added to an Aho-Corasick automaton should be added to this /// builder before attempting to construct the prefilter. pub(crate) fn build(&self) -> Option<Prefilter> { if!self.enabled { return None; } // If we only have one pattern, then deferring to memmem is always // the best choice. This is kind of a weird case, because, well, why // use Aho-Corasick if you only have one pattern? But maybe you don't // know exactly how many patterns you'll get up front, and you need to // support the option of multiple patterns. So instead of relying on // the caller to branch and use memmem explicitly, we just do it for // them. if!self.ascii_case_insensitive { if let Some(pre) = self.memmem.build() { return Some(pre); } } match (self.start_bytes.build(), self.rare_bytes.build()) { // If we could build both start and rare prefilters, then there are // a few cases in which we'd want to use the start-byte prefilter // over the rare-byte prefilter, since the former has lower // overhead. (prestart @ Some(_), prerare @ Some(_)) => { // If the start-byte prefilter can scan for a smaller number // of bytes than the rare-byte prefilter, then it's probably // faster. let has_fewer_bytes = self.start_bytes.count < self.rare_bytes.count; // Otherwise, if the combined frequency rank of the detected // bytes in the start-byte prefilter is "close" to the combined // frequency rank of the rare-byte prefilter, then we pick // the start-byte prefilter even if the rare-byte prefilter // heuristically searches for rare bytes. This is because the // rare-byte prefilter has higher constant costs, so we tend to // prefer the start-byte prefilter when we can. 
let has_rarer_bytes = self.start_bytes.rank_sum <= self.rare_bytes.rank_sum + 50; if has_fewer_bytes || has_rarer_bytes { prestart } else { prerare } } (prestart @ Some(_), None) => prestart, (None, prerare @ Some(_)) => prerare, (None, None) if self.ascii_case_insensitive => None, (None, None) => { self.packed.as_ref().and_then(|b| b.build()).map(|s| { let memory_usage = s.memory_usage(); Prefilter { finder: Arc::new(Packed(s)), memory_usage } }) } } } /// Add a literal string to this prefilter builder. pub(crate) fn add(&mut self, bytes: &[u8]) { if bytes.is_empty() { self.enabled = false; } if!self.enabled { return; } self.count += 1; self.start_bytes.add(bytes); self.rare_bytes.add(bytes); self.memmem.add(bytes); if let Some(ref mut pbuilder) = self.packed { pbuilder.add(bytes); } } } /// A type that wraps a packed searcher and implements the `Prefilter` /// interface. #[derive(Clone, Debug)] struct Packed(packed::Searcher); impl PrefilterI for Packed { fn find_in(&self, haystack: &[u8], span: Span) -> Candidate { self.0 .find_in(&haystack, span) .map_or(Candidate::None, Candidate::Match) } } /// A builder for constructing a prefilter that uses memmem. #[derive(Debug, Default)] struct MemmemBuilder { /// The number of patterns that have been added. count: usize, /// The singular pattern to search for. This is only set when count==1. one: Option<Vec<u8>>, } impl MemmemBuilder { fn build(&self) -> Option<Prefilter> { #[cfg(all(feature = "std", feature = "perf-literal"))] fn imp(builder: &MemmemBuilder) -> Option<Prefilter> { let pattern = builder.one.as_ref()?; assert_eq!(1, builder.count); let finder = Arc::new(Memmem( memchr::memmem::Finder::new(pattern).into_owned(), )); let memory_usage = pattern.len(); Some(Prefilter { finder, memory_usage }) } #[cfg(not(all(feature = "std", feature = "perf-literal")))] fn imp(_: &MemmemBuilder) -> Option<Prefilter> { None } imp(self) } fn add(&mut self, bytes: &[u8]) { self.count += 1; if self.count == 1 { self.one = Some(bytes.to_vec()); } else { self.one = None; } } } /// A type that wraps a SIMD accelerated single substring search from the /// `memchr` crate for use as a prefilter. /// /// Currently, this prefilter is only active for Aho-Corasick searchers with /// a single pattern. In theory, this could be extended to support searchers /// that have a common prefix of more than one byte (for one byte, we would use /// memchr), but it's not clear if it's worth it or not. /// /// Also, unfortunately, this currently also requires the'std' feature to /// be enabled. That's because memchr doesn't have a no-std-but-with-alloc /// mode, and so APIs like Finder::into_owned aren't available when'std' is /// disabled. But there should be an 'alloc' feature that brings in APIs like /// Finder::into_owned but doesn't use std-only features like runtime CPU /// feature detection. #[cfg(all(feature = "std", feature = "perf-literal"))] #[derive(Clone, Debug)] struct Memmem(memchr::memmem::Finder<'static>); #[cfg(all(feature = "std", feature = "perf-literal"))] impl PrefilterI for Memmem { fn find_in(&self, haystack: &[u8], span: Span) -> Candidate { use crate::util::primitives::PatternID; self.0.find(&haystack[span]).map_or(Candidate::None, |i| { let start = span.start + i; let end = start + self.0.needle().len(); // N.B. We can declare a match and use a fixed pattern ID here // because a Memmem prefilter is only ever created for searchers // with exactly one pattern. Thus, every match is always a match // and it is always for the first and only pattern. 
Candidate::Match(Match::new(PatternID::ZERO, start..end)) }) } } /// A builder for constructing a rare byte prefilter. /// /// A rare byte prefilter attempts to pick out a small set of rare bytes that /// occur in the patterns, and then quickly scan for matches of those rare /// bytes. #[derive(Clone, Debug)] struct
{ /// Whether this prefilter should account for ASCII case insensitivity or /// not. ascii_case_insensitive: bool, /// A set of rare bytes, indexed by byte value. rare_set: ByteSet, /// A set of byte offsets associated with bytes in a pattern. An entry /// corresponds to a particular bytes (its index) and is only non-zero if /// the byte occurred at an offset greater than 0 in at least one pattern. /// /// If a byte's offset is not representable in 8 bits, then the rare bytes /// prefilter becomes inert. byte_offsets: RareByteOffsets, /// Whether this is available as a prefilter or not. This can be set to /// false during construction if a condition is seen that invalidates the /// use of the rare-byte prefilter. available: bool, /// The number of bytes set to an active value in `byte_offsets`. count: usize, /// The sum of frequency ranks for the rare bytes detected. This is /// intended to give a heuristic notion of how rare the bytes are. rank_sum: u16, } /// A set of byte offsets, keyed by byte. #[derive(Clone, Copy)] struct RareByteOffsets { /// Each entry corresponds to the maximum offset of the corresponding /// byte across all patterns seen. set: [RareByteOffset; 256], } impl RareByteOffsets { /// Create a new empty set of rare byte offsets. pub(crate) fn empty() -> RareByteOffsets { RareByteOffsets { set: [RareByteOffset::default(); 256] } } /// Add the given offset for the given byte to this set. If the offset is /// greater than the existing offset, then it overwrites the previous /// value and returns false. If there is no previous value set, then this /// sets it and returns true. pub(crate) fn set(&mut self, byte: u8, off: RareByteOffset) { self.set[byte as usize].max = cmp::max(self.set[byte as usize].max, off.max); } } impl core::fmt::Debug for RareByteOffsets { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { let mut offsets = vec![]; for off in self.set.iter() { if off.max > 0 { offsets.push(off); } } f.debug_struct("RareByteOffsets").field("set", &offsets).finish() } } /// Offsets associated with an occurrence of a "rare" byte in any of the /// patterns used to construct a single Aho-Corasick automaton. #[derive(Clone, Copy, Debug)] struct RareByteOffset { /// The maximum offset at which a particular byte occurs from the start /// of any pattern. This is used as a shift amount. That is, when an /// occurrence of this byte is found, the candidate position reported by /// the prefilter is `position_of_byte - max`, such that the automaton /// will begin its search at a position that is guaranteed to observe a /// match. /// /// To avoid accidentally quadratic behavior, a prefilter is considered /// ineffective when it is asked to start scanning from a position that it /// has already scanned past. /// /// Using a `u8` here means that if we ever see a pattern that's longer /// than 255 bytes, then the entire rare byte prefilter is disabled. max: u8, } impl Default for RareByteOffset { fn default() -> RareByteOffset { RareByteOffset { max: 0 } } } impl RareByteOffset { /// Create a new rare byte offset. If the given offset is too big, then /// None is returned. In that case, callers should render the rare bytes /// prefilter inert. fn new(max: usize) -> Option<RareByteOffset> { if max > u8::MAX as usize { None } else { Some(RareByteOffset { max: max as u8 }) } } } impl RareBytesBuilder { /// Create a new builder for constructing a rare byte prefilter. 
fn new() -> RareBytesBuilder { RareBytesBuilder { ascii_case_insensitive: false, rare_set: ByteSet::empty(), byte_offsets: RareByteOffsets::empty(), available: true, count: 0, rank_sum: 0, } } /// Enable ASCII case insensitivity. When set, byte strings added to this /// builder will be interpreted without respect to ASCII case. fn ascii_case_insensitive(mut self, yes: bool) -> RareBytesBuilder { self.ascii_case_insensitive = yes; self } /// Build the rare bytes prefilter. /// /// If there are more than 3 distinct rare bytes found, or if heuristics /// otherwise determine that this prefilter should not be used, then `None` /// is returned. fn build(&self) -> Option<Prefilter> { #[cfg(feature = "perf-literal")] fn imp(builder: &RareBytesBuilder) -> Option<Prefilter> { if!builder.available || builder.count > 3 { return None; } let (mut bytes, mut len) = ([0; 3], 0); for b in 0..=255 { if builder.rare_set.contains(b) { bytes[len] = b as u8; len += 1; } } let finder: Arc<dyn PrefilterI> = match len { 0 => return None, 1 => Arc::new(RareBytesOne { byte1: bytes[0], offset: builder.byte_offsets.set[bytes[0] as usize], }), 2 => Arc::new(RareBytesTwo { offsets: builder.byte_offsets, byte1: bytes[0], byte2: bytes[1], }), 3 => Arc::new(RareBytesThree { offsets: builder.byte_offsets, byte1: bytes[0], byte2: bytes[1], byte3: bytes[2], }), _ => unreachable!(), }; Some(Prefilter { finder, memory_usage: 0 }) } #[cfg(not(feature = "perf-literal"))] fn imp(_: &RareBytesBuilder) -> Option<Prefilter> { None } imp(self) } /// Add a byte string to this builder. /// /// All patterns added to an Aho-Corasick automaton should be added to this /// builder before attempting to construct the prefilter. fn add(&mut self, bytes: &[u8]) { // If we've already given up, then do nothing. if!self.available { return; } // If we've already blown our budget, then don't waste time looking // for more rare bytes. if self.count > 3 { self.available = false; return; } // If the pattern is too long, then our offset table is bunk, so // give up. if bytes.len() >= 256 { self.available = false; return; } let mut rarest = match bytes.get(0) { None => return, Some(&b) => (b, freq_rank(b)), }; // The idea here is to look for the rarest byte in each pattern, and // add that to our set. As a special exception, if we see a byte that // we've already added, then we immediately stop and choose that byte, // even if there's another rare byte in the pattern. This helps us // apply the rare byte optimization in more cases by attempting to pick // bytes that are in common between patterns. So for example, if we // were searching for `Sherlock` and `lockjaw`, then this would pick // `k` for both patterns, resulting in the use of `memchr` instead of // `memchr2` for `k` and `j`. let mut found = false; for (pos, &b) in bytes.iter().enumerate() { self.set_offset(pos, b); if found { continue; } if self.rare_set.contains(b) { found = true; continue; } let rank = freq_rank(b); if rank < rarest.1 { rarest = (b, rank); } } if!found { self.add_rare_byte(rarest.0); } } fn set_offset(&mut self, pos: usize, byte: u8) { // This unwrap is OK because pos is never bigger than our max. 
let offset = RareByteOffset::new(pos).unwrap(); self.byte_offsets.set(byte, offset); if self.ascii_case_insensitive { self.byte_offsets.set(opposite_ascii_case(byte), offset); } } fn add_rare_byte(&mut self, byte: u8) { self.add_one_rare_byte(byte); if self.ascii_case_insensitive { self.add_one_rare_byte(opposite_ascii_case(byte)); } } fn add_one_rare_byte(&mut self, byte: u8) { if!self.rare_set.contains(byte) { self.rare_set.add(byte); self.count += 1; self.rank_sum += freq_rank(byte) as u16; } } } /// A prefilter for scanning for a single "rare" byte. #[cfg(feature = "perf-literal")] #[derive(Clone, Debug)] struct RareBytesOne { byte1: u8, offset: RareByteOffset, } #[cfg(feature = "perf-literal")] impl PrefilterI for RareBytesOne { fn find_in(&self, haystack: &[u8], span: Span) -> Candidate { memchr::memchr(self.byte1, &haystack[span]) .map(|i| { let pos = span.start + i; cmp::max( span.start, pos.saturating_sub(usize::from(self.offset.max)), ) }) .map_or(Candidate::None, Candidate::PossibleStartOfMatch) } } /// A prefilter for scanning for two "rare" bytes. #[cfg(feature = "perf-literal")] #[derive(Clone, Debug)] struct RareBytesTwo { offsets: RareByteOffsets, byte1: u8, byte2: u8, } #[cfg(feature = "perf-literal")] impl PrefilterI for RareBytesTwo { fn find_in(&self, haystack: &[u8], span: Span) -> Candidate { memchr::memchr2(self.byte1, self.byte2, &haystack[span]) .map(|i| { let pos = span.start + i; let offset = self.offsets.set[usize::from(haystack[pos])].max; cmp::max(span.start, pos.saturating_sub(usize::from(offset))) }) .map_or(Candidate::None, Candidate::PossibleStartOfMatch) } } /// A prefilter for scanning for three "rare" bytes. #[cfg(feature = "perf-literal")] #[derive(Clone, Debug)] struct RareBytesThree { offsets: RareByteOffsets, byte1: u8, byte2: u8, byte3: u8, } #[cfg(feature = "perf-literal")] impl PrefilterI for RareBytesThree { fn find_in(&self, haystack: &[u8], span: Span) -> Candidate { memchr::memchr3(self.byte1, self.byte2, self.byte3, &haystack[span]) .map(|i| { let pos = span.start + i; let offset = self.offsets.set[usize::from(haystack[pos])].max; cmp::max(span.start, pos.saturating_sub(usize::from(offset))) }) .map_or(Candidate
RareBytesBuilder
identifier_name
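The prefilters in this row are thin wrappers over the `memchr` crate: the single-pattern case defers to `memmem::Finder`, and the rare-byte variants scan with `memchr`/`memchr2`/`memchr3` and then shift the candidate back by a per-byte offset. Here is a short usage sketch of those primitives, assuming `memchr` is available as a dependency; the haystack text is made up (it borrows the Sherlock/lockjaw example from the row's comments).

```rust
fn main() {
    let haystack = b"We saw Sherlock and then lockjaw set in.";

    // Single-pattern case: the Memmem prefilter wraps a Finder like this one.
    let finder = memchr::memmem::Finder::new("lock");
    println!("first candidate at {:?}", finder.find(haystack));

    // Rare-byte case: scan for two uncommon bytes ('k' and 'j') in one pass,
    // the way RareBytesTwo does before applying its offset correction.
    println!("first 'k' or 'j' at {:?}", memchr::memchr2(b'k', b'j', haystack));
}
```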
prefilter.rs
ystack: &[u8], span: Span) -> Candidate; } impl<P: PrefilterI +?Sized> PrefilterI for Arc<P> { #[inline(always)] fn find_in(&self, haystack: &[u8], span: Span) -> Candidate { (**self).find_in(haystack, span) } } /// A builder for constructing the best possible prefilter. When constructed, /// this builder will heuristically select the best prefilter it can build, /// if any, and discard the rest. #[derive(Debug)] pub(crate) struct Builder { count: usize, ascii_case_insensitive: bool, start_bytes: StartBytesBuilder, rare_bytes: RareBytesBuilder, memmem: MemmemBuilder, packed: Option<packed::Builder>, // If we run across a condition that suggests we shouldn't use a prefilter // at all (like an empty pattern), then disable prefilters entirely. enabled: bool, } impl Builder { /// Create a new builder for constructing the best possible prefilter. pub(crate) fn new(kind: MatchKind) -> Builder { let pbuilder = kind .as_packed() .map(|kind| packed::Config::new().match_kind(kind).builder()); Builder { count: 0, ascii_case_insensitive: false, start_bytes: StartBytesBuilder::new(), rare_bytes: RareBytesBuilder::new(), memmem: MemmemBuilder::default(), packed: pbuilder, enabled: true, } } /// Enable ASCII case insensitivity. When set, byte strings added to this /// builder will be interpreted without respect to ASCII case. pub(crate) fn ascii_case_insensitive(mut self, yes: bool) -> Builder { self.ascii_case_insensitive = yes; self.start_bytes = self.start_bytes.ascii_case_insensitive(yes); self.rare_bytes = self.rare_bytes.ascii_case_insensitive(yes); self } /// Return a prefilter suitable for quickly finding potential matches. /// /// All patterns added to an Aho-Corasick automaton should be added to this /// builder before attempting to construct the prefilter. pub(crate) fn build(&self) -> Option<Prefilter> { if!self.enabled { return None; } // If we only have one pattern, then deferring to memmem is always // the best choice. This is kind of a weird case, because, well, why // use Aho-Corasick if you only have one pattern? But maybe you don't // know exactly how many patterns you'll get up front, and you need to // support the option of multiple patterns. So instead of relying on // the caller to branch and use memmem explicitly, we just do it for // them. if!self.ascii_case_insensitive { if let Some(pre) = self.memmem.build() { return Some(pre); } } match (self.start_bytes.build(), self.rare_bytes.build()) { // If we could build both start and rare prefilters, then there are // a few cases in which we'd want to use the start-byte prefilter // over the rare-byte prefilter, since the former has lower // overhead. (prestart @ Some(_), prerare @ Some(_)) => { // If the start-byte prefilter can scan for a smaller number // of bytes than the rare-byte prefilter, then it's probably // faster. let has_fewer_bytes = self.start_bytes.count < self.rare_bytes.count; // Otherwise, if the combined frequency rank of the detected // bytes in the start-byte prefilter is "close" to the combined // frequency rank of the rare-byte prefilter, then we pick // the start-byte prefilter even if the rare-byte prefilter // heuristically searches for rare bytes. This is because the // rare-byte prefilter has higher constant costs, so we tend to // prefer the start-byte prefilter when we can. 
let has_rarer_bytes = self.start_bytes.rank_sum <= self.rare_bytes.rank_sum + 50; if has_fewer_bytes || has_rarer_bytes { prestart } else { prerare } } (prestart @ Some(_), None) => prestart, (None, prerare @ Some(_)) => prerare, (None, None) if self.ascii_case_insensitive => None, (None, None) => { self.packed.as_ref().and_then(|b| b.build()).map(|s| { let memory_usage = s.memory_usage(); Prefilter { finder: Arc::new(Packed(s)), memory_usage } }) } } } /// Add a literal string to this prefilter builder. pub(crate) fn add(&mut self, bytes: &[u8]) { if bytes.is_empty() { self.enabled = false; } if!self.enabled { return; } self.count += 1; self.start_bytes.add(bytes); self.rare_bytes.add(bytes); self.memmem.add(bytes); if let Some(ref mut pbuilder) = self.packed { pbuilder.add(bytes); } } } /// A type that wraps a packed searcher and implements the `Prefilter` /// interface. #[derive(Clone, Debug)] struct Packed(packed::Searcher); impl PrefilterI for Packed { fn find_in(&self, haystack: &[u8], span: Span) -> Candidate { self.0 .find_in(&haystack, span) .map_or(Candidate::None, Candidate::Match) } } /// A builder for constructing a prefilter that uses memmem. #[derive(Debug, Default)] struct MemmemBuilder { /// The number of patterns that have been added. count: usize, /// The singular pattern to search for. This is only set when count==1. one: Option<Vec<u8>>, } impl MemmemBuilder { fn build(&self) -> Option<Prefilter> { #[cfg(all(feature = "std", feature = "perf-literal"))] fn imp(builder: &MemmemBuilder) -> Option<Prefilter> { let pattern = builder.one.as_ref()?; assert_eq!(1, builder.count); let finder = Arc::new(Memmem( memchr::memmem::Finder::new(pattern).into_owned(), )); let memory_usage = pattern.len(); Some(Prefilter { finder, memory_usage }) } #[cfg(not(all(feature = "std", feature = "perf-literal")))] fn imp(_: &MemmemBuilder) -> Option<Prefilter> { None } imp(self) } fn add(&mut self, bytes: &[u8]) { self.count += 1; if self.count == 1 { self.one = Some(bytes.to_vec()); } else { self.one = None; } } } /// A type that wraps a SIMD accelerated single substring search from the /// `memchr` crate for use as a prefilter. /// /// Currently, this prefilter is only active for Aho-Corasick searchers with /// a single pattern. In theory, this could be extended to support searchers /// that have a common prefix of more than one byte (for one byte, we would use /// memchr), but it's not clear if it's worth it or not. /// /// Also, unfortunately, this currently also requires the'std' feature to /// be enabled. That's because memchr doesn't have a no-std-but-with-alloc /// mode, and so APIs like Finder::into_owned aren't available when'std' is /// disabled. But there should be an 'alloc' feature that brings in APIs like /// Finder::into_owned but doesn't use std-only features like runtime CPU /// feature detection. #[cfg(all(feature = "std", feature = "perf-literal"))] #[derive(Clone, Debug)] struct Memmem(memchr::memmem::Finder<'static>); #[cfg(all(feature = "std", feature = "perf-literal"))] impl PrefilterI for Memmem { fn find_in(&self, haystack: &[u8], span: Span) -> Candidate { use crate::util::primitives::PatternID; self.0.find(&haystack[span]).map_or(Candidate::None, |i| { let start = span.start + i; let end = start + self.0.needle().len(); // N.B. We can declare a match and use a fixed pattern ID here // because a Memmem prefilter is only ever created for searchers // with exactly one pattern. Thus, every match is always a match // and it is always for the first and only pattern. 
Candidate::Match(Match::new(PatternID::ZERO, start..end)) }) } } /// A builder for constructing a rare byte prefilter. /// /// A rare byte prefilter attempts to pick out a small set of rare bytes that /// occurr in the patterns, and then quickly scan to matches of those rare /// bytes. #[derive(Clone, Debug)] struct RareBytesBuilder { /// Whether this prefilter should account for ASCII case insensitivity or /// not. ascii_case_insensitive: bool, /// A set of rare bytes, indexed by byte value. rare_set: ByteSet, /// A set of byte offsets associated with bytes in a pattern. An entry /// corresponds to a particular bytes (its index) and is only non-zero if /// the byte occurred at an offset greater than 0 in at least one pattern. /// /// If a byte's offset is not representable in 8 bits, then the rare bytes /// prefilter becomes inert. byte_offsets: RareByteOffsets, /// Whether this is available as a prefilter or not. This can be set to /// false during construction if a condition is seen that invalidates the /// use of the rare-byte prefilter. available: bool, /// The number of bytes set to an active value in `byte_offsets`. count: usize, /// The sum of frequency ranks for the rare bytes detected. This is /// intended to give a heuristic notion of how rare the bytes are. rank_sum: u16, } /// A set of byte offsets, keyed by byte. #[derive(Clone, Copy)] struct RareByteOffsets { /// Each entry corresponds to the maximum offset of the corresponding /// byte across all patterns seen. set: [RareByteOffset; 256], } impl RareByteOffsets { /// Create a new empty set of rare byte offsets. pub(crate) fn empty() -> RareByteOffsets { RareByteOffsets { set: [RareByteOffset::default(); 256] } } /// Add the given offset for the given byte to this set. If the offset is /// greater than the existing offset, then it overwrites the previous /// value and returns false. If there is no previous value set, then this /// sets it and returns true. pub(crate) fn set(&mut self, byte: u8, off: RareByteOffset) { self.set[byte as usize].max = cmp::max(self.set[byte as usize].max, off.max); } } impl core::fmt::Debug for RareByteOffsets { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { let mut offsets = vec![]; for off in self.set.iter() { if off.max > 0 { offsets.push(off); } } f.debug_struct("RareByteOffsets").field("set", &offsets).finish() } } /// Offsets associated with an occurrence of a "rare" byte in any of the /// patterns used to construct a single Aho-Corasick automaton. #[derive(Clone, Copy, Debug)] struct RareByteOffset { /// The maximum offset at which a particular byte occurs from the start /// of any pattern. This is used as a shift amount. That is, when an /// occurrence of this byte is found, the candidate position reported by /// the prefilter is `position_of_byte - max`, such that the automaton /// will begin its search at a position that is guaranteed to observe a /// match. /// /// To avoid accidentally quadratic behavior, a prefilter is considered /// ineffective when it is asked to start scanning from a position that it /// has already scanned past. /// /// Using a `u8` here means that if we ever see a pattern that's longer /// than 255 bytes, then the entire rare byte prefilter is disabled. max: u8, } impl Default for RareByteOffset { fn default() -> RareByteOffset { RareByteOffset { max: 0 } } } impl RareByteOffset { /// Create a new rare byte offset. If the given offset is too big, then /// None is returned. 
In that case, callers should render the rare bytes /// prefilter inert. fn new(max: usize) -> Option<RareByteOffset> { if max > u8::MAX as usize { None } else { Some(RareByteOffset { max: max as u8 }) } } } impl RareBytesBuilder { /// Create a new builder for constructing a rare byte prefilter. fn new() -> RareBytesBuilder { RareBytesBuilder { ascii_case_insensitive: false, rare_set: ByteSet::empty(), byte_offsets: RareByteOffsets::empty(), available: true, count: 0, rank_sum: 0, } } /// Enable ASCII case insensitivity. When set, byte strings added to this /// builder will be interpreted without respect to ASCII case. fn ascii_case_insensitive(mut self, yes: bool) -> RareBytesBuilder { self.ascii_case_insensitive = yes; self } /// Build the rare bytes prefilter. /// /// If there are more than 3 distinct rare bytes found, or if heuristics /// otherwise determine that this prefilter should not be used, then `None` /// is returned. fn build(&self) -> Option<Prefilter> { #[cfg(feature = "perf-literal")] fn imp(builder: &RareBytesBuilder) -> Option<Prefilter> { if!builder.available || builder.count > 3 { return None; } let (mut bytes, mut len) = ([0; 3], 0); for b in 0..=255 { if builder.rare_set.contains(b) { bytes[len] = b as u8; len += 1; } } let finder: Arc<dyn PrefilterI> = match len { 0 => return None, 1 => Arc::new(RareBytesOne { byte1: bytes[0], offset: builder.byte_offsets.set[bytes[0] as usize], }), 2 => Arc::new(RareBytesTwo { offsets: builder.byte_offsets, byte1: bytes[0], byte2: bytes[1], }), 3 => Arc::new(RareBytesThree { offsets: builder.byte_offsets, byte1: bytes[0], byte2: bytes[1], byte3: bytes[2], }), _ => unreachable!(), }; Some(Prefilter { finder, memory_usage: 0 }) } #[cfg(not(feature = "perf-literal"))] fn imp(_: &RareBytesBuilder) -> Option<Prefilter> { None } imp(self) } /// Add a byte string to this builder. /// /// All patterns added to an Aho-Corasick automaton should be added to this /// builder before attempting to construct the prefilter. fn add(&mut self, bytes: &[u8]) { // If we've already given up, then do nothing. if!self.available { return; } // If we've already blown our budget, then don't waste time looking // for more rare bytes. if self.count > 3 { self.available = false; return; } // If the pattern is too long, then our offset table is bunk, so // give up. if bytes.len() >= 256 { self.available = false; return; } let mut rarest = match bytes.get(0) { None => return, Some(&b) => (b, freq_rank(b)), }; // The idea here is to look for the rarest byte in each pattern, and // add that to our set. As a special exception, if we see a byte that // we've already added, then we immediately stop and choose that byte, // even if there's another rare byte in the pattern. This helps us // apply the rare byte optimization in more cases by attempting to pick // bytes that are in common between patterns. So for example, if we // were searching for `Sherlock` and `lockjaw`, then this would pick // `k` for both patterns, resulting in the use of `memchr` instead of // `memchr2` for `k` and `j`. let mut found = false; for (pos, &b) in bytes.iter().enumerate() { self.set_offset(pos, b); if found { continue; } if self.rare_set.contains(b) { found = true; continue; } let rank = freq_rank(b); if rank < rarest.1 { rarest = (b, rank); } } if!found { self.add_rare_byte(rarest.0); } } fn set_offset(&mut self, pos: usize, byte: u8) { // This unwrap is OK because pos is never bigger than our max. 
let offset = RareByteOffset::new(pos).unwrap(); self.byte_offsets.set(byte, offset); if self.ascii_case_insensitive { self.byte_offsets.set(opposite_ascii_case(byte), offset); } } fn add_rare_byte(&mut self, byte: u8) { self.add_one_rare_byte(byte); if self.ascii_case_insensitive { self.add_one_rare_byte(opposite_ascii_case(byte)); } } fn add_one_rare_byte(&mut self, byte: u8) { if!self.rare_set.contains(byte)
} } /// A prefilter for scanning for a single "rare" byte. #[cfg(feature = "perf-literal")] #[derive(Clone, Debug)] struct RareBytesOne { byte1: u8, offset: RareByteOffset, } #[cfg(feature = "perf-literal")] impl PrefilterI for RareBytesOne { fn find_in(&self, haystack: &[u8], span: Span) -> Candidate { memchr::memchr(self.byte1, &haystack[span]) .map(|i| { let pos = span.start + i; cmp::max( span.start, pos.saturating_sub(usize::from(self.offset.max)), ) }) .map_or(Candidate::None, Candidate::PossibleStartOfMatch) } } /// A prefilter for scanning for two "rare" bytes. #[cfg(feature = "perf-literal")] #[derive(Clone, Debug)] struct RareBytesTwo { offsets: RareByteOffsets, byte1: u8, byte2: u8, } #[cfg(feature = "perf-literal")] impl PrefilterI for RareBytesTwo { fn find_in(&self, haystack: &[u8], span: Span) -> Candidate { memchr::memchr2(self.byte1, self.byte2, &haystack[span]) .map(|i| { let pos = span.start + i; let offset = self.offsets.set[usize::from(haystack[pos])].max; cmp::max(span.start, pos.saturating_sub(usize::from(offset))) }) .map_or(Candidate::None, Candidate::PossibleStartOfMatch) } } /// A prefilter for scanning for three "rare" bytes. #[cfg(feature = "perf-literal")] #[derive(Clone, Debug)] struct RareBytesThree { offsets: RareByteOffsets, byte1: u8, byte2: u8, byte3: u8, } #[cfg(feature = "perf-literal")] impl PrefilterI for RareBytesThree { fn find_in(&self, haystack: &[u8], span: Span) -> Candidate { memchr::memchr3(self.byte1, self.byte2, self.byte3, &haystack[span]) .map(|i| { let pos = span.start + i; let offset = self.offsets.set[usize::from(haystack[pos])].max; cmp::max(span.start, pos.saturating_sub(usize::from(offset))) }) .map_or(Candidate
{ self.rare_set.add(byte); self.count += 1; self.rank_sum += freq_rank(byte) as u16; }
conditional_block
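// A hedged sketch of the "pick one rare byte per pattern" heuristic described
// in the comments above: for each pattern, take the byte with the lowest
// frequency rank, but prefer a byte already chosen for an earlier pattern so
// several patterns can share a single scan. The tiny freq_rank table here is
// made up for illustration (the crate ships a real byte-frequency table), and
// offset bookkeeping is omitted to keep the sketch short.
use std::collections::HashSet;

fn freq_rank(b: u8) -> u8 {
    match b {
        b'e' | b't' | b'a' | b'o' | b' ' => 250, // very common (illustrative)
        b'j' | b'k' | b'q' | b'x' | b'z' => 10,  // rare (illustrative)
        _ => 100,
    }
}

fn pick_rare_bytes(patterns: &[&[u8]]) -> HashSet<u8> {
    let mut chosen = HashSet::new();
    for pat in patterns {
        let mut rarest: Option<(u8, u8)> = None;
        let mut reused = false;
        for &b in pat.iter() {
            if chosen.contains(&b) {
                reused = true; // reuse an already chosen byte for this pattern
                break;
            }
            let rank = freq_rank(b);
            if rarest.map_or(true, |(_, r)| rank < r) {
                rarest = Some((b, rank));
            }
        }
        if !reused {
            if let Some((b, _)) = rarest {
                chosen.insert(b);
            }
        }
    }
    chosen
}

fn main() {
    // As in the comment above: `Sherlock` picks 'k', and `lockjaw` then reuses
    // 'k' instead of adding 'j', so one single-byte scan covers both patterns.
    let patterns: &[&[u8]] = &[b"Sherlock", b"lockjaw"];
    let chosen = pick_rare_bytes(patterns);
    assert!(chosen.contains(&b'k'));
    assert_eq!(chosen.len(), 1);
}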
prefilter.rs
ystack: &[u8], span: Span) -> Candidate; } impl<P: PrefilterI +?Sized> PrefilterI for Arc<P> { #[inline(always)] fn find_in(&self, haystack: &[u8], span: Span) -> Candidate { (**self).find_in(haystack, span) } } /// A builder for constructing the best possible prefilter. When constructed, /// this builder will heuristically select the best prefilter it can build, /// if any, and discard the rest. #[derive(Debug)] pub(crate) struct Builder { count: usize, ascii_case_insensitive: bool, start_bytes: StartBytesBuilder, rare_bytes: RareBytesBuilder, memmem: MemmemBuilder, packed: Option<packed::Builder>, // If we run across a condition that suggests we shouldn't use a prefilter // at all (like an empty pattern), then disable prefilters entirely. enabled: bool, } impl Builder { /// Create a new builder for constructing the best possible prefilter. pub(crate) fn new(kind: MatchKind) -> Builder { let pbuilder = kind .as_packed() .map(|kind| packed::Config::new().match_kind(kind).builder()); Builder { count: 0, ascii_case_insensitive: false, start_bytes: StartBytesBuilder::new(), rare_bytes: RareBytesBuilder::new(), memmem: MemmemBuilder::default(), packed: pbuilder, enabled: true, } } /// Enable ASCII case insensitivity. When set, byte strings added to this /// builder will be interpreted without respect to ASCII case. pub(crate) fn ascii_case_insensitive(mut self, yes: bool) -> Builder { self.ascii_case_insensitive = yes; self.start_bytes = self.start_bytes.ascii_case_insensitive(yes); self.rare_bytes = self.rare_bytes.ascii_case_insensitive(yes); self } /// Return a prefilter suitable for quickly finding potential matches. /// /// All patterns added to an Aho-Corasick automaton should be added to this /// builder before attempting to construct the prefilter. pub(crate) fn build(&self) -> Option<Prefilter> { if!self.enabled { return None; } // If we only have one pattern, then deferring to memmem is always // the best choice. This is kind of a weird case, because, well, why // use Aho-Corasick if you only have one pattern? But maybe you don't // know exactly how many patterns you'll get up front, and you need to // support the option of multiple patterns. So instead of relying on // the caller to branch and use memmem explicitly, we just do it for // them. if!self.ascii_case_insensitive { if let Some(pre) = self.memmem.build() { return Some(pre); } } match (self.start_bytes.build(), self.rare_bytes.build()) { // If we could build both start and rare prefilters, then there are // a few cases in which we'd want to use the start-byte prefilter // over the rare-byte prefilter, since the former has lower // overhead. (prestart @ Some(_), prerare @ Some(_)) => { // If the start-byte prefilter can scan for a smaller number // of bytes than the rare-byte prefilter, then it's probably // faster. let has_fewer_bytes = self.start_bytes.count < self.rare_bytes.count; // Otherwise, if the combined frequency rank of the detected // bytes in the start-byte prefilter is "close" to the combined // frequency rank of the rare-byte prefilter, then we pick // the start-byte prefilter even if the rare-byte prefilter // heuristically searches for rare bytes. This is because the // rare-byte prefilter has higher constant costs, so we tend to // prefer the start-byte prefilter when we can. 
let has_rarer_bytes = self.start_bytes.rank_sum <= self.rare_bytes.rank_sum + 50; if has_fewer_bytes || has_rarer_bytes { prestart } else { prerare } } (prestart @ Some(_), None) => prestart, (None, prerare @ Some(_)) => prerare, (None, None) if self.ascii_case_insensitive => None, (None, None) => { self.packed.as_ref().and_then(|b| b.build()).map(|s| { let memory_usage = s.memory_usage(); Prefilter { finder: Arc::new(Packed(s)), memory_usage } }) } } } /// Add a literal string to this prefilter builder. pub(crate) fn add(&mut self, bytes: &[u8]) { if bytes.is_empty() { self.enabled = false; } if!self.enabled { return; } self.count += 1; self.start_bytes.add(bytes); self.rare_bytes.add(bytes); self.memmem.add(bytes); if let Some(ref mut pbuilder) = self.packed { pbuilder.add(bytes); } } } /// A type that wraps a packed searcher and implements the `Prefilter` /// interface. #[derive(Clone, Debug)] struct Packed(packed::Searcher); impl PrefilterI for Packed { fn find_in(&self, haystack: &[u8], span: Span) -> Candidate { self.0 .find_in(&haystack, span) .map_or(Candidate::None, Candidate::Match) } } /// A builder for constructing a prefilter that uses memmem. #[derive(Debug, Default)] struct MemmemBuilder { /// The number of patterns that have been added. count: usize, /// The singular pattern to search for. This is only set when count==1. one: Option<Vec<u8>>, } impl MemmemBuilder { fn build(&self) -> Option<Prefilter> { #[cfg(all(feature = "std", feature = "perf-literal"))] fn imp(builder: &MemmemBuilder) -> Option<Prefilter> { let pattern = builder.one.as_ref()?; assert_eq!(1, builder.count); let finder = Arc::new(Memmem( memchr::memmem::Finder::new(pattern).into_owned(), )); let memory_usage = pattern.len(); Some(Prefilter { finder, memory_usage }) } #[cfg(not(all(feature = "std", feature = "perf-literal")))] fn imp(_: &MemmemBuilder) -> Option<Prefilter> { None } imp(self) } fn add(&mut self, bytes: &[u8]) { self.count += 1; if self.count == 1 { self.one = Some(bytes.to_vec()); } else { self.one = None; } } } /// A type that wraps a SIMD accelerated single substring search from the /// `memchr` crate for use as a prefilter. /// /// Currently, this prefilter is only active for Aho-Corasick searchers with /// a single pattern. In theory, this could be extended to support searchers /// that have a common prefix of more than one byte (for one byte, we would use /// memchr), but it's not clear if it's worth it or not. /// /// Also, unfortunately, this currently also requires the'std' feature to /// be enabled. That's because memchr doesn't have a no-std-but-with-alloc /// mode, and so APIs like Finder::into_owned aren't available when'std' is /// disabled. But there should be an 'alloc' feature that brings in APIs like /// Finder::into_owned but doesn't use std-only features like runtime CPU /// feature detection. #[cfg(all(feature = "std", feature = "perf-literal"))] #[derive(Clone, Debug)] struct Memmem(memchr::memmem::Finder<'static>); #[cfg(all(feature = "std", feature = "perf-literal"))] impl PrefilterI for Memmem { fn find_in(&self, haystack: &[u8], span: Span) -> Candidate
} /// A builder for constructing a rare byte prefilter. /// /// A rare byte prefilter attempts to pick out a small set of rare bytes that /// occurr in the patterns, and then quickly scan to matches of those rare /// bytes. #[derive(Clone, Debug)] struct RareBytesBuilder { /// Whether this prefilter should account for ASCII case insensitivity or /// not. ascii_case_insensitive: bool, /// A set of rare bytes, indexed by byte value. rare_set: ByteSet, /// A set of byte offsets associated with bytes in a pattern. An entry /// corresponds to a particular bytes (its index) and is only non-zero if /// the byte occurred at an offset greater than 0 in at least one pattern. /// /// If a byte's offset is not representable in 8 bits, then the rare bytes /// prefilter becomes inert. byte_offsets: RareByteOffsets, /// Whether this is available as a prefilter or not. This can be set to /// false during construction if a condition is seen that invalidates the /// use of the rare-byte prefilter. available: bool, /// The number of bytes set to an active value in `byte_offsets`. count: usize, /// The sum of frequency ranks for the rare bytes detected. This is /// intended to give a heuristic notion of how rare the bytes are. rank_sum: u16, } /// A set of byte offsets, keyed by byte. #[derive(Clone, Copy)] struct RareByteOffsets { /// Each entry corresponds to the maximum offset of the corresponding /// byte across all patterns seen. set: [RareByteOffset; 256], } impl RareByteOffsets { /// Create a new empty set of rare byte offsets. pub(crate) fn empty() -> RareByteOffsets { RareByteOffsets { set: [RareByteOffset::default(); 256] } } /// Add the given offset for the given byte to this set. If the offset is /// greater than the existing offset, then it overwrites the previous /// value and returns false. If there is no previous value set, then this /// sets it and returns true. pub(crate) fn set(&mut self, byte: u8, off: RareByteOffset) { self.set[byte as usize].max = cmp::max(self.set[byte as usize].max, off.max); } } impl core::fmt::Debug for RareByteOffsets { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { let mut offsets = vec![]; for off in self.set.iter() { if off.max > 0 { offsets.push(off); } } f.debug_struct("RareByteOffsets").field("set", &offsets).finish() } } /// Offsets associated with an occurrence of a "rare" byte in any of the /// patterns used to construct a single Aho-Corasick automaton. #[derive(Clone, Copy, Debug)] struct RareByteOffset { /// The maximum offset at which a particular byte occurs from the start /// of any pattern. This is used as a shift amount. That is, when an /// occurrence of this byte is found, the candidate position reported by /// the prefilter is `position_of_byte - max`, such that the automaton /// will begin its search at a position that is guaranteed to observe a /// match. /// /// To avoid accidentally quadratic behavior, a prefilter is considered /// ineffective when it is asked to start scanning from a position that it /// has already scanned past. /// /// Using a `u8` here means that if we ever see a pattern that's longer /// than 255 bytes, then the entire rare byte prefilter is disabled. max: u8, } impl Default for RareByteOffset { fn default() -> RareByteOffset { RareByteOffset { max: 0 } } } impl RareByteOffset { /// Create a new rare byte offset. If the given offset is too big, then /// None is returned. In that case, callers should render the rare bytes /// prefilter inert. 
fn new(max: usize) -> Option<RareByteOffset> { if max > u8::MAX as usize { None } else { Some(RareByteOffset { max: max as u8 }) } } } impl RareBytesBuilder { /// Create a new builder for constructing a rare byte prefilter. fn new() -> RareBytesBuilder { RareBytesBuilder { ascii_case_insensitive: false, rare_set: ByteSet::empty(), byte_offsets: RareByteOffsets::empty(), available: true, count: 0, rank_sum: 0, } } /// Enable ASCII case insensitivity. When set, byte strings added to this /// builder will be interpreted without respect to ASCII case. fn ascii_case_insensitive(mut self, yes: bool) -> RareBytesBuilder { self.ascii_case_insensitive = yes; self } /// Build the rare bytes prefilter. /// /// If there are more than 3 distinct rare bytes found, or if heuristics /// otherwise determine that this prefilter should not be used, then `None` /// is returned. fn build(&self) -> Option<Prefilter> { #[cfg(feature = "perf-literal")] fn imp(builder: &RareBytesBuilder) -> Option<Prefilter> { if!builder.available || builder.count > 3 { return None; } let (mut bytes, mut len) = ([0; 3], 0); for b in 0..=255 { if builder.rare_set.contains(b) { bytes[len] = b as u8; len += 1; } } let finder: Arc<dyn PrefilterI> = match len { 0 => return None, 1 => Arc::new(RareBytesOne { byte1: bytes[0], offset: builder.byte_offsets.set[bytes[0] as usize], }), 2 => Arc::new(RareBytesTwo { offsets: builder.byte_offsets, byte1: bytes[0], byte2: bytes[1], }), 3 => Arc::new(RareBytesThree { offsets: builder.byte_offsets, byte1: bytes[0], byte2: bytes[1], byte3: bytes[2], }), _ => unreachable!(), }; Some(Prefilter { finder, memory_usage: 0 }) } #[cfg(not(feature = "perf-literal"))] fn imp(_: &RareBytesBuilder) -> Option<Prefilter> { None } imp(self) } /// Add a byte string to this builder. /// /// All patterns added to an Aho-Corasick automaton should be added to this /// builder before attempting to construct the prefilter. fn add(&mut self, bytes: &[u8]) { // If we've already given up, then do nothing. if!self.available { return; } // If we've already blown our budget, then don't waste time looking // for more rare bytes. if self.count > 3 { self.available = false; return; } // If the pattern is too long, then our offset table is bunk, so // give up. if bytes.len() >= 256 { self.available = false; return; } let mut rarest = match bytes.get(0) { None => return, Some(&b) => (b, freq_rank(b)), }; // The idea here is to look for the rarest byte in each pattern, and // add that to our set. As a special exception, if we see a byte that // we've already added, then we immediately stop and choose that byte, // even if there's another rare byte in the pattern. This helps us // apply the rare byte optimization in more cases by attempting to pick // bytes that are in common between patterns. So for example, if we // were searching for `Sherlock` and `lockjaw`, then this would pick // `k` for both patterns, resulting in the use of `memchr` instead of // `memchr2` for `k` and `j`. let mut found = false; for (pos, &b) in bytes.iter().enumerate() { self.set_offset(pos, b); if found { continue; } if self.rare_set.contains(b) { found = true; continue; } let rank = freq_rank(b); if rank < rarest.1 { rarest = (b, rank); } } if!found { self.add_rare_byte(rarest.0); } } fn set_offset(&mut self, pos: usize, byte: u8) { // This unwrap is OK because pos is never bigger than our max. 
let offset = RareByteOffset::new(pos).unwrap(); self.byte_offsets.set(byte, offset); if self.ascii_case_insensitive { self.byte_offsets.set(opposite_ascii_case(byte), offset); } } fn add_rare_byte(&mut self, byte: u8) { self.add_one_rare_byte(byte); if self.ascii_case_insensitive { self.add_one_rare_byte(opposite_ascii_case(byte)); } } fn add_one_rare_byte(&mut self, byte: u8) { if!self.rare_set.contains(byte) { self.rare_set.add(byte); self.count += 1; self.rank_sum += freq_rank(byte) as u16; } } } /// A prefilter for scanning for a single "rare" byte. #[cfg(feature = "perf-literal")] #[derive(Clone, Debug)] struct RareBytesOne { byte1: u8, offset: RareByteOffset, } #[cfg(feature = "perf-literal")] impl PrefilterI for RareBytesOne { fn find_in(&self, haystack: &[u8], span: Span) -> Candidate { memchr::memchr(self.byte1, &haystack[span]) .map(|i| { let pos = span.start + i; cmp::max( span.start, pos.saturating_sub(usize::from(self.offset.max)), ) }) .map_or(Candidate::None, Candidate::PossibleStartOfMatch) } } /// A prefilter for scanning for two "rare" bytes. #[cfg(feature = "perf-literal")] #[derive(Clone, Debug)] struct RareBytesTwo { offsets: RareByteOffsets, byte1: u8, byte2: u8, } #[cfg(feature = "perf-literal")] impl PrefilterI for RareBytesTwo { fn find_in(&self, haystack: &[u8], span: Span) -> Candidate { memchr::memchr2(self.byte1, self.byte2, &haystack[span]) .map(|i| { let pos = span.start + i; let offset = self.offsets.set[usize::from(haystack[pos])].max; cmp::max(span.start, pos.saturating_sub(usize::from(offset))) }) .map_or(Candidate::None, Candidate::PossibleStartOfMatch) } } /// A prefilter for scanning for three "rare" bytes. #[cfg(feature = "perf-literal")] #[derive(Clone, Debug)] struct RareBytesThree { offsets: RareByteOffsets, byte1: u8, byte2: u8, byte3: u8, } #[cfg(feature = "perf-literal")] impl PrefilterI for RareBytesThree { fn find_in(&self, haystack: &[u8], span: Span) -> Candidate { memchr::memchr3(self.byte1, self.byte2, self.byte3, &haystack[span]) .map(|i| { let pos = span.start + i; let offset = self.offsets.set[usize::from(haystack[pos])].max; cmp::max(span.start, pos.saturating_sub(usize::from(offset))) }) .map_or(Candidate
{ use crate::util::primitives::PatternID; self.0.find(&haystack[span]).map_or(Candidate::None, |i| { let start = span.start + i; let end = start + self.0.needle().len(); // N.B. We can declare a match and use a fixed pattern ID here // because a Memmem prefilter is only ever created for searchers // with exactly one pattern. Thus, every match is always a match // and it is always for the first and only pattern. Candidate::Match(Match::new(PatternID::ZERO, start..end)) }) }
identifier_body
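// A small sketch of the single-pattern fast path described in the Memmem
// prefilter above: when exactly one pattern was added, the prefilter can
// answer with a definitive match rather than a candidate, because every
// substring hit is the full (and only) pattern. Assumes the `memchr` crate
// (which provides the memmem module) is available as a dependency; the
// pattern and haystack are illustrative.
use memchr::memmem;

fn main() {
    let finder = memmem::Finder::new("needle").into_owned();
    let haystack = "hay hay needle hay";

    if let Some(start) = finder.find(haystack.as_bytes()) {
        let end = start + finder.needle().len();
        // With exactly one pattern, this range is already a confirmed match,
        // so no automaton verification step is required.
        println!("match for pattern 0 at {}..{}", start, end);
        assert_eq!(&haystack[start..end], "needle");
    }
}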
prefilter.rs
haystack: &[u8], span: Span) -> Candidate; } impl<P: PrefilterI +?Sized> PrefilterI for Arc<P> { #[inline(always)] fn find_in(&self, haystack: &[u8], span: Span) -> Candidate { (**self).find_in(haystack, span) } } /// A builder for constructing the best possible prefilter. When constructed, /// this builder will heuristically select the best prefilter it can build, /// if any, and discard the rest. #[derive(Debug)] pub(crate) struct Builder { count: usize, ascii_case_insensitive: bool, start_bytes: StartBytesBuilder, rare_bytes: RareBytesBuilder, memmem: MemmemBuilder, packed: Option<packed::Builder>, // If we run across a condition that suggests we shouldn't use a prefilter // at all (like an empty pattern), then disable prefilters entirely. enabled: bool, } impl Builder { /// Create a new builder for constructing the best possible prefilter. pub(crate) fn new(kind: MatchKind) -> Builder { let pbuilder = kind .as_packed() .map(|kind| packed::Config::new().match_kind(kind).builder()); Builder { count: 0, ascii_case_insensitive: false, start_bytes: StartBytesBuilder::new(), rare_bytes: RareBytesBuilder::new(), memmem: MemmemBuilder::default(), packed: pbuilder, enabled: true, } } /// Enable ASCII case insensitivity. When set, byte strings added to this /// builder will be interpreted without respect to ASCII case. pub(crate) fn ascii_case_insensitive(mut self, yes: bool) -> Builder { self.ascii_case_insensitive = yes; self.start_bytes = self.start_bytes.ascii_case_insensitive(yes); self.rare_bytes = self.rare_bytes.ascii_case_insensitive(yes); self } /// Return a prefilter suitable for quickly finding potential matches. /// /// All patterns added to an Aho-Corasick automaton should be added to this /// builder before attempting to construct the prefilter. pub(crate) fn build(&self) -> Option<Prefilter> { if!self.enabled { return None; } // If we only have one pattern, then deferring to memmem is always // the best choice. This is kind of a weird case, because, well, why // use Aho-Corasick if you only have one pattern? But maybe you don't // know exactly how many patterns you'll get up front, and you need to // support the option of multiple patterns. So instead of relying on // the caller to branch and use memmem explicitly, we just do it for // them. if!self.ascii_case_insensitive { if let Some(pre) = self.memmem.build() { return Some(pre); } } match (self.start_bytes.build(), self.rare_bytes.build()) { // If we could build both start and rare prefilters, then there are // a few cases in which we'd want to use the start-byte prefilter // over the rare-byte prefilter, since the former has lower // overhead. (prestart @ Some(_), prerare @ Some(_)) => { // If the start-byte prefilter can scan for a smaller number // of bytes than the rare-byte prefilter, then it's probably // faster. let has_fewer_bytes = self.start_bytes.count < self.rare_bytes.count; // Otherwise, if the combined frequency rank of the detected // bytes in the start-byte prefilter is "close" to the combined // frequency rank of the rare-byte prefilter, then we pick // the start-byte prefilter even if the rare-byte prefilter // heuristically searches for rare bytes. This is because the // rare-byte prefilter has higher constant costs, so we tend to // prefer the start-byte prefilter when we can. 
let has_rarer_bytes = self.start_bytes.rank_sum <= self.rare_bytes.rank_sum + 50; if has_fewer_bytes || has_rarer_bytes { prestart } else { prerare } } (prestart @ Some(_), None) => prestart, (None, prerare @ Some(_)) => prerare, (None, None) if self.ascii_case_insensitive => None, (None, None) => { self.packed.as_ref().and_then(|b| b.build()).map(|s| { let memory_usage = s.memory_usage(); Prefilter { finder: Arc::new(Packed(s)), memory_usage } }) } } } /// Add a literal string to this prefilter builder. pub(crate) fn add(&mut self, bytes: &[u8]) { if bytes.is_empty() { self.enabled = false; } if!self.enabled { return; } self.count += 1; self.start_bytes.add(bytes); self.rare_bytes.add(bytes); self.memmem.add(bytes); if let Some(ref mut pbuilder) = self.packed { pbuilder.add(bytes); } } } /// A type that wraps a packed searcher and implements the `Prefilter` /// interface. #[derive(Clone, Debug)] struct Packed(packed::Searcher); impl PrefilterI for Packed { fn find_in(&self, haystack: &[u8], span: Span) -> Candidate { self.0 .find_in(&haystack, span) .map_or(Candidate::None, Candidate::Match) } } /// A builder for constructing a prefilter that uses memmem. #[derive(Debug, Default)] struct MemmemBuilder { /// The number of patterns that have been added. count: usize, /// The singular pattern to search for. This is only set when count==1. one: Option<Vec<u8>>, } impl MemmemBuilder { fn build(&self) -> Option<Prefilter> { #[cfg(all(feature = "std", feature = "perf-literal"))] fn imp(builder: &MemmemBuilder) -> Option<Prefilter> { let pattern = builder.one.as_ref()?; assert_eq!(1, builder.count); let finder = Arc::new(Memmem( memchr::memmem::Finder::new(pattern).into_owned(), )); let memory_usage = pattern.len(); Some(Prefilter { finder, memory_usage }) } #[cfg(not(all(feature = "std", feature = "perf-literal")))] fn imp(_: &MemmemBuilder) -> Option<Prefilter> { None } imp(self) } fn add(&mut self, bytes: &[u8]) { self.count += 1; if self.count == 1 { self.one = Some(bytes.to_vec()); } else { self.one = None; } } } /// A type that wraps a SIMD accelerated single substring search from the /// `memchr` crate for use as a prefilter. /// /// Currently, this prefilter is only active for Aho-Corasick searchers with /// a single pattern. In theory, this could be extended to support searchers /// that have a common prefix of more than one byte (for one byte, we would use /// memchr), but it's not clear if it's worth it or not. /// /// Also, unfortunately, this currently also requires the'std' feature to /// be enabled. That's because memchr doesn't have a no-std-but-with-alloc /// mode, and so APIs like Finder::into_owned aren't available when'std' is /// disabled. But there should be an 'alloc' feature that brings in APIs like /// Finder::into_owned but doesn't use std-only features like runtime CPU /// feature detection. #[cfg(all(feature = "std", feature = "perf-literal"))] #[derive(Clone, Debug)] struct Memmem(memchr::memmem::Finder<'static>); #[cfg(all(feature = "std", feature = "perf-literal"))] impl PrefilterI for Memmem { fn find_in(&self, haystack: &[u8], span: Span) -> Candidate { use crate::util::primitives::PatternID; self.0.find(&haystack[span]).map_or(Candidate::None, |i| { let start = span.start + i; let end = start + self.0.needle().len(); // N.B. We can declare a match and use a fixed pattern ID here // because a Memmem prefilter is only ever created for searchers // with exactly one pattern. Thus, every match is always a match // and it is always for the first and only pattern. 
Candidate::Match(Match::new(PatternID::ZERO, start..end)) }) } } /// A builder for constructing a rare byte prefilter. /// /// A rare byte prefilter attempts to pick out a small set of rare bytes that /// occurr in the patterns, and then quickly scan to matches of those rare /// bytes. #[derive(Clone, Debug)] struct RareBytesBuilder { /// Whether this prefilter should account for ASCII case insensitivity or /// not. ascii_case_insensitive: bool, /// A set of rare bytes, indexed by byte value. rare_set: ByteSet, /// A set of byte offsets associated with bytes in a pattern. An entry /// corresponds to a particular bytes (its index) and is only non-zero if /// the byte occurred at an offset greater than 0 in at least one pattern. /// /// If a byte's offset is not representable in 8 bits, then the rare bytes /// prefilter becomes inert. byte_offsets: RareByteOffsets, /// Whether this is available as a prefilter or not. This can be set to /// false during construction if a condition is seen that invalidates the /// use of the rare-byte prefilter. available: bool, /// The number of bytes set to an active value in `byte_offsets`. count: usize, /// The sum of frequency ranks for the rare bytes detected. This is /// intended to give a heuristic notion of how rare the bytes are. rank_sum: u16, } /// A set of byte offsets, keyed by byte. #[derive(Clone, Copy)] struct RareByteOffsets { /// Each entry corresponds to the maximum offset of the corresponding /// byte across all patterns seen. set: [RareByteOffset; 256], } impl RareByteOffsets { /// Create a new empty set of rare byte offsets. pub(crate) fn empty() -> RareByteOffsets { RareByteOffsets { set: [RareByteOffset::default(); 256] } } /// Add the given offset for the given byte to this set. If the offset is /// greater than the existing offset, then it overwrites the previous /// value and returns false. If there is no previous value set, then this /// sets it and returns true. pub(crate) fn set(&mut self, byte: u8, off: RareByteOffset) { self.set[byte as usize].max = cmp::max(self.set[byte as usize].max, off.max); } } impl core::fmt::Debug for RareByteOffsets { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { let mut offsets = vec![]; for off in self.set.iter() { if off.max > 0 { offsets.push(off); } } f.debug_struct("RareByteOffsets").field("set", &offsets).finish() } } /// Offsets associated with an occurrence of a "rare" byte in any of the /// patterns used to construct a single Aho-Corasick automaton. #[derive(Clone, Copy, Debug)] struct RareByteOffset { /// The maximum offset at which a particular byte occurs from the start /// of any pattern. This is used as a shift amount. That is, when an /// occurrence of this byte is found, the candidate position reported by /// the prefilter is `position_of_byte - max`, such that the automaton /// will begin its search at a position that is guaranteed to observe a /// match. /// /// To avoid accidentally quadratic behavior, a prefilter is considered /// ineffective when it is asked to start scanning from a position that it /// has already scanned past. /// /// Using a `u8` here means that if we ever see a pattern that's longer /// than 255 bytes, then the entire rare byte prefilter is disabled. max: u8, } impl Default for RareByteOffset { fn default() -> RareByteOffset { RareByteOffset { max: 0 } } } impl RareByteOffset { /// Create a new rare byte offset. If the given offset is too big, then /// None is returned. 
In that case, callers should render the rare bytes /// prefilter inert. fn new(max: usize) -> Option<RareByteOffset> { if max > u8::MAX as usize { None } else { Some(RareByteOffset { max: max as u8 }) } } } impl RareBytesBuilder { /// Create a new builder for constructing a rare byte prefilter. fn new() -> RareBytesBuilder { RareBytesBuilder { ascii_case_insensitive: false, rare_set: ByteSet::empty(), byte_offsets: RareByteOffsets::empty(), available: true, count: 0, rank_sum: 0, } } /// Enable ASCII case insensitivity. When set, byte strings added to this /// builder will be interpreted without respect to ASCII case. fn ascii_case_insensitive(mut self, yes: bool) -> RareBytesBuilder { self.ascii_case_insensitive = yes; self } /// Build the rare bytes prefilter. /// /// If there are more than 3 distinct rare bytes found, or if heuristics /// otherwise determine that this prefilter should not be used, then `None` /// is returned. fn build(&self) -> Option<Prefilter> { #[cfg(feature = "perf-literal")] fn imp(builder: &RareBytesBuilder) -> Option<Prefilter> { if!builder.available || builder.count > 3 { return None;
for b in 0..=255 { if builder.rare_set.contains(b) { bytes[len] = b as u8; len += 1; } } let finder: Arc<dyn PrefilterI> = match len { 0 => return None, 1 => Arc::new(RareBytesOne { byte1: bytes[0], offset: builder.byte_offsets.set[bytes[0] as usize], }), 2 => Arc::new(RareBytesTwo { offsets: builder.byte_offsets, byte1: bytes[0], byte2: bytes[1], }), 3 => Arc::new(RareBytesThree { offsets: builder.byte_offsets, byte1: bytes[0], byte2: bytes[1], byte3: bytes[2], }), _ => unreachable!(), }; Some(Prefilter { finder, memory_usage: 0 }) } #[cfg(not(feature = "perf-literal"))] fn imp(_: &RareBytesBuilder) -> Option<Prefilter> { None } imp(self) } /// Add a byte string to this builder. /// /// All patterns added to an Aho-Corasick automaton should be added to this /// builder before attempting to construct the prefilter. fn add(&mut self, bytes: &[u8]) { // If we've already given up, then do nothing. if!self.available { return; } // If we've already blown our budget, then don't waste time looking // for more rare bytes. if self.count > 3 { self.available = false; return; } // If the pattern is too long, then our offset table is bunk, so // give up. if bytes.len() >= 256 { self.available = false; return; } let mut rarest = match bytes.get(0) { None => return, Some(&b) => (b, freq_rank(b)), }; // The idea here is to look for the rarest byte in each pattern, and // add that to our set. As a special exception, if we see a byte that // we've already added, then we immediately stop and choose that byte, // even if there's another rare byte in the pattern. This helps us // apply the rare byte optimization in more cases by attempting to pick // bytes that are in common between patterns. So for example, if we // were searching for `Sherlock` and `lockjaw`, then this would pick // `k` for both patterns, resulting in the use of `memchr` instead of // `memchr2` for `k` and `j`. let mut found = false; for (pos, &b) in bytes.iter().enumerate() { self.set_offset(pos, b); if found { continue; } if self.rare_set.contains(b) { found = true; continue; } let rank = freq_rank(b); if rank < rarest.1 { rarest = (b, rank); } } if!found { self.add_rare_byte(rarest.0); } } fn set_offset(&mut self, pos: usize, byte: u8) { // This unwrap is OK because pos is never bigger than our max. let offset = RareByteOffset::new(pos).unwrap(); self.byte_offsets.set(byte, offset); if self.ascii_case_insensitive { self.byte_offsets.set(opposite_ascii_case(byte), offset); } } fn add_rare_byte(&mut self, byte: u8) { self.add_one_rare_byte(byte); if self.ascii_case_insensitive { self.add_one_rare_byte(opposite_ascii_case(byte)); } } fn add_one_rare_byte(&mut self, byte: u8) { if!self.rare_set.contains(byte) { self.rare_set.add(byte); self.count += 1; self.rank_sum += freq_rank(byte) as u16; } } } /// A prefilter for scanning for a single "rare" byte. #[cfg(feature = "perf-literal")] #[derive(Clone, Debug)] struct RareBytesOne { byte1: u8, offset: RareByteOffset, } #[cfg(feature = "perf-literal")] impl PrefilterI for RareBytesOne { fn find_in(&self, haystack: &[u8], span: Span) -> Candidate { memchr::memchr(self.byte1, &haystack[span]) .map(|i| { let pos = span.start + i; cmp::max( span.start, pos.saturating_sub(usize::from(self.offset.max)), ) }) .map_or(Candidate::None, Candidate::PossibleStartOfMatch) } } /// A prefilter for scanning for two "rare" bytes. 
#[cfg(feature = "perf-literal")] #[derive(Clone, Debug)] struct RareBytesTwo { offsets: RareByteOffsets, byte1: u8, byte2: u8, } #[cfg(feature = "perf-literal")] impl PrefilterI for RareBytesTwo { fn find_in(&self, haystack: &[u8], span: Span) -> Candidate { memchr::memchr2(self.byte1, self.byte2, &haystack[span]) .map(|i| { let pos = span.start + i; let offset = self.offsets.set[usize::from(haystack[pos])].max; cmp::max(span.start, pos.saturating_sub(usize::from(offset))) }) .map_or(Candidate::None, Candidate::PossibleStartOfMatch) } } /// A prefilter for scanning for three "rare" bytes. #[cfg(feature = "perf-literal")] #[derive(Clone, Debug)] struct RareBytesThree { offsets: RareByteOffsets, byte1: u8, byte2: u8, byte3: u8, } #[cfg(feature = "perf-literal")] impl PrefilterI for RareBytesThree { fn find_in(&self, haystack: &[u8], span: Span) -> Candidate { memchr::memchr3(self.byte1, self.byte2, self.byte3, &haystack[span]) .map(|i| { let pos = span.start + i; let offset = self.offsets.set[usize::from(haystack[pos])].max; cmp::max(span.start, pos.saturating_sub(usize::from(offset))) }) .map_or(Candidate::None
} let (mut bytes, mut len) = ([0; 3], 0);
random_line_split
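// A hedged sketch of the selection heuristic in `build()` above: prefer the
// cheaper start-byte prefilter when it scans for fewer bytes, or when its
// combined frequency rank is within 50 of the rare-byte prefilter's; only
// otherwise fall back to the rare-byte prefilter, which has higher constant
// costs. The Stats struct is a stand-in, not one of the crate's types; the
// numbers in main are illustrative.
#[derive(Debug, Clone, Copy)]
struct Stats {
    count: usize,  // distinct bytes the prefilter scans for
    rank_sum: u16, // combined frequency rank (lower = rarer)
}

fn prefer_start_bytes(start: Stats, rare: Stats) -> bool {
    let has_fewer_bytes = start.count < rare.count;
    // "Close enough" in rarity: give the start-byte prefilter a 50-rank slack
    // because it has lower per-scan overhead.
    let has_rarer_bytes = start.rank_sum <= rare.rank_sum + 50;
    has_fewer_bytes || has_rarer_bytes
}

fn main() {
    // Start-byte prefilter scans 1 byte vs. 3: pick it even if its byte is less rare.
    assert!(prefer_start_bytes(
        Stats { count: 1, rank_sum: 300 },
        Stats { count: 3, rank_sum: 120 },
    ));
    // Same byte count, but the rare-byte set is far rarer: fall back to it.
    assert!(!prefer_start_bytes(
        Stats { count: 2, rank_sum: 400 },
        Stats { count: 2, rank_sum: 100 },
    ));
}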
proxy.rs
Request<Body>) -> Response<Body> where T: AsyncRead + AsyncWrite + Send +'static + Sync, { let uc = UpstreamConnect::new(t); let mut res = Response::new(Body::empty()); *res.status_mut() = StatusCode::OK; res } fn result_502_resolve_failed<'a>(m: &'a str) -> Response<Body> { let mut res = Response::new(Body::from(format!("Failed to resolve upstream: {}", m))); *res.status_mut() = StatusCode::BAD_GATEWAY; return res; } fn result_unboxed(c: u16) -> Response<Body> { let mut res = Response::new(Body::empty()); // TODO(matt) use constants *res.status_mut() = StatusCode::from_u16(c).unwrap(); res } fn result(c: u16) -> Box<Future<Item = Response<Body>, Error = io::Error> + Send> { let mut res = Response::new(Body::empty()); // TODO(matt) use constants *res.status_mut() = StatusCode::from_u16(c).unwrap(); Box::new(futures::future::ok(res)) } fn crappy_log(r: &Request<Body>) { println!("{:?} {}", r.method(), r.uri()) } fn normalize_authority(uri: &hyper::Uri) -> String { // There are 3 forms let pp = uri.port_u16().unwrap_or(80); format!("{}:{}", uri.host().unwrap_or(""), pp) } pub struct UserIdentity { pub uuid: String, pub friendly_name: Option<String>, pub attributes: Option<HashMap<String, String>>, } pub enum Identity { User(UserIdentity), Anonymous, Role(String), } pub trait Authenticate { fn authenticate(&self, req: &Request<Body>) -> Result<Identity, String>; } pub enum AuthzResult { Allow, Disallow, } pub trait Authorize { fn authorize(&self, i: &Identity, req: &Request<Body>) -> Result<AuthzResult, String>; } pub trait SiteAuthorize { fn authorize(&self, i: &Identity, url: &str) -> Result<AuthzResult, String>; } #[derive(Clone)] pub struct AuthConfig<U, S, A> where U: Authenticate + Clone, S: SiteAuthorize + Clone, A: Authorize + Clone, { authenticate: U, site: S, authorize: A, } fn handle_tls_raw<C: Connect +'static>( req_uuid: uuid::Uuid, _client: &Client<C>, upstream_addr: std::net::SocketAddr, req: Request<Body>, ) -> Box<Future<Item = Response<Body>, Error = io::Error> + Send> { let (resp_tx, resp_rx) = oneshot::channel(); // connect, then on_upgrade() // this needs to be reworked // there is a panic in upgrade none let cpair = TcpStream::connect(&upstream_addr) .map(|upstream| { println!("Connection established"); let _ = resp_tx.send(()).unwrap(); upstream }) .map_err(|err| eprintln!("connect: {}", err)); let upgraded = req.into_body().on_upgrade(); let upg2 = upgraded .map_err(|err| eprintln!("upgrade: {}", err)) .join(cpair) .and_then(|(upstream, downstream)| { println!("In up/down"); let (u2dr, u2dw) = upstream.split(); let (d2ur, d2uw) = downstream.split(); let u2df = copy(u2dr, d2uw); let d2uf = copy(d2ur, u2dw); d2uf.join(u2df).map_err(|err| eprintln!("connect: {}", err)) }) .map(|_| ()) .map_err(|e| println!("Error {:?}", e)); hyper::rt::spawn(upg2); Box::new( resp_rx .map(|_| 200) .or_else(|_| Ok(502)) .and_then(|i| result(i)), ) // result(200) } fn is_mitm(r: &Request<Body>, mitm_enabled: bool) -> bool { true } trait RequestFilter { type Future: Future<Item = Request<Body>>; fn filter(&self, req: Request<Body>) -> Self::Future; } trait ResponseFilter { type Future: Future<Item = Response<Body>>; fn filter(&self, req: Response<Body>) -> Self::Future; } #[derive(Clone)] struct AdWareBlock; impl SiteAuthorize for AdWareBlock { fn authorize(&self, i: &Identity, url: &str) -> Result<AuthzResult, String> { if url.starts_with("adservice.google.com") { return Ok(AuthzResult::Disallow); } Ok(AuthzResult::Allow) } } #[derive(Clone)] struct AllowAll; impl Authorize for AllowAll { fn 
authorize(&self, i: &Identity, req: &Request<Body>) -> Result<AuthzResult, String> { Ok(AuthzResult::Allow) } } #[derive(Clone)] struct NoAuth; impl Authenticate for NoAuth { fn authenticate(&self, req: &Request<Body>) -> Result<Identity, String>
} pub enum Trace { TraceId(String), TraceSecurity(String, openssl::x509::X509), TraceRequest(String, Request<Body>), TraceResponse(String, Request<Body>), } fn make_absolute(req: &mut Request<Body>) { /* RFC 7312 5.4 When a proxy receives a request with an absolute-form of request-target, the proxy MUST ignore the received Host header field (if any) and instead replace it with the host information of the request-target. A proxy that forwards such a request MUST generate a new Host field-value based on the received request-target rather than forward the received Host field-value. */ match req.method() { &Method::CONNECT => {} _ => { let nhost: Option<String> = { req.uri().authority_part().map(|a| a.as_str().into()) }; if let Some(n) = nhost { req.headers_mut() .insert(http::header::HOST, n.parse().unwrap()); return; } let nuri = req.headers().get(http::header::HOST).map(|host| { let autht: Authority = host.to_str().unwrap().parse().unwrap(); let mut builder = hyper::Uri::builder(); builder.authority(autht); //TODO(matt) do as map[ if let Some(p) = req.uri().path_and_query() { builder.path_and_query(p.as_str()); } if let Some(p) = req.uri().scheme_part() { builder.scheme(p.as_str()); } else { // Ok so this kind of sketchy, but since this is fixing up a client connection // we'll never see an https one. Why? https is via CONNECT at the proxy builder.scheme("http"); } builder.build().unwrap() }); match nuri { Some(n) => *req.uri_mut() = n, None => {} } } } } #[derive(Clone)] struct Proxy<U, S, A> where U: Authenticate + Sync + Send + Clone +'static, S: SiteAuthorize + Sync + Send + Clone +'static, A: Authorize + Sync + Send + Clone +'static, { //TODO(matt) - trace filter tracer: Option<mpsc::Sender<Trace>>, ca: Arc<ca::CertAuthority>, auth_config: AuthConfig<U, S, A>, upstream_ssl_pool: Arc<pool::Pool<tokio_openssl::SslStream<tokio_tcp::TcpStream>>>, } impl<U, S, A> Proxy<U, S, A> where U: Authenticate + Sync + Send + Clone, S: SiteAuthorize + Sync + Send + Clone, A: Authorize + Sync + Send + Clone, { // Rework this instead of duping proxy do somehting else fn dup(&self) -> Proxy<U, S, A> { Proxy { tracer: self.tracer.iter().map(|t| t.clone()).next(), ca: self.ca.clone(), auth_config: self.auth_config.clone(), upstream_ssl_pool: pool::Pool::empty(100), } } fn handle<C: Connect +'static>( &self, client: &Client<C>, req: Request<Body>, ) -> Box<Future<Item = Response<Body>, Error = io::Error> + Send> { let req_uuid = uuid::Uuid::new_v4(); let hostname = normalize_authority(req.uri()); // TODO this is slow and not async, and crappy let upstream_addr = match hostname.to_socket_addrs() { Ok(mut addrs) => match addrs.next() { Some(addr) => addr, None => return result(502), }, Err(e) => { eprintln!("Upstream resolution: ({}): {}", hostname, e); return Box::new(futures::future::ok(result_502_resolve_failed(&hostname))); } }; let uid = self.auth_config.authenticate.authenticate(&req); let x = uid .and_then(|u| { self.auth_config .site .authorize(&u, &hostname) .map(|r| (u, r)) }) .and_then(|(u, site_result)| { self.auth_config .authorize .authorize(&u, &req) .map(|ar| (u, site_result, ar)) }); let _user = match x { Ok((u, AuthzResult::Allow, AuthzResult::Allow)) => u, Err(_) => return result(401), _ => return result(403), }; self.handle_inner(req_uuid, upstream_addr, client, req) } fn handle_inner<C: Connect +'static>( &self, req_uuid: uuid::Uuid, upstream_addr: std::net::SocketAddr, client: &Client<C>, req: Request<Body>, ) -> Box<Future<Item = Response<Body>, Error = io::Error> + Send> { 
crappy_log(&req); let mitm_enabled = true; match req.method() { &Method::CONNECT => match is_mitm(&req, mitm_enabled) { true => self.handle_mitm(req_uuid, client.clone(), upstream_addr, req), false => handle_tls_raw(req_uuid, client, upstream_addr, req), }, _ => self.handle_http(req_uuid, client, req), } } fn handle_http_forward<C: Connect +'static>( &self, req_uuid: uuid::Uuid, mut client: Client<C>, req: Request<Body>, ) -> Box<Future<Item = Response<Body>, Error = io::Error> + Send> { let client = client.request(req); match self.tracer.clone() { Some(tx) => { let f = tx .send(Trace::TraceId(format!("{}", req_uuid))) .map_err(|e| { println!("Error in trace: {:?}", e); io::Error::from(io::ErrorKind::Other) }); Box::new( f.join(client.map(|resp| resp).map_err(|e| { println!("Error in upstream: {:?}", e); io::Error::from(io::ErrorKind::Other) })) .map(|(_, b)| b), ) } None => Box::new(client.map(|resp| resp).map_err(|e| { println!("Error in upstream: {:?}", e); io::Error::from(io::ErrorKind::Other) })), } } fn handle_http<C: Connect +'static>( &self, req_uuid: uuid::Uuid, client: &Client<C>, mut req: Request<Body>, ) -> Box<Future<Item = Response<Body>, Error = io::Error> + Send> { make_absolute(&mut req); let client = client.clone().request(req); match self.tracer.clone() { Some(tx) => { let f = tx .send(Trace::TraceId(format!("{}", req_uuid))) .map_err(|e| { println!("Error in trace: {:?}", e); io::Error::from(io::ErrorKind::Other) }); Box::new( f.join(client.map(|resp| resp).map_err(|e| { println!("Error in upstream: {:?}", e); io::Error::from(io::ErrorKind::Other) })) .map(|(_, b)| b), ) } None => Box::new(client.map(|resp| resp).map_err(|e| { println!("Error in upstream: {:?}", e); io::Error::from(io::ErrorKind::Other) })), } } fn handle_mitm<C: Connect +'static>( &self, req_uuid: uuid::Uuid, client: Client<C>, upstream_addr: std::net::SocketAddr, req: Request<Body>, ) -> Box<Future<Item = Response<Body>, Error = io::Error> + Send> { let (resp_tx, resp_rx) = oneshot::channel(); // connect, then on_upgrade() // this needs to be reworked // there is a panic in upgrade none let authority = req.uri().authority_part().unwrap().clone(); let cpair = TcpStream::connect(&upstream_addr) .map_err(|err| eprintln!("mitm tcp connect: {}", err)) .and_then(move |upstream| { let cx = SslConnector::builder(SslMethod::tls()).unwrap().build(); cx.connect_async(authority.host(), upstream) .map(|ssl_conn| { let _ = resp_tx.send(()).unwrap(); println!("MITM Connection established"); let peer_cert = { ssl_conn.get_ref().ssl().peer_certificate().unwrap().clone() }; (ssl_conn, peer_cert) }) .map_err(|e| println!("tls error: {:}", e)) }); let upgraded = req.into_body().on_upgrade(); let ca = self.ca.clone(); let np = self.clone(); let req_uuid = req_uuid.clone(); let upg2 = upgraded .map_err(|err| eprintln!("upgrade: {}", err)) .join(cpair) .and_then(move |tuple| { let (downstream, (upstream, peer_cert)) = tuple; let ca = ca; let req_uuid = req_uuid; let peer_cert_signed = ca.sign_cert_from_cert(&peer_cert).unwrap(); let mut acceptor = SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap(); acceptor.set_private_key(ca.child_key.as_ref()).unwrap(); acceptor.set_certificate(peer_cert_signed.as_ref()).unwrap(); acceptor.check_private_key().unwrap(); let acceptor = acceptor.build(); acceptor .accept_async(downstream) .map_err(|e| eprintln!("accept: {}", e)) .and_then(move |tls_downstream| { // This should cause the pool to have a single entry // and then magic let upstream_pool = { let local_pool = 
pool::Pool::empty(1); let pooled_upstream = pool::PoolItem::new(upstream); pool::PoolItem::attach(pooled_upstream, local_pool.clone()); local_pool }; Http::new() .serve_connection( tls_downstream, service_fn(move |req: Request<Body>| { let upstream_pool = upstream_pool.clone(); let uc = Client::builder() .keep_alive(false) .build(AlreadyConnected(upstream_pool)); // println!("In inner client handler: {} {:?}", req_uuid, req); np.handle_http(req_uuid, &uc, req) }), ) .map_err(|err| { eprintln!("Error in inner http: {}", err); () }) // This is proxy without analysis, just forward // serve_connection // let (u2dr, u2dw) = upstream_conn.split(); // let (d2ur, d2uw) = tls_downstream.split(); // let u2df = copy(u2dr, d2uw); // let d2uf = copy(d2ur, u2dw); // d2uf.join(u2df) // .map_err(|err| eprintln!("mitm forward: {}", err)); }) }) .map(|_| ()) .map_err(|e| println!("Error {:?}", e)); hyper::rt::spawn(upg2); Box::new( resp_rx .map(|_| 200) .or_else(|_| Ok(502)) .and_then(|i| result(i)), ) } } struct AlreadyConnected<T: Send +'static + AsyncRead + AsyncWrite +'static +
{ Ok(Identity::Anonymous) }
identifier_body
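// A minimal sketch (using stand-in types, not the proxy's own) of the
// authentication/authorization chain shown in the proxy record above:
// authenticate first, then the site-level check, then the per-request check.
// A failed authentication (or an error from either authorizer) maps to 401,
// a Disallow from either authorizer maps to 403. Status codes are plain u16s
// here to keep the sketch dependency-free.
enum Identity {
    Anonymous,
}
enum AuthzResult {
    Allow,
    Disallow,
}

fn authenticate(_host: &str) -> Result<Identity, String> {
    Ok(Identity::Anonymous) // like the NoAuth implementation above
}

fn site_authorize(_id: &Identity, host: &str) -> Result<AuthzResult, String> {
    // like AdWareBlock above: deny a known ad host, allow everything else
    if host.starts_with("adservice.google.com") {
        Ok(AuthzResult::Disallow)
    } else {
        Ok(AuthzResult::Allow)
    }
}

fn request_authorize(_id: &Identity, _host: &str) -> Result<AuthzResult, String> {
    Ok(AuthzResult::Allow) // like AllowAll above
}

fn decide(host: &str) -> u16 {
    let id = match authenticate(host) {
        Ok(id) => id,
        Err(_) => return 401,
    };
    match (site_authorize(&id, host), request_authorize(&id, host)) {
        (Ok(AuthzResult::Allow), Ok(AuthzResult::Allow)) => 200,
        (Err(_), _) | (_, Err(_)) => 401,
        _ => 403,
    }
}

fn main() {
    assert_eq!(decide("example.com:80"), 200);
    assert_eq!(decide("adservice.google.com:443"), 403);
}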
proxy.rs
pub friendly_name: Option<String>, pub attributes: Option<HashMap<String, String>>, } pub enum Identity { User(UserIdentity), Anonymous, Role(String), } pub trait Authenticate { fn authenticate(&self, req: &Request<Body>) -> Result<Identity, String>; } pub enum AuthzResult { Allow, Disallow, } pub trait Authorize { fn authorize(&self, i: &Identity, req: &Request<Body>) -> Result<AuthzResult, String>; } pub trait SiteAuthorize { fn authorize(&self, i: &Identity, url: &str) -> Result<AuthzResult, String>; } #[derive(Clone)] pub struct AuthConfig<U, S, A> where U: Authenticate + Clone, S: SiteAuthorize + Clone, A: Authorize + Clone, { authenticate: U, site: S, authorize: A, } fn handle_tls_raw<C: Connect +'static>( req_uuid: uuid::Uuid, _client: &Client<C>, upstream_addr: std::net::SocketAddr, req: Request<Body>, ) -> Box<Future<Item = Response<Body>, Error = io::Error> + Send> { let (resp_tx, resp_rx) = oneshot::channel(); // connect, then on_upgrade() // this needs to be reworked // there is a panic in upgrade none let cpair = TcpStream::connect(&upstream_addr) .map(|upstream| { println!("Connection established"); let _ = resp_tx.send(()).unwrap(); upstream }) .map_err(|err| eprintln!("connect: {}", err)); let upgraded = req.into_body().on_upgrade(); let upg2 = upgraded .map_err(|err| eprintln!("upgrade: {}", err)) .join(cpair) .and_then(|(upstream, downstream)| { println!("In up/down"); let (u2dr, u2dw) = upstream.split(); let (d2ur, d2uw) = downstream.split(); let u2df = copy(u2dr, d2uw); let d2uf = copy(d2ur, u2dw); d2uf.join(u2df).map_err(|err| eprintln!("connect: {}", err)) }) .map(|_| ()) .map_err(|e| println!("Error {:?}", e)); hyper::rt::spawn(upg2); Box::new( resp_rx .map(|_| 200) .or_else(|_| Ok(502)) .and_then(|i| result(i)), ) // result(200) } fn is_mitm(r: &Request<Body>, mitm_enabled: bool) -> bool { true } trait RequestFilter { type Future: Future<Item = Request<Body>>; fn filter(&self, req: Request<Body>) -> Self::Future; } trait ResponseFilter { type Future: Future<Item = Response<Body>>; fn filter(&self, req: Response<Body>) -> Self::Future; } #[derive(Clone)] struct AdWareBlock; impl SiteAuthorize for AdWareBlock { fn authorize(&self, i: &Identity, url: &str) -> Result<AuthzResult, String> { if url.starts_with("adservice.google.com") { return Ok(AuthzResult::Disallow); } Ok(AuthzResult::Allow) } } #[derive(Clone)] struct AllowAll; impl Authorize for AllowAll { fn authorize(&self, i: &Identity, req: &Request<Body>) -> Result<AuthzResult, String> { Ok(AuthzResult::Allow) } } #[derive(Clone)] struct NoAuth; impl Authenticate for NoAuth { fn authenticate(&self, req: &Request<Body>) -> Result<Identity, String> { Ok(Identity::Anonymous) } } pub enum Trace { TraceId(String), TraceSecurity(String, openssl::x509::X509), TraceRequest(String, Request<Body>), TraceResponse(String, Request<Body>), } fn make_absolute(req: &mut Request<Body>) { /* RFC 7312 5.4 When a proxy receives a request with an absolute-form of request-target, the proxy MUST ignore the received Host header field (if any) and instead replace it with the host information of the request-target. A proxy that forwards such a request MUST generate a new Host field-value based on the received request-target rather than forward the received Host field-value. 
*/ match req.method() { &Method::CONNECT => {} _ => { let nhost: Option<String> = { req.uri().authority_part().map(|a| a.as_str().into()) }; if let Some(n) = nhost { req.headers_mut() .insert(http::header::HOST, n.parse().unwrap()); return; } let nuri = req.headers().get(http::header::HOST).map(|host| { let autht: Authority = host.to_str().unwrap().parse().unwrap(); let mut builder = hyper::Uri::builder(); builder.authority(autht); //TODO(matt) do as map[ if let Some(p) = req.uri().path_and_query() { builder.path_and_query(p.as_str()); } if let Some(p) = req.uri().scheme_part() { builder.scheme(p.as_str()); } else { // Ok so this kind of sketchy, but since this is fixing up a client connection // we'll never see an https one. Why? https is via CONNECT at the proxy builder.scheme("http"); } builder.build().unwrap() }); match nuri { Some(n) => *req.uri_mut() = n, None => {} } } } } #[derive(Clone)] struct Proxy<U, S, A> where U: Authenticate + Sync + Send + Clone +'static, S: SiteAuthorize + Sync + Send + Clone +'static, A: Authorize + Sync + Send + Clone +'static, { //TODO(matt) - trace filter tracer: Option<mpsc::Sender<Trace>>, ca: Arc<ca::CertAuthority>, auth_config: AuthConfig<U, S, A>, upstream_ssl_pool: Arc<pool::Pool<tokio_openssl::SslStream<tokio_tcp::TcpStream>>>, } impl<U, S, A> Proxy<U, S, A> where U: Authenticate + Sync + Send + Clone, S: SiteAuthorize + Sync + Send + Clone, A: Authorize + Sync + Send + Clone, { // Rework this instead of duping proxy do somehting else fn dup(&self) -> Proxy<U, S, A> { Proxy { tracer: self.tracer.iter().map(|t| t.clone()).next(), ca: self.ca.clone(), auth_config: self.auth_config.clone(), upstream_ssl_pool: pool::Pool::empty(100), } } fn handle<C: Connect +'static>( &self, client: &Client<C>, req: Request<Body>, ) -> Box<Future<Item = Response<Body>, Error = io::Error> + Send> { let req_uuid = uuid::Uuid::new_v4(); let hostname = normalize_authority(req.uri()); // TODO this is slow and not async, and crappy let upstream_addr = match hostname.to_socket_addrs() { Ok(mut addrs) => match addrs.next() { Some(addr) => addr, None => return result(502), }, Err(e) => { eprintln!("Upstream resolution: ({}): {}", hostname, e); return Box::new(futures::future::ok(result_502_resolve_failed(&hostname))); } }; let uid = self.auth_config.authenticate.authenticate(&req); let x = uid .and_then(|u| { self.auth_config .site .authorize(&u, &hostname) .map(|r| (u, r)) }) .and_then(|(u, site_result)| { self.auth_config .authorize .authorize(&u, &req) .map(|ar| (u, site_result, ar)) }); let _user = match x { Ok((u, AuthzResult::Allow, AuthzResult::Allow)) => u, Err(_) => return result(401), _ => return result(403), }; self.handle_inner(req_uuid, upstream_addr, client, req) } fn handle_inner<C: Connect +'static>( &self, req_uuid: uuid::Uuid, upstream_addr: std::net::SocketAddr, client: &Client<C>, req: Request<Body>, ) -> Box<Future<Item = Response<Body>, Error = io::Error> + Send> { crappy_log(&req); let mitm_enabled = true; match req.method() { &Method::CONNECT => match is_mitm(&req, mitm_enabled) { true => self.handle_mitm(req_uuid, client.clone(), upstream_addr, req), false => handle_tls_raw(req_uuid, client, upstream_addr, req), }, _ => self.handle_http(req_uuid, client, req), } } fn handle_http_forward<C: Connect +'static>( &self, req_uuid: uuid::Uuid, mut client: Client<C>, req: Request<Body>, ) -> Box<Future<Item = Response<Body>, Error = io::Error> + Send> { let client = client.request(req); match self.tracer.clone() { Some(tx) => { let f = tx 
.send(Trace::TraceId(format!("{}", req_uuid))) .map_err(|e| { println!("Error in trace: {:?}", e); io::Error::from(io::ErrorKind::Other) }); Box::new( f.join(client.map(|resp| resp).map_err(|e| { println!("Error in upstream: {:?}", e); io::Error::from(io::ErrorKind::Other) })) .map(|(_, b)| b), ) } None => Box::new(client.map(|resp| resp).map_err(|e| { println!("Error in upstream: {:?}", e); io::Error::from(io::ErrorKind::Other) })), } } fn handle_http<C: Connect +'static>( &self, req_uuid: uuid::Uuid, client: &Client<C>, mut req: Request<Body>, ) -> Box<Future<Item = Response<Body>, Error = io::Error> + Send> { make_absolute(&mut req); let client = client.clone().request(req); match self.tracer.clone() { Some(tx) => { let f = tx .send(Trace::TraceId(format!("{}", req_uuid))) .map_err(|e| { println!("Error in trace: {:?}", e); io::Error::from(io::ErrorKind::Other) }); Box::new( f.join(client.map(|resp| resp).map_err(|e| { println!("Error in upstream: {:?}", e); io::Error::from(io::ErrorKind::Other) })) .map(|(_, b)| b), ) } None => Box::new(client.map(|resp| resp).map_err(|e| { println!("Error in upstream: {:?}", e); io::Error::from(io::ErrorKind::Other) })), } } fn handle_mitm<C: Connect +'static>( &self, req_uuid: uuid::Uuid, client: Client<C>, upstream_addr: std::net::SocketAddr, req: Request<Body>, ) -> Box<Future<Item = Response<Body>, Error = io::Error> + Send> { let (resp_tx, resp_rx) = oneshot::channel(); // connect, then on_upgrade() // this needs to be reworked // there is a panic in upgrade none let authority = req.uri().authority_part().unwrap().clone(); let cpair = TcpStream::connect(&upstream_addr) .map_err(|err| eprintln!("mitm tcp connect: {}", err)) .and_then(move |upstream| { let cx = SslConnector::builder(SslMethod::tls()).unwrap().build(); cx.connect_async(authority.host(), upstream) .map(|ssl_conn| { let _ = resp_tx.send(()).unwrap(); println!("MITM Connection established"); let peer_cert = { ssl_conn.get_ref().ssl().peer_certificate().unwrap().clone() }; (ssl_conn, peer_cert) }) .map_err(|e| println!("tls error: {:}", e)) }); let upgraded = req.into_body().on_upgrade(); let ca = self.ca.clone(); let np = self.clone(); let req_uuid = req_uuid.clone(); let upg2 = upgraded .map_err(|err| eprintln!("upgrade: {}", err)) .join(cpair) .and_then(move |tuple| { let (downstream, (upstream, peer_cert)) = tuple; let ca = ca; let req_uuid = req_uuid; let peer_cert_signed = ca.sign_cert_from_cert(&peer_cert).unwrap(); let mut acceptor = SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap(); acceptor.set_private_key(ca.child_key.as_ref()).unwrap(); acceptor.set_certificate(peer_cert_signed.as_ref()).unwrap(); acceptor.check_private_key().unwrap(); let acceptor = acceptor.build(); acceptor .accept_async(downstream) .map_err(|e| eprintln!("accept: {}", e)) .and_then(move |tls_downstream| { // This should cause the pool to have a single entry // and then magic let upstream_pool = { let local_pool = pool::Pool::empty(1); let pooled_upstream = pool::PoolItem::new(upstream); pool::PoolItem::attach(pooled_upstream, local_pool.clone()); local_pool }; Http::new() .serve_connection( tls_downstream, service_fn(move |req: Request<Body>| { let upstream_pool = upstream_pool.clone(); let uc = Client::builder() .keep_alive(false) .build(AlreadyConnected(upstream_pool)); // println!("In inner client handler: {} {:?}", req_uuid, req); np.handle_http(req_uuid, &uc, req) }), ) .map_err(|err| { eprintln!("Error in inner http: {}", err); () }) // This is proxy without analysis, just forward // 
serve_connection // let (u2dr, u2dw) = upstream_conn.split(); // let (d2ur, d2uw) = tls_downstream.split(); // let u2df = copy(u2dr, d2uw); // let d2uf = copy(d2ur, u2dw); // d2uf.join(u2df) // .map_err(|err| eprintln!("mitm forward: {}", err)); }) }) .map(|_| ()) .map_err(|e| println!("Error {:?}", e)); hyper::rt::spawn(upg2); Box::new( resp_rx .map(|_| 200) .or_else(|_| Ok(502)) .and_then(|i| result(i)), ) } } struct AlreadyConnected<T: Send +'static + AsyncRead + AsyncWrite +'static + Sync>( Arc<pool::Pool<T>>, ); impl<T: Send +'static + AsyncRead + AsyncWrite +'static + Sync> Connect for AlreadyConnected<T> { type Transport = pool::PoolItem<T>; /// An error occured when trying to connect. type Error = io::Error; /// A Future that will resolve to the connected Transport. type Future = Box<Future<Item = (Self::Transport, Connected), Error = Self::Error> + Send>; /// Connect to a destination. fn connect(&self, _: hyper::client::connect::Destination) -> Self::Future { let o = pool::Pool::checkout(self.0.clone()).unwrap(); Box::new(futures::future::ok(( o, hyper::client::connect::Connected::new(), ))) } } fn trace_handler(mut rx: mpsc::Receiver<Trace>) { let _t = std::thread::spawn(move || { let done = rx.for_each(|tx| { match tx { Trace::TraceId(uuid) => { println!("Begin Tracing {}", uuid); } _ => {}
} println!("Trace recv"); Ok(()) });
random_line_split
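The record above includes `make_absolute`, which applies the Host-regeneration rule it quotes (the rule is from RFC 7230 section 5.4, although the source comment cites RFC 7312): for an absolute-form request-target, the proxy derives Host from the target's authority and ignores any Host header the client sent. A dependency-free sketch of just that derivation follows; `host_from_absolute_target` is a hypothetical helper, not part of proxy.rs.

```rust
// Derive the authority ("host:port") from an absolute-form request-target.
fn host_from_absolute_target(target: &str) -> Option<&str> {
    let scheme_end = target.find("://")?;
    let rest = &target[scheme_end + 3..];
    let end = rest.find('/').unwrap_or(rest.len());
    Some(&rest[..end])
}

fn main() {
    // Absolute-form: Host becomes "example.com:8080" regardless of the header sent.
    assert_eq!(
        host_from_absolute_target("http://example.com:8080/a?b=c"),
        Some("example.com:8080")
    );
    // Origin-form: nothing to derive, so the proxy falls back to the Host header.
    assert_eq!(host_from_absolute_target("/a?b=c"), None);
}
```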
proxy.rs
Request<Body>) -> Response<Body> where T: AsyncRead + AsyncWrite + Send +'static + Sync, { let uc = UpstreamConnect::new(t); let mut res = Response::new(Body::empty()); *res.status_mut() = StatusCode::OK; res } fn result_502_resolve_failed<'a>(m: &'a str) -> Response<Body> { let mut res = Response::new(Body::from(format!("Failed to resolve upstream: {}", m))); *res.status_mut() = StatusCode::BAD_GATEWAY; return res; } fn result_unboxed(c: u16) -> Response<Body> { let mut res = Response::new(Body::empty()); // TODO(matt) use constants *res.status_mut() = StatusCode::from_u16(c).unwrap(); res } fn result(c: u16) -> Box<Future<Item = Response<Body>, Error = io::Error> + Send> { let mut res = Response::new(Body::empty()); // TODO(matt) use constants *res.status_mut() = StatusCode::from_u16(c).unwrap(); Box::new(futures::future::ok(res)) } fn crappy_log(r: &Request<Body>) { println!("{:?} {}", r.method(), r.uri()) } fn normalize_authority(uri: &hyper::Uri) -> String { // There are 3 forms let pp = uri.port_u16().unwrap_or(80); format!("{}:{}", uri.host().unwrap_or(""), pp) } pub struct UserIdentity { pub uuid: String, pub friendly_name: Option<String>, pub attributes: Option<HashMap<String, String>>, } pub enum Identity { User(UserIdentity), Anonymous, Role(String), } pub trait Authenticate { fn authenticate(&self, req: &Request<Body>) -> Result<Identity, String>; } pub enum AuthzResult { Allow, Disallow, } pub trait Authorize { fn authorize(&self, i: &Identity, req: &Request<Body>) -> Result<AuthzResult, String>; } pub trait SiteAuthorize { fn authorize(&self, i: &Identity, url: &str) -> Result<AuthzResult, String>; } #[derive(Clone)] pub struct AuthConfig<U, S, A> where U: Authenticate + Clone, S: SiteAuthorize + Clone, A: Authorize + Clone, { authenticate: U, site: S, authorize: A, } fn handle_tls_raw<C: Connect +'static>( req_uuid: uuid::Uuid, _client: &Client<C>, upstream_addr: std::net::SocketAddr, req: Request<Body>, ) -> Box<Future<Item = Response<Body>, Error = io::Error> + Send> { let (resp_tx, resp_rx) = oneshot::channel(); // connect, then on_upgrade() // this needs to be reworked // there is a panic in upgrade none let cpair = TcpStream::connect(&upstream_addr) .map(|upstream| { println!("Connection established"); let _ = resp_tx.send(()).unwrap(); upstream }) .map_err(|err| eprintln!("connect: {}", err)); let upgraded = req.into_body().on_upgrade(); let upg2 = upgraded .map_err(|err| eprintln!("upgrade: {}", err)) .join(cpair) .and_then(|(upstream, downstream)| { println!("In up/down"); let (u2dr, u2dw) = upstream.split(); let (d2ur, d2uw) = downstream.split(); let u2df = copy(u2dr, d2uw); let d2uf = copy(d2ur, u2dw); d2uf.join(u2df).map_err(|err| eprintln!("connect: {}", err)) }) .map(|_| ()) .map_err(|e| println!("Error {:?}", e)); hyper::rt::spawn(upg2); Box::new( resp_rx .map(|_| 200) .or_else(|_| Ok(502)) .and_then(|i| result(i)), ) // result(200) } fn is_mitm(r: &Request<Body>, mitm_enabled: bool) -> bool { true } trait RequestFilter { type Future: Future<Item = Request<Body>>; fn filter(&self, req: Request<Body>) -> Self::Future; } trait ResponseFilter { type Future: Future<Item = Response<Body>>; fn filter(&self, req: Response<Body>) -> Self::Future; } #[derive(Clone)] struct AdWareBlock; impl SiteAuthorize for AdWareBlock { fn authorize(&self, i: &Identity, url: &str) -> Result<AuthzResult, String> { if url.starts_with("adservice.google.com") { return Ok(AuthzResult::Disallow); } Ok(AuthzResult::Allow) } } #[derive(Clone)] struct AllowAll; impl Authorize for AllowAll { fn 
authorize(&self, i: &Identity, req: &Request<Body>) -> Result<AuthzResult, String> { Ok(AuthzResult::Allow) } } #[derive(Clone)] struct NoAuth; impl Authenticate for NoAuth { fn authenticate(&self, req: &Request<Body>) -> Result<Identity, String> { Ok(Identity::Anonymous) } } pub enum Trace { TraceId(String), TraceSecurity(String, openssl::x509::X509), TraceRequest(String, Request<Body>), TraceResponse(String, Request<Body>), } fn make_absolute(req: &mut Request<Body>) { /* RFC 7312 5.4 When a proxy receives a request with an absolute-form of request-target, the proxy MUST ignore the received Host header field (if any) and instead replace it with the host information of the request-target. A proxy that forwards such a request MUST generate a new Host field-value based on the received request-target rather than forward the received Host field-value. */ match req.method() { &Method::CONNECT => {} _ => { let nhost: Option<String> = { req.uri().authority_part().map(|a| a.as_str().into()) }; if let Some(n) = nhost { req.headers_mut() .insert(http::header::HOST, n.parse().unwrap()); return; } let nuri = req.headers().get(http::header::HOST).map(|host| { let autht: Authority = host.to_str().unwrap().parse().unwrap(); let mut builder = hyper::Uri::builder(); builder.authority(autht); //TODO(matt) do as map[ if let Some(p) = req.uri().path_and_query() { builder.path_and_query(p.as_str()); } if let Some(p) = req.uri().scheme_part() { builder.scheme(p.as_str()); } else { // Ok so this kind of sketchy, but since this is fixing up a client connection // we'll never see an https one. Why? https is via CONNECT at the proxy builder.scheme("http"); } builder.build().unwrap() }); match nuri { Some(n) => *req.uri_mut() = n, None => {} } } } } #[derive(Clone)] struct Proxy<U, S, A> where U: Authenticate + Sync + Send + Clone +'static, S: SiteAuthorize + Sync + Send + Clone +'static, A: Authorize + Sync + Send + Clone +'static, { //TODO(matt) - trace filter tracer: Option<mpsc::Sender<Trace>>, ca: Arc<ca::CertAuthority>, auth_config: AuthConfig<U, S, A>, upstream_ssl_pool: Arc<pool::Pool<tokio_openssl::SslStream<tokio_tcp::TcpStream>>>, } impl<U, S, A> Proxy<U, S, A> where U: Authenticate + Sync + Send + Clone, S: SiteAuthorize + Sync + Send + Clone, A: Authorize + Sync + Send + Clone, { // Rework this instead of duping proxy do somehting else fn
(&self) -> Proxy<U, S, A> { Proxy { tracer: self.tracer.iter().map(|t| t.clone()).next(), ca: self.ca.clone(), auth_config: self.auth_config.clone(), upstream_ssl_pool: pool::Pool::empty(100), } } fn handle<C: Connect +'static>( &self, client: &Client<C>, req: Request<Body>, ) -> Box<Future<Item = Response<Body>, Error = io::Error> + Send> { let req_uuid = uuid::Uuid::new_v4(); let hostname = normalize_authority(req.uri()); // TODO this is slow and not async, and crappy let upstream_addr = match hostname.to_socket_addrs() { Ok(mut addrs) => match addrs.next() { Some(addr) => addr, None => return result(502), }, Err(e) => { eprintln!("Upstream resolution: ({}): {}", hostname, e); return Box::new(futures::future::ok(result_502_resolve_failed(&hostname))); } }; let uid = self.auth_config.authenticate.authenticate(&req); let x = uid .and_then(|u| { self.auth_config .site .authorize(&u, &hostname) .map(|r| (u, r)) }) .and_then(|(u, site_result)| { self.auth_config .authorize .authorize(&u, &req) .map(|ar| (u, site_result, ar)) }); let _user = match x { Ok((u, AuthzResult::Allow, AuthzResult::Allow)) => u, Err(_) => return result(401), _ => return result(403), }; self.handle_inner(req_uuid, upstream_addr, client, req) } fn handle_inner<C: Connect +'static>( &self, req_uuid: uuid::Uuid, upstream_addr: std::net::SocketAddr, client: &Client<C>, req: Request<Body>, ) -> Box<Future<Item = Response<Body>, Error = io::Error> + Send> { crappy_log(&req); let mitm_enabled = true; match req.method() { &Method::CONNECT => match is_mitm(&req, mitm_enabled) { true => self.handle_mitm(req_uuid, client.clone(), upstream_addr, req), false => handle_tls_raw(req_uuid, client, upstream_addr, req), }, _ => self.handle_http(req_uuid, client, req), } } fn handle_http_forward<C: Connect +'static>( &self, req_uuid: uuid::Uuid, mut client: Client<C>, req: Request<Body>, ) -> Box<Future<Item = Response<Body>, Error = io::Error> + Send> { let client = client.request(req); match self.tracer.clone() { Some(tx) => { let f = tx .send(Trace::TraceId(format!("{}", req_uuid))) .map_err(|e| { println!("Error in trace: {:?}", e); io::Error::from(io::ErrorKind::Other) }); Box::new( f.join(client.map(|resp| resp).map_err(|e| { println!("Error in upstream: {:?}", e); io::Error::from(io::ErrorKind::Other) })) .map(|(_, b)| b), ) } None => Box::new(client.map(|resp| resp).map_err(|e| { println!("Error in upstream: {:?}", e); io::Error::from(io::ErrorKind::Other) })), } } fn handle_http<C: Connect +'static>( &self, req_uuid: uuid::Uuid, client: &Client<C>, mut req: Request<Body>, ) -> Box<Future<Item = Response<Body>, Error = io::Error> + Send> { make_absolute(&mut req); let client = client.clone().request(req); match self.tracer.clone() { Some(tx) => { let f = tx .send(Trace::TraceId(format!("{}", req_uuid))) .map_err(|e| { println!("Error in trace: {:?}", e); io::Error::from(io::ErrorKind::Other) }); Box::new( f.join(client.map(|resp| resp).map_err(|e| { println!("Error in upstream: {:?}", e); io::Error::from(io::ErrorKind::Other) })) .map(|(_, b)| b), ) } None => Box::new(client.map(|resp| resp).map_err(|e| { println!("Error in upstream: {:?}", e); io::Error::from(io::ErrorKind::Other) })), } } fn handle_mitm<C: Connect +'static>( &self, req_uuid: uuid::Uuid, client: Client<C>, upstream_addr: std::net::SocketAddr, req: Request<Body>, ) -> Box<Future<Item = Response<Body>, Error = io::Error> + Send> { let (resp_tx, resp_rx) = oneshot::channel(); // connect, then on_upgrade() // this needs to be reworked // there is a panic in upgrade 
none let authority = req.uri().authority_part().unwrap().clone(); let cpair = TcpStream::connect(&upstream_addr) .map_err(|err| eprintln!("mitm tcp connect: {}", err)) .and_then(move |upstream| { let cx = SslConnector::builder(SslMethod::tls()).unwrap().build(); cx.connect_async(authority.host(), upstream) .map(|ssl_conn| { let _ = resp_tx.send(()).unwrap(); println!("MITM Connection established"); let peer_cert = { ssl_conn.get_ref().ssl().peer_certificate().unwrap().clone() }; (ssl_conn, peer_cert) }) .map_err(|e| println!("tls error: {:}", e)) }); let upgraded = req.into_body().on_upgrade(); let ca = self.ca.clone(); let np = self.clone(); let req_uuid = req_uuid.clone(); let upg2 = upgraded .map_err(|err| eprintln!("upgrade: {}", err)) .join(cpair) .and_then(move |tuple| { let (downstream, (upstream, peer_cert)) = tuple; let ca = ca; let req_uuid = req_uuid; let peer_cert_signed = ca.sign_cert_from_cert(&peer_cert).unwrap(); let mut acceptor = SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap(); acceptor.set_private_key(ca.child_key.as_ref()).unwrap(); acceptor.set_certificate(peer_cert_signed.as_ref()).unwrap(); acceptor.check_private_key().unwrap(); let acceptor = acceptor.build(); acceptor .accept_async(downstream) .map_err(|e| eprintln!("accept: {}", e)) .and_then(move |tls_downstream| { // This should cause the pool to have a single entry // and then magic let upstream_pool = { let local_pool = pool::Pool::empty(1); let pooled_upstream = pool::PoolItem::new(upstream); pool::PoolItem::attach(pooled_upstream, local_pool.clone()); local_pool }; Http::new() .serve_connection( tls_downstream, service_fn(move |req: Request<Body>| { let upstream_pool = upstream_pool.clone(); let uc = Client::builder() .keep_alive(false) .build(AlreadyConnected(upstream_pool)); // println!("In inner client handler: {} {:?}", req_uuid, req); np.handle_http(req_uuid, &uc, req) }), ) .map_err(|err| { eprintln!("Error in inner http: {}", err); () }) // This is proxy without analysis, just forward // serve_connection // let (u2dr, u2dw) = upstream_conn.split(); // let (d2ur, d2uw) = tls_downstream.split(); // let u2df = copy(u2dr, d2uw); // let d2uf = copy(d2ur, u2dw); // d2uf.join(u2df) // .map_err(|err| eprintln!("mitm forward: {}", err)); }) }) .map(|_| ()) .map_err(|e| println!("Error {:?}", e)); hyper::rt::spawn(upg2); Box::new( resp_rx .map(|_| 200) .or_else(|_| Ok(502)) .and_then(|i| result(i)), ) } } struct AlreadyConnected<T: Send +'static + AsyncRead + AsyncWrite +'static +
dup
identifier_name
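Elsewhere in the same proxy.rs source, `normalize_authority` builds the `host:port` string that is resolved with `to_socket_addrs`, defaulting the port to 80 when the URI carries none. The stand-alone function below is a hypothetical simplification of that behaviour, not the original signature.

```rust
// Sketch of `normalize_authority`: format "host:port", defaulting to port 80.
fn normalize_authority(host: Option<&str>, port: Option<u16>) -> String {
    format!("{}:{}", host.unwrap_or(""), port.unwrap_or(80))
}

fn main() {
    assert_eq!(normalize_authority(Some("example.com"), None), "example.com:80");
    assert_eq!(normalize_authority(Some("example.com"), Some(8443)), "example.com:8443");
    // CONNECT targets normally arrive as "host:port", so the port is explicit there.
    assert_eq!(normalize_authority(Some("example.com"), Some(443)), "example.com:443");
}
```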
main.rs
use std::{collections::HashSet, fs::File, sync::Arc, time::Duration}; use anyhow::{Context as _, Result}; use serenity::{ async_trait, client::bridge::gateway::GatewayIntents, framework::standard::{ macros::{command, group}, Args, CommandResult, StandardFramework, }, futures::StreamExt, http::Http, model::{channel::Message, gateway::Ready, id::RoleId}, prelude::*, utils::{content_safe, ContentSafeOptions, MessageBuilder}, }; use tracing::info; use types::*; mod types; const POSITIVE_REACTION: char = '✅'; const NEGATIVE_REACTION: char = '❌'; const SENT_REACTION: char = '📨'; const REACTION_TIMEOUT: Duration = Duration::from_secs(30 * 60); struct ConfigContainer; impl TypeMapKey for ConfigContainer { type Value = Arc<Config>; } struct Handler; #[async_trait] impl EventHandler for Handler { async fn ready(&self, _: Context, ready: Ready) { info!("{} is connected!", ready.user.name); } } // Simple `split_once` "polyfill" since it's currently unstable. fn split_once(text: &str, pat: char) -> Option<(&str, &str)> { let mut iter = text.splitn(2, pat); Some((iter.next()?, iter.next()?)) } async fn parse_name_and_discriminator( args: &mut Args, ) -> Option<Result<(String, u16), &'static str>> { let mut name = String::new(); while let Ok(arg) = args.single::<String>() { let mut fragment = arg.as_str(); if name.is_empty() { match fragment.strip_prefix('@') { Some(trimmed) => fragment = trimmed, None => { args.rewind(); return None; } } } match split_once(fragment, '#') { Some((name_tail, discriminator_str)) => { name.push_str(name_tail); match discriminator_str.parse() { Ok(discriminator) if (1..=9999).contains(&discriminator) => { return Some(Ok((name, discriminator))) } _ => return Some(Err("invalid discriminator")), } } None => name.push_str(fragment), } } Some(Err( "invalid format; mention should be in the form `@username#discriminator`", )) } #[group("relay")] #[commands(forward)] struct Relay; #[command("forward")] async fn forward(ctx: &Context, msg: &Message, mut args: Args) -> CommandResult { let config = { let data = ctx.data.read().await; data.get::<ConfigContainer>().unwrap().clone() }; let delegate_member = if let Ok(member) = ctx .http .get_member(config.guild_id(), msg.author.id.into()) .await { member } else { msg.channel_id .say(ctx, "Umm... have I made your acquaintance?") .await?; return Ok(()); }; if!delegate_member .roles .contains(&config.delegate_role_id().into()) { msg.channel_id .say(ctx, format!("This command is only available to delegates.")) .await?; return Ok(()); } let committee = if let Some(committee) = config .committees() .iter() .find(|&committee| delegate_member.roles.contains(&committee.role_id().into())) { committee } else { msg.channel_id .say(ctx, "Sorry, but I'm not sure which committee you're on.") .await?; return Ok(()); }; let committee_channel = ctx .cache .guild_channel(committee.channel_id()) .await .expect("failed to find committee channel"); let recipient_id = match parse_name_and_discriminator(&mut args).await { Some(res) => match res { Ok((name, discriminator)) => { let members = delegate_member.guild_id.members(ctx, None, None).await?; match members .iter() .map(|member| &member.user) .find(|&user| user.name == name && user.discriminator == discriminator) .map(|user| user.id) { Some(id) => Some(id), None => { msg.channel_id .say(ctx, "Sorry, I couldn't find that user.") .await?; return Ok(()); } } } Err(err) => { msg.channel_id .say( ctx, format!( "Sorry, I couldn't understand your mention. 
Problem: `{}`", err ), ) .await?; return Ok(()); } }, None => None, }; let is_external = recipient_id.is_some();
let committee_msg = committee_channel .say( ctx, &MessageBuilder::new() .push("Received request from ") .mention(&msg.author) .push(if is_external { format!( " to forward message to {}", &recipient_id.unwrap().mention() ) } else { String::new() }) .push_line(":") .push_quote_line(cleaned_content.clone()) .push_line("") .push(if is_external { "Use the reactions below to approve or deny this request. " } else { "" }) .push(format!( "Reply to this message within the next {} minutes{}to send a response.", REACTION_TIMEOUT.as_secs() / 60, if is_external { " after voting " } else { " " } )) .build(), ) .await?; if is_external { committee_msg.react(ctx, POSITIVE_REACTION).await?; committee_msg.react(ctx, NEGATIVE_REACTION).await?; } msg.reply( ctx, &MessageBuilder::new() .push("Your message has been forwarded to ") .push_bold_safe(committee.name()) .push(if is_external { " for approval" } else { "" }) .push(".") .build(), ) .await?; typing.stop(); if is_external { let approved = if let Some(reaction) = committee_msg .await_reaction(ctx) .timeout(REACTION_TIMEOUT) .await { match reaction .as_inner_ref() .emoji .as_data() .chars() .next() .unwrap() { POSITIVE_REACTION => { committee_msg .reply( ctx, &MessageBuilder::new() .push("This request has been ") .push_bold("approved") .push(".") .build(), ) .await?; true } NEGATIVE_REACTION => { committee_msg .reply( ctx, &MessageBuilder::new() .push("This request has been ") .push_bold("rejected") .push(".") .build(), ) .await?; false } _ => { committee_msg .reply(ctx, "Invalid reaction; rejecting request.") .await?; false } } } else { committee_msg.delete_reactions(ctx).await?; committee_msg .reply( ctx, "No consensus reached; rejecting request.", ) .await?; false }; msg.reply( ctx, &MessageBuilder::new() .push("This request has been ") .push_bold(if approved { "approved" } else { "rejected" }) .push(".") .build(), ) .await?; if approved { recipient_id .unwrap() .create_dm_channel(ctx) .await? 
.say( ctx, &MessageBuilder::new() .push("Received message from ") .mention(&msg.author) .push_line(":") .push_quote_line(cleaned_content.clone()), ) .await?; } } let committee_msg_id = committee_msg.id; let mut replies = committee_channel .id .await_replies(ctx) .timeout(REACTION_TIMEOUT) .filter(move |msg| match msg.message_reference { Some(ref msg_ref) => match msg_ref.message_id { Some(m) => m == committee_msg_id, None => false, }, None => false, }) .await; while let Some(reply_msg) = replies.next().await { let cleaned_content = content_safe( &ctx.cache, &reply_msg.content, &ContentSafeOptions::default(), ) .await; msg.channel_id .say( ctx, &MessageBuilder::new() .push("Received reply from ") .mention(&reply_msg.author) .push_line(":") .push_quote_line(cleaned_content.clone()), ) .await?; reply_msg.react(ctx, SENT_REACTION).await?; } Ok(()) } #[group("role")] #[commands(join)] struct Role; #[command("join")] async fn join(ctx: &Context, msg: &Message, args: Args) -> CommandResult { let config = { let data = ctx.data.read().await; data.get::<ConfigContainer>().unwrap().clone() }; let in_valid_guild = match msg.guild_id { Some(id) => id.as_u64() == &config.guild_id(), None => false, }; if!in_valid_guild { msg.channel_id .say(ctx, "I'm not configured to work here.") .await?; return Ok(()); } let guild = msg.guild(ctx).await.unwrap(); let query = args.rest().to_lowercase(); let committee = if let Some(committee) = config.committees().iter().find(|&committee| { query == guild.roles[&committee.role_id().into()].name.to_lowercase() || query == committee.name() }) { committee } else { msg.reply(ctx, "Sorry, I couldn't find a committee by that name.") .await?; return Ok(()); }; let mut member = msg.member(ctx).await?; let committee_role_ids: HashSet<RoleId> = config .committees() .iter() .map(|committee| committee.role_id().into()) .collect(); let member_role_ids: HashSet<RoleId> = member.roles.iter().copied().collect(); let other_committee_roles: Vec<_> = committee_role_ids .intersection(&member_role_ids) .cloned() .collect(); if!other_committee_roles.is_empty() { member.remove_roles(ctx, &other_committee_roles).await?; } let committee_role_id: RoleId = committee.role_id().into(); let delegate_role_id: RoleId = config.delegate_role_id().into(); let mut intended_roles = HashSet::with_capacity(2); intended_roles.insert(committee_role_id); intended_roles.insert(delegate_role_id); let roles_to_add: Vec<_> = intended_roles .difference(&member_role_ids) .cloned() .collect(); if!roles_to_add.is_empty() { member.add_roles(ctx, &roles_to_add).await?; } msg.react(ctx, POSITIVE_REACTION).await?; Ok(()) } #[tokio::main] async fn main() -> Result<()> { tracing_subscriber::fmt::init(); let config_file = File::open("config.ron").context("missing config file")?; let config: Config = ron::de::from_reader(config_file).context("invalid config file")?; let bot_id = Http::new_with_token(config.token()) .get_current_application_info() .await? 
.id; let framework = StandardFramework::new() .configure(|c| { c.no_dm_prefix(true) .with_whitespace(true) .on_mention(Some(bot_id)) }) .group(&RELAY_GROUP) .group(&ROLE_GROUP); let mut client = Client::builder(config.token()) .event_handler(Handler) .framework(framework) .intents( GatewayIntents::DIRECT_MESSAGES | GatewayIntents::DIRECT_MESSAGE_TYPING | GatewayIntents::DIRECT_MESSAGE_REACTIONS | GatewayIntents::GUILDS | GatewayIntents::GUILD_MESSAGES | GatewayIntents::GUILD_MESSAGE_TYPING | GatewayIntents::GUILD_MESSAGE_REACTIONS | GatewayIntents::GUILD_MEMBERS, ) .await .context("failed to create client")?; { let mut data = client.data.write().await; data.insert::<ConfigContainer>(Arc::new(config)); } client.start().await.context("failed to start client")?; Ok(()) }
let cleaned_content = content_safe(ctx, args.rest(), &ContentSafeOptions::default()).await; let typing = msg.channel_id.start_typing(&ctx.http)?;
random_line_split
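The main.rs record above parses `@username#discriminator` mentions via `parse_name_and_discriminator`, which strips the `@`, splits on `#`, and validates the discriminator range 1..=9999. The sketch below condenses that logic into a single-string helper; `parse_mention` is a hypothetical simplification (the original also stitches multi-word names back together from the argument stream).

```rust
// Simplified analogue of `parse_name_and_discriminator` for one argument.
fn parse_mention(arg: &str) -> Result<(String, u16), &'static str> {
    let arg = arg.strip_prefix('@').ok_or("mention must start with '@'")?;
    let mut iter = arg.splitn(2, '#');
    let name = iter.next().unwrap_or("");
    let disc = iter.next().ok_or("missing '#discriminator'")?;
    match disc.parse::<u16>() {
        Ok(d) if (1..=9999).contains(&d) => Ok((name.to_string(), d)),
        _ => Err("invalid discriminator"),
    }
}

fn main() {
    assert_eq!(parse_mention("@someuser#0042"), Ok(("someuser".to_string(), 42)));
    assert!(parse_mention("someuser#42").is_err());   // missing '@'
    assert!(parse_mention("@someuser#99999").is_err()); // out of range
}
```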
main.rs
use std::{collections::HashSet, fs::File, sync::Arc, time::Duration}; use anyhow::{Context as _, Result}; use serenity::{ async_trait, client::bridge::gateway::GatewayIntents, framework::standard::{ macros::{command, group}, Args, CommandResult, StandardFramework, }, futures::StreamExt, http::Http, model::{channel::Message, gateway::Ready, id::RoleId}, prelude::*, utils::{content_safe, ContentSafeOptions, MessageBuilder}, }; use tracing::info; use types::*; mod types; const POSITIVE_REACTION: char = '✅'; const NEGATIVE_REACTION: char = '❌'; const SENT_REACTION: char = '📨'; const REACTION_TIMEOUT: Duration = Duration::from_secs(30 * 60); struct ConfigContainer; impl TypeMapKey for ConfigContainer { type Value = Arc<Config>; } struct Handler; #[async_trait] impl EventHandler for Handler { async fn ready(&self, _: Context, ready: Ready) { info!("{} is connected!", ready.user.name); } } // Simple `split_once` "polyfill" since it's currently unstable. fn split_once(text: &str, pat: char) -> Option<(&str, &str)> { let mut iter = text.splitn(2, pat); Some((iter.next()?, iter.next()?)) } async fn parse_name_and_discriminator( args: &mut Args, ) -> Option<Result<(String, u16), &'static str>> { let mut name = String::new(); while let Ok(arg) = args.single::<String>() { let mut fragment = arg.as_str(); if name.is_empty() { match fragment.strip_prefix('@') { Some(trimmed) => fragment = trimmed, None => { args.rewind(); return None; } } } match split_once(fragment, '#') { Some((name_tail, discriminator_str)) => { name.push_str(name_tail); match discriminator_str.parse() { Ok(discriminator) if (1..=9999).contains(&discriminator) => { return Some(Ok((name, discriminator))) } _ => return Some(Err("invalid discriminator")), } } None => name.push_str(fragment), } } Some(Err( "invalid format; mention should be in the form `@username#discriminator`", )) } #[group("relay")] #[commands(forward)] struct Relay;
mmand("forward")] async fn forward(ctx: &Context, msg: &Message, mut args: Args) -> CommandResult { let config = { let data = ctx.data.read().await; data.get::<ConfigContainer>().unwrap().clone() }; let delegate_member = if let Ok(member) = ctx .http .get_member(config.guild_id(), msg.author.id.into()) .await { member } else { msg.channel_id .say(ctx, "Umm... have I made your acquaintance?") .await?; return Ok(()); }; if!delegate_member .roles .contains(&config.delegate_role_id().into()) { msg.channel_id .say(ctx, format!("This command is only available to delegates.")) .await?; return Ok(()); } let committee = if let Some(committee) = config .committees() .iter() .find(|&committee| delegate_member.roles.contains(&committee.role_id().into())) { committee } else { msg.channel_id .say(ctx, "Sorry, but I'm not sure which committee you're on.") .await?; return Ok(()); }; let committee_channel = ctx .cache .guild_channel(committee.channel_id()) .await .expect("failed to find committee channel"); let recipient_id = match parse_name_and_discriminator(&mut args).await { Some(res) => match res { Ok((name, discriminator)) => { let members = delegate_member.guild_id.members(ctx, None, None).await?; match members .iter() .map(|member| &member.user) .find(|&user| user.name == name && user.discriminator == discriminator) .map(|user| user.id) { Some(id) => Some(id), None => { msg.channel_id .say(ctx, "Sorry, I couldn't find that user.") .await?; return Ok(()); } } } Err(err) => { msg.channel_id .say( ctx, format!( "Sorry, I couldn't understand your mention. Problem: `{}`", err ), ) .await?; return Ok(()); } }, None => None, }; let is_external = recipient_id.is_some(); let cleaned_content = content_safe(ctx, args.rest(), &ContentSafeOptions::default()).await; let typing = msg.channel_id.start_typing(&ctx.http)?; let committee_msg = committee_channel .say( ctx, &MessageBuilder::new() .push("Received request from ") .mention(&msg.author) .push(if is_external { format!( " to forward message to {}", &recipient_id.unwrap().mention() ) } else { String::new() }) .push_line(":") .push_quote_line(cleaned_content.clone()) .push_line("") .push(if is_external { "Use the reactions below to approve or deny this request. 
" } else { "" }) .push(format!( "Reply to this message within the next {} minutes{}to send a response.", REACTION_TIMEOUT.as_secs() / 60, if is_external { " after voting " } else { " " } )) .build(), ) .await?; if is_external { committee_msg.react(ctx, POSITIVE_REACTION).await?; committee_msg.react(ctx, NEGATIVE_REACTION).await?; } msg.reply( ctx, &MessageBuilder::new() .push("Your message has been forwarded to ") .push_bold_safe(committee.name()) .push(if is_external { " for approval" } else { "" }) .push(".") .build(), ) .await?; typing.stop(); if is_external { let approved = if let Some(reaction) = committee_msg .await_reaction(ctx) .timeout(REACTION_TIMEOUT) .await { match reaction .as_inner_ref() .emoji .as_data() .chars() .next() .unwrap() { POSITIVE_REACTION => { committee_msg .reply( ctx, &MessageBuilder::new() .push("This request has been ") .push_bold("approved") .push(".") .build(), ) .await?; true } NEGATIVE_REACTION => { committee_msg .reply( ctx, &MessageBuilder::new() .push("This request has been ") .push_bold("rejected") .push(".") .build(), ) .await?; false } _ => { committee_msg .reply(ctx, "Invalid reaction; rejecting request.") .await?; false } } } else { committee_msg.delete_reactions(ctx).await?; committee_msg .reply( ctx, "No consensus reached; rejecting request.", ) .await?; false }; msg.reply( ctx, &MessageBuilder::new() .push("This request has been ") .push_bold(if approved { "approved" } else { "rejected" }) .push(".") .build(), ) .await?; if approved { recipient_id .unwrap() .create_dm_channel(ctx) .await? .say( ctx, &MessageBuilder::new() .push("Received message from ") .mention(&msg.author) .push_line(":") .push_quote_line(cleaned_content.clone()), ) .await?; } } let committee_msg_id = committee_msg.id; let mut replies = committee_channel .id .await_replies(ctx) .timeout(REACTION_TIMEOUT) .filter(move |msg| match msg.message_reference { Some(ref msg_ref) => match msg_ref.message_id { Some(m) => m == committee_msg_id, None => false, }, None => false, }) .await; while let Some(reply_msg) = replies.next().await { let cleaned_content = content_safe( &ctx.cache, &reply_msg.content, &ContentSafeOptions::default(), ) .await; msg.channel_id .say( ctx, &MessageBuilder::new() .push("Received reply from ") .mention(&reply_msg.author) .push_line(":") .push_quote_line(cleaned_content.clone()), ) .await?; reply_msg.react(ctx, SENT_REACTION).await?; } Ok(()) } #[group("role")] #[commands(join)] struct Role; #[command("join")] async fn join(ctx: &Context, msg: &Message, args: Args) -> CommandResult { let config = { let data = ctx.data.read().await; data.get::<ConfigContainer>().unwrap().clone() }; let in_valid_guild = match msg.guild_id { Some(id) => id.as_u64() == &config.guild_id(), None => false, }; if!in_valid_guild { msg.channel_id .say(ctx, "I'm not configured to work here.") .await?; return Ok(()); } let guild = msg.guild(ctx).await.unwrap(); let query = args.rest().to_lowercase(); let committee = if let Some(committee) = config.committees().iter().find(|&committee| { query == guild.roles[&committee.role_id().into()].name.to_lowercase() || query == committee.name() }) { committee } else { msg.reply(ctx, "Sorry, I couldn't find a committee by that name.") .await?; return Ok(()); }; let mut member = msg.member(ctx).await?; let committee_role_ids: HashSet<RoleId> = config .committees() .iter() .map(|committee| committee.role_id().into()) .collect(); let member_role_ids: HashSet<RoleId> = member.roles.iter().copied().collect(); let other_committee_roles: Vec<_> = 
committee_role_ids .intersection(&member_role_ids) .cloned() .collect(); if!other_committee_roles.is_empty() { member.remove_roles(ctx, &other_committee_roles).await?; } let committee_role_id: RoleId = committee.role_id().into(); let delegate_role_id: RoleId = config.delegate_role_id().into(); let mut intended_roles = HashSet::with_capacity(2); intended_roles.insert(committee_role_id); intended_roles.insert(delegate_role_id); let roles_to_add: Vec<_> = intended_roles .difference(&member_role_ids) .cloned() .collect(); if!roles_to_add.is_empty() { member.add_roles(ctx, &roles_to_add).await?; } msg.react(ctx, POSITIVE_REACTION).await?; Ok(()) } #[tokio::main] async fn main() -> Result<()> { tracing_subscriber::fmt::init(); let config_file = File::open("config.ron").context("missing config file")?; let config: Config = ron::de::from_reader(config_file).context("invalid config file")?; let bot_id = Http::new_with_token(config.token()) .get_current_application_info() .await? .id; let framework = StandardFramework::new() .configure(|c| { c.no_dm_prefix(true) .with_whitespace(true) .on_mention(Some(bot_id)) }) .group(&RELAY_GROUP) .group(&ROLE_GROUP); let mut client = Client::builder(config.token()) .event_handler(Handler) .framework(framework) .intents( GatewayIntents::DIRECT_MESSAGES | GatewayIntents::DIRECT_MESSAGE_TYPING | GatewayIntents::DIRECT_MESSAGE_REACTIONS | GatewayIntents::GUILDS | GatewayIntents::GUILD_MESSAGES | GatewayIntents::GUILD_MESSAGE_TYPING | GatewayIntents::GUILD_MESSAGE_REACTIONS | GatewayIntents::GUILD_MEMBERS, ) .await .context("failed to create client")?; { let mut data = client.data.write().await; data.insert::<ConfigContainer>(Arc::new(config)); } client.start().await.context("failed to start client")?; Ok(()) }
#[co
identifier_name
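The `join` command in the record above reconciles a member's roles with set operations: committee roles already held are removed, then whichever intended roles are still missing get added. A std-only sketch of that intersection/difference step follows; role IDs are plain `u64`s here and `reconcile_roles` is a hypothetical helper.

```rust
use std::collections::HashSet;

// Sketch of the role bookkeeping in `join`.
fn reconcile_roles(
    member_roles: &HashSet<u64>,
    committee_roles: &HashSet<u64>,
    intended_roles: &HashSet<u64>,
) -> (Vec<u64>, Vec<u64>) {
    let to_remove: Vec<u64> = committee_roles.intersection(member_roles).copied().collect();
    let to_add: Vec<u64> = intended_roles.difference(member_roles).copied().collect();
    (to_remove, to_add)
}

fn main() {
    let member: HashSet<u64> = [10, 20].iter().copied().collect(); // delegate(10) + old committee(20)
    let committees: HashSet<u64> = [20, 21, 22].iter().copied().collect();
    let intended: HashSet<u64> = [10, 21].iter().copied().collect(); // delegate + new committee
    let (remove, add) = reconcile_roles(&member, &committees, &intended);
    assert_eq!(remove, vec![20]);
    assert_eq!(add, vec![21]);
}
```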
main.rs
use std::{collections::HashSet, fs::File, sync::Arc, time::Duration}; use anyhow::{Context as _, Result}; use serenity::{ async_trait, client::bridge::gateway::GatewayIntents, framework::standard::{ macros::{command, group}, Args, CommandResult, StandardFramework, }, futures::StreamExt, http::Http, model::{channel::Message, gateway::Ready, id::RoleId}, prelude::*, utils::{content_safe, ContentSafeOptions, MessageBuilder}, }; use tracing::info; use types::*; mod types; const POSITIVE_REACTION: char = '✅'; const NEGATIVE_REACTION: char = '❌'; const SENT_REACTION: char = '📨'; const REACTION_TIMEOUT: Duration = Duration::from_secs(30 * 60); struct ConfigContainer; impl TypeMapKey for ConfigContainer { type Value = Arc<Config>; } struct Handler; #[async_trait] impl EventHandler for Handler { async fn ready(&self, _: Context, ready: Ready) { info!("{} is connected!", ready.user.name); } } // Simple `split_once` "polyfill" since it's currently unstable. fn split_once(text: &str, pat: char) -> Option<(&str, &str)> { l
fn parse_name_and_discriminator( args: &mut Args, ) -> Option<Result<(String, u16), &'static str>> { let mut name = String::new(); while let Ok(arg) = args.single::<String>() { let mut fragment = arg.as_str(); if name.is_empty() { match fragment.strip_prefix('@') { Some(trimmed) => fragment = trimmed, None => { args.rewind(); return None; } } } match split_once(fragment, '#') { Some((name_tail, discriminator_str)) => { name.push_str(name_tail); match discriminator_str.parse() { Ok(discriminator) if (1..=9999).contains(&discriminator) => { return Some(Ok((name, discriminator))) } _ => return Some(Err("invalid discriminator")), } } None => name.push_str(fragment), } } Some(Err( "invalid format; mention should be in the form `@username#discriminator`", )) } #[group("relay")] #[commands(forward)] struct Relay; #[command("forward")] async fn forward(ctx: &Context, msg: &Message, mut args: Args) -> CommandResult { let config = { let data = ctx.data.read().await; data.get::<ConfigContainer>().unwrap().clone() }; let delegate_member = if let Ok(member) = ctx .http .get_member(config.guild_id(), msg.author.id.into()) .await { member } else { msg.channel_id .say(ctx, "Umm... have I made your acquaintance?") .await?; return Ok(()); }; if!delegate_member .roles .contains(&config.delegate_role_id().into()) { msg.channel_id .say(ctx, format!("This command is only available to delegates.")) .await?; return Ok(()); } let committee = if let Some(committee) = config .committees() .iter() .find(|&committee| delegate_member.roles.contains(&committee.role_id().into())) { committee } else { msg.channel_id .say(ctx, "Sorry, but I'm not sure which committee you're on.") .await?; return Ok(()); }; let committee_channel = ctx .cache .guild_channel(committee.channel_id()) .await .expect("failed to find committee channel"); let recipient_id = match parse_name_and_discriminator(&mut args).await { Some(res) => match res { Ok((name, discriminator)) => { let members = delegate_member.guild_id.members(ctx, None, None).await?; match members .iter() .map(|member| &member.user) .find(|&user| user.name == name && user.discriminator == discriminator) .map(|user| user.id) { Some(id) => Some(id), None => { msg.channel_id .say(ctx, "Sorry, I couldn't find that user.") .await?; return Ok(()); } } } Err(err) => { msg.channel_id .say( ctx, format!( "Sorry, I couldn't understand your mention. Problem: `{}`", err ), ) .await?; return Ok(()); } }, None => None, }; let is_external = recipient_id.is_some(); let cleaned_content = content_safe(ctx, args.rest(), &ContentSafeOptions::default()).await; let typing = msg.channel_id.start_typing(&ctx.http)?; let committee_msg = committee_channel .say( ctx, &MessageBuilder::new() .push("Received request from ") .mention(&msg.author) .push(if is_external { format!( " to forward message to {}", &recipient_id.unwrap().mention() ) } else { String::new() }) .push_line(":") .push_quote_line(cleaned_content.clone()) .push_line("") .push(if is_external { "Use the reactions below to approve or deny this request. 
" } else { "" }) .push(format!( "Reply to this message within the next {} minutes{}to send a response.", REACTION_TIMEOUT.as_secs() / 60, if is_external { " after voting " } else { " " } )) .build(), ) .await?; if is_external { committee_msg.react(ctx, POSITIVE_REACTION).await?; committee_msg.react(ctx, NEGATIVE_REACTION).await?; } msg.reply( ctx, &MessageBuilder::new() .push("Your message has been forwarded to ") .push_bold_safe(committee.name()) .push(if is_external { " for approval" } else { "" }) .push(".") .build(), ) .await?; typing.stop(); if is_external { let approved = if let Some(reaction) = committee_msg .await_reaction(ctx) .timeout(REACTION_TIMEOUT) .await { match reaction .as_inner_ref() .emoji .as_data() .chars() .next() .unwrap() { POSITIVE_REACTION => { committee_msg .reply( ctx, &MessageBuilder::new() .push("This request has been ") .push_bold("approved") .push(".") .build(), ) .await?; true } NEGATIVE_REACTION => { committee_msg .reply( ctx, &MessageBuilder::new() .push("This request has been ") .push_bold("rejected") .push(".") .build(), ) .await?; false } _ => { committee_msg .reply(ctx, "Invalid reaction; rejecting request.") .await?; false } } } else { committee_msg.delete_reactions(ctx).await?; committee_msg .reply( ctx, "No consensus reached; rejecting request.", ) .await?; false }; msg.reply( ctx, &MessageBuilder::new() .push("This request has been ") .push_bold(if approved { "approved" } else { "rejected" }) .push(".") .build(), ) .await?; if approved { recipient_id .unwrap() .create_dm_channel(ctx) .await? .say( ctx, &MessageBuilder::new() .push("Received message from ") .mention(&msg.author) .push_line(":") .push_quote_line(cleaned_content.clone()), ) .await?; } } let committee_msg_id = committee_msg.id; let mut replies = committee_channel .id .await_replies(ctx) .timeout(REACTION_TIMEOUT) .filter(move |msg| match msg.message_reference { Some(ref msg_ref) => match msg_ref.message_id { Some(m) => m == committee_msg_id, None => false, }, None => false, }) .await; while let Some(reply_msg) = replies.next().await { let cleaned_content = content_safe( &ctx.cache, &reply_msg.content, &ContentSafeOptions::default(), ) .await; msg.channel_id .say( ctx, &MessageBuilder::new() .push("Received reply from ") .mention(&reply_msg.author) .push_line(":") .push_quote_line(cleaned_content.clone()), ) .await?; reply_msg.react(ctx, SENT_REACTION).await?; } Ok(()) } #[group("role")] #[commands(join)] struct Role; #[command("join")] async fn join(ctx: &Context, msg: &Message, args: Args) -> CommandResult { let config = { let data = ctx.data.read().await; data.get::<ConfigContainer>().unwrap().clone() }; let in_valid_guild = match msg.guild_id { Some(id) => id.as_u64() == &config.guild_id(), None => false, }; if!in_valid_guild { msg.channel_id .say(ctx, "I'm not configured to work here.") .await?; return Ok(()); } let guild = msg.guild(ctx).await.unwrap(); let query = args.rest().to_lowercase(); let committee = if let Some(committee) = config.committees().iter().find(|&committee| { query == guild.roles[&committee.role_id().into()].name.to_lowercase() || query == committee.name() }) { committee } else { msg.reply(ctx, "Sorry, I couldn't find a committee by that name.") .await?; return Ok(()); }; let mut member = msg.member(ctx).await?; let committee_role_ids: HashSet<RoleId> = config .committees() .iter() .map(|committee| committee.role_id().into()) .collect(); let member_role_ids: HashSet<RoleId> = member.roles.iter().copied().collect(); let other_committee_roles: Vec<_> = 
committee_role_ids .intersection(&member_role_ids) .cloned() .collect(); if!other_committee_roles.is_empty() { member.remove_roles(ctx, &other_committee_roles).await?; } let committee_role_id: RoleId = committee.role_id().into(); let delegate_role_id: RoleId = config.delegate_role_id().into(); let mut intended_roles = HashSet::with_capacity(2); intended_roles.insert(committee_role_id); intended_roles.insert(delegate_role_id); let roles_to_add: Vec<_> = intended_roles .difference(&member_role_ids) .cloned() .collect(); if!roles_to_add.is_empty() { member.add_roles(ctx, &roles_to_add).await?; } msg.react(ctx, POSITIVE_REACTION).await?; Ok(()) } #[tokio::main] async fn main() -> Result<()> { tracing_subscriber::fmt::init(); let config_file = File::open("config.ron").context("missing config file")?; let config: Config = ron::de::from_reader(config_file).context("invalid config file")?; let bot_id = Http::new_with_token(config.token()) .get_current_application_info() .await? .id; let framework = StandardFramework::new() .configure(|c| { c.no_dm_prefix(true) .with_whitespace(true) .on_mention(Some(bot_id)) }) .group(&RELAY_GROUP) .group(&ROLE_GROUP); let mut client = Client::builder(config.token()) .event_handler(Handler) .framework(framework) .intents( GatewayIntents::DIRECT_MESSAGES | GatewayIntents::DIRECT_MESSAGE_TYPING | GatewayIntents::DIRECT_MESSAGE_REACTIONS | GatewayIntents::GUILDS | GatewayIntents::GUILD_MESSAGES | GatewayIntents::GUILD_MESSAGE_TYPING | GatewayIntents::GUILD_MESSAGE_REACTIONS | GatewayIntents::GUILD_MEMBERS, ) .await .context("failed to create client")?; { let mut data = client.data.write().await; data.insert::<ConfigContainer>(Arc::new(config)); } client.start().await.context("failed to start client")?; Ok(()) }
et mut iter = text.splitn(2, pat); Some((iter.next()?, iter.next()?)) } async
identifier_body
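In the `forward` command shown in the main.rs records, approval hinges on the first character of the committee's reaction, and a timeout counts as rejection. The sketch below is a hypothetical condensation of that decision; the real code also replies to the committee message for each branch.

```rust
const POSITIVE_REACTION: char = '✅';
const NEGATIVE_REACTION: char = '❌';

// Only the positive reaction approves; a negative or unknown reaction, or a
// timeout (modelled here as None), rejects the request.
fn approved(first_reaction_char: Option<char>) -> bool {
    matches!(first_reaction_char, Some(POSITIVE_REACTION))
}

fn main() {
    assert!(approved(Some(POSITIVE_REACTION)));
    assert!(!approved(Some(NEGATIVE_REACTION)));
    assert!(!approved(None)); // timeout: no consensus, rejected
}
```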
pars_upload.rs
use crate::{ create_manager::models::BlobMetadata, document_manager::{accept_job, check_in_document_handler, delete_document_handler}, models::{Document, FileSource, JobStatusResponse, UniqueDocumentIdentifier}, multipart_form_data::{collect_fields, Field}, state_manager::{with_state, JobStatusClient, StateManager}, storage_client::{models::StorageFile, AzureBlobStorage, StorageClient}, }; use search_client::models::{DocumentType, TerritoryType, TerritoryTypeParseError}; use serde::{Deserialize, Serialize}; use std::{collections::HashMap, str::FromStr}; use uuid::Uuid; use warp::{ http::{header, Method}, multipart::FormData, Filter, Rejection, Reply, }; pub fn update_handler( state_manager: StateManager, pars_origin: &str, ) -> impl Filter<Extract = impl Reply, Error = Rejection> + Clone { let cors = warp::cors() .allow_origin(pars_origin) .allow_headers(&[header::AUTHORIZATION]) .allow_methods(&[Method::POST]) .build(); warp::path!("pars" / String) .and(warp::post()) // Max upload size is set to a very high limit here as the actual limit should be managed using istio .and(warp::multipart::form().max_length(1000 * 1024 * 1024)) .and(with_state(state_manager)) .and(warp::header("username")) .and_then(update_pars_handler) .with(cors) } pub fn handler( state_manager: StateManager, pars_origin: &str, ) -> impl Filter<Extract = impl Reply, Error = Rejection> + Clone { let cors = warp::cors() .allow_origin(pars_origin) .allow_headers(&[ header::AUTHORIZATION, header::HeaderName::from_bytes(b"username").unwrap(), ]) .allow_methods(&[Method::POST]) .build(); warp::path!("pars") .and(warp::post()) // Max upload size is set to a very high limit here as the actual limit should be managed using istio .and(warp::multipart::form().max_length(1000 * 1024 * 1024)) .and(with_state(state_manager)) .and(warp::header("username")) .and_then(upload_pars_handler) .with(cors) } async fn add_file_to_temporary_blob_storage( _job_id: Uuid, file_data: &[u8], licence_number: &str, ) -> Result<StorageFile, SubmissionError> { let storage_client = AzureBlobStorage::temporary(); let storage_file = storage_client .add_file(file_data, licence_number, HashMap::new()) .await .map_err(|e| SubmissionError::BlobStorageError { message: format!("Problem talking to temporary blob storage: {:?}", e), })?; Ok(storage_file)
} fn document_from_form_data(storage_file: StorageFile, metadata: BlobMetadata) -> Document { Document { id: metadata.file_name.to_string(), name: metadata.title.to_string(), document_type: DocumentType::Par, author: metadata.author.to_string(), products: metadata.product_names.to_vec_string(), keywords: match metadata.keywords { Some(a) => Some(a.to_vec_string()), None => None, }, pl_number: metadata.pl_number, territory: metadata.territory, active_substances: metadata.active_substances.to_vec_string(), file_source: FileSource::TemporaryAzureBlobStorage, file_path: storage_file.name, } } async fn queue_pars_upload( form_data: FormData, uploader_email: String, state_manager: impl JobStatusClient, ) -> Result<Vec<Uuid>, Rejection> { let (metadatas, file_data) = read_pars_upload(form_data).await.map_err(|e| { tracing::debug!("Error reading PARS upload: {:?}", e); warp::reject::custom(e) })?; let mut job_ids = Vec::with_capacity(metadatas.len()); for metadata in metadatas { let job_id = accept_job(&state_manager).await?.id; job_ids.push(job_id); let storage_file = add_file_to_temporary_blob_storage(job_id, &file_data, &metadata.pl_number) .await .map_err(warp::reject::custom)?; let document = document_from_form_data(storage_file, metadata); check_in_document_handler(document, &state_manager, Some(uploader_email.clone())).await?; } Ok(job_ids) } async fn update_pars_handler( existing_par_identifier: String, form_data: FormData, state_manager: StateManager, username: String, ) -> Result<impl Reply, Rejection> { let delete = delete_document_handler( UniqueDocumentIdentifier::MetadataStorageName(existing_par_identifier), &state_manager, Some(username.clone()), ) .await?; let upload = queue_upload_pars_job(form_data, state_manager, username).await?; Ok(warp::reply::json(&UpdateResponse { delete, upload })) } async fn upload_pars_handler( form_data: FormData, state_manager: StateManager, username: String, ) -> Result<impl Reply, Rejection> { let job_ids = queue_upload_pars_job(form_data, state_manager, username).await?; Ok(warp::reply::json(&UploadResponse { job_ids })) } async fn queue_upload_pars_job( form_data: FormData, state_manager: StateManager, username: String, ) -> Result<Vec<Uuid>, Rejection> { let request_id = Uuid::new_v4(); let span = tracing::info_span!("PARS upload", request_id = request_id.to_string().as_str()); let _enter = span.enter(); tracing::debug!("Received PARS submission"); tracing::info!("Uploader email: {}", username); Ok(queue_pars_upload(form_data, username, state_manager).await?) 
} #[derive(Debug, Serialize)] struct UploadResponse { job_ids: Vec<Uuid>, } #[derive(Debug, Serialize)] struct UpdateResponse { delete: JobStatusResponse, upload: Vec<Uuid>, } async fn read_pars_upload( form_data: FormData, ) -> Result<(Vec<BlobMetadata>, Vec<u8>), SubmissionError> { let fields = collect_fields(form_data) .await .map_err(|error| SubmissionError::UploadError { error })?; let GroupedFields { products, file_name, file_data, } = groups_fields_by_product(fields)?; let metadatas = products .into_iter() .map(|fields| product_form_data_to_blob_metadata(file_name.clone(), fields)) .collect::<Result<_, _>>()?; Ok((metadatas, file_data)) } #[derive(Debug)] struct GroupedFields { products: Vec<Vec<Field>>, file_name: String, file_data: Vec<u8>, } fn groups_fields_by_product(fields: Vec<Field>) -> Result<GroupedFields, SubmissionError> { let mut products = Vec::new(); let mut file_field = None; for field in fields { if field.name == "file" { file_field = Some(field.value); continue; } if field.name == "product_name" { products.push(vec![]); } match products.last_mut() { Some(group) => { group.push(field); } None => { let group = vec![field]; products.push(group); } } } let file_name = file_field .as_ref() .and_then(|field| field.file_name()) .ok_or(SubmissionError::MissingField { name: "file" })? .to_string(); let file_data = file_field .and_then(|field| field.into_file_data()) .ok_or(SubmissionError::MissingField { name: "file" })?; Ok(GroupedFields { products, file_name, file_data, }) } fn product_form_data_to_blob_metadata( file_name: String, fields: Vec<Field>, ) -> Result<BlobMetadata, SubmissionError> { let product_name = get_field_as_uppercase_string(&fields, "product_name")?; let product_names = vec![product_name]; let title = get_field_as_uppercase_string(&fields, "title")?; let pl_number = get_field_as_uppercase_string(&fields, "licence_number")?; let active_substances = fields .iter() .filter(|field| field.name == "active_substance") .filter_map(|field| field.value.value()) .map(|s| s.to_uppercase()) .collect::<Vec<String>>(); let territory = fields .iter() .find(|field| field.name == "territory") .and_then(|field| field.value.value()) .map(|s| TerritoryType::from_str(s)) .transpose()?; let author = "".to_string(); Ok(BlobMetadata::new( file_name, DocumentType::Par, title, pl_number, territory, product_names, active_substances, author, None, )) } fn get_field_as_uppercase_string( fields: &[Field], field_name: &'static str, ) -> Result<String, SubmissionError> { fields .iter() .find(|field| field.name == field_name) .and_then(|field| field.value.value()) .ok_or(SubmissionError::MissingField { name: field_name }) .map(|s| s.to_uppercase()) } #[derive(Debug)] enum SubmissionError { UploadError { error: anyhow::Error, }, BlobStorageError { message: String, // should maybe be StorageClientError but that is not // Send + Sync so then we can't implement warp::reject::Reject }, MissingField { name: &'static str, }, UnknownTerritoryType { error: TerritoryTypeParseError, }, } impl From<TerritoryTypeParseError> for SubmissionError { fn from(error: TerritoryTypeParseError) -> Self { SubmissionError::UnknownTerritoryType { error } } } impl warp::reject::Reject for SubmissionError {} #[derive(Debug, Serialize, Deserialize)] struct Claims { sub: String, preferred_username: String, } #[cfg(test)] mod tests { use super::*; use crate::multipart_form_data::UploadFieldValue; use pretty_assertions::assert_eq; fn text_field(name: &str, value: &str) -> Field { Field { name: name.into(), value: 
UploadFieldValue::Text { value: value.into(), }, } } #[test] fn converts_form_data_to_metadata() { let file_name = "file"; let result = product_form_data_to_blob_metadata( file_name.into(), vec![ text_field("product_name", "Feel good pills"), text_field("active_substance", "Ibuprofen"), text_field("active_substance", "Temazepam"), text_field( "title", "Feel good pills Really Strong High Dose THR 12345/1234", ), text_field("licence_number", "THR 12345/1234"), text_field("territory", "UK"), ], ) .unwrap(); assert_eq!( result, BlobMetadata { file_name: file_name.into(), doc_type: DocumentType::Par, title: "FEEL GOOD PILLS REALLY STRONG HIGH DOSE THR 12345/1234".into(), pl_number: "THR 12345/1234".into(), territory: Some(TerritoryType::UK), product_names: vec!["FEEL GOOD PILLS".into()].into(), active_substances: vec!["IBUPROFEN".into(), "TEMAZEPAM".into()].into(), author: "".into(), keywords: None } ) } }
random_line_split
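The `groups_fields_by_product` logic in the record above starts a new group each time a `product_name` field arrives and appends every other field to the most recently started group. Below is a minimal standalone sketch of that grouping rule only; `SimpleField` and `group_by_product` are hypothetical stand-ins (the real `Field`/`UploadFieldValue` types live in the crate's `multipart_form_data` module and also carry file data).

```rust
// Standalone sketch of the product-grouping rule used in groups_fields_by_product.
// `SimpleField` is a hypothetical stand-in for the crate's multipart `Field` type.
#[derive(Debug, Clone)]
struct SimpleField {
    name: String,
    value: String,
}

fn group_by_product(fields: Vec<SimpleField>) -> Vec<Vec<SimpleField>> {
    let mut products: Vec<Vec<SimpleField>> = Vec::new();
    for field in fields {
        // Every "product_name" field opens a new group.
        if field.name == "product_name" {
            products.push(Vec::new());
        }
        match products.last_mut() {
            Some(group) => group.push(field),
            // A field arriving before any product_name still starts a group.
            None => products.push(vec![field]),
        }
    }
    products
}

fn main() {
    let names = ["product_name", "title", "active_substance", "product_name", "title"];
    let fields = names
        .iter()
        .map(|n| SimpleField { name: (*n).to_string(), value: String::new() })
        .collect::<Vec<_>>();
    let groups = group_by_product(fields);
    assert_eq!(groups.len(), 2);
    assert_eq!(groups[0].len(), 3);
    println!("{} product groups", groups.len());
}
```

The design keeps a single pass over the multipart fields, so field order in the form is what defines which product each title, licence number and active substance belongs to.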
pars_upload.rs
use crate::{ create_manager::models::BlobMetadata, document_manager::{accept_job, check_in_document_handler, delete_document_handler}, models::{Document, FileSource, JobStatusResponse, UniqueDocumentIdentifier}, multipart_form_data::{collect_fields, Field}, state_manager::{with_state, JobStatusClient, StateManager}, storage_client::{models::StorageFile, AzureBlobStorage, StorageClient}, }; use search_client::models::{DocumentType, TerritoryType, TerritoryTypeParseError}; use serde::{Deserialize, Serialize}; use std::{collections::HashMap, str::FromStr}; use uuid::Uuid; use warp::{ http::{header, Method}, multipart::FormData, Filter, Rejection, Reply, }; pub fn update_handler( state_manager: StateManager, pars_origin: &str, ) -> impl Filter<Extract = impl Reply, Error = Rejection> + Clone { let cors = warp::cors() .allow_origin(pars_origin) .allow_headers(&[header::AUTHORIZATION]) .allow_methods(&[Method::POST]) .build(); warp::path!("pars" / String) .and(warp::post()) // Max upload size is set to a very high limit here as the actual limit should be managed using istio .and(warp::multipart::form().max_length(1000 * 1024 * 1024)) .and(with_state(state_manager)) .and(warp::header("username")) .and_then(update_pars_handler) .with(cors) } pub fn handler( state_manager: StateManager, pars_origin: &str, ) -> impl Filter<Extract = impl Reply, Error = Rejection> + Clone { let cors = warp::cors() .allow_origin(pars_origin) .allow_headers(&[ header::AUTHORIZATION, header::HeaderName::from_bytes(b"username").unwrap(), ]) .allow_methods(&[Method::POST]) .build(); warp::path!("pars") .and(warp::post()) // Max upload size is set to a very high limit here as the actual limit should be managed using istio .and(warp::multipart::form().max_length(1000 * 1024 * 1024)) .and(with_state(state_manager)) .and(warp::header("username")) .and_then(upload_pars_handler) .with(cors) } async fn add_file_to_temporary_blob_storage( _job_id: Uuid, file_data: &[u8], licence_number: &str, ) -> Result<StorageFile, SubmissionError> { let storage_client = AzureBlobStorage::temporary(); let storage_file = storage_client .add_file(file_data, licence_number, HashMap::new()) .await .map_err(|e| SubmissionError::BlobStorageError { message: format!("Problem talking to temporary blob storage: {:?}", e), })?; Ok(storage_file) } fn document_from_form_data(storage_file: StorageFile, metadata: BlobMetadata) -> Document { Document { id: metadata.file_name.to_string(), name: metadata.title.to_string(), document_type: DocumentType::Par, author: metadata.author.to_string(), products: metadata.product_names.to_vec_string(), keywords: match metadata.keywords { Some(a) => Some(a.to_vec_string()), None => None, }, pl_number: metadata.pl_number, territory: metadata.territory, active_substances: metadata.active_substances.to_vec_string(), file_source: FileSource::TemporaryAzureBlobStorage, file_path: storage_file.name, } } async fn queue_pars_upload( form_data: FormData, uploader_email: String, state_manager: impl JobStatusClient, ) -> Result<Vec<Uuid>, Rejection> { let (metadatas, file_data) = read_pars_upload(form_data).await.map_err(|e| { tracing::debug!("Error reading PARS upload: {:?}", e); warp::reject::custom(e) })?; let mut job_ids = Vec::with_capacity(metadatas.len()); for metadata in metadatas { let job_id = accept_job(&state_manager).await?.id; job_ids.push(job_id); let storage_file = add_file_to_temporary_blob_storage(job_id, &file_data, &metadata.pl_number) .await .map_err(warp::reject::custom)?; let document = 
document_from_form_data(storage_file, metadata); check_in_document_handler(document, &state_manager, Some(uploader_email.clone())).await?; } Ok(job_ids) } async fn update_pars_handler( existing_par_identifier: String, form_data: FormData, state_manager: StateManager, username: String, ) -> Result<impl Reply, Rejection> { let delete = delete_document_handler( UniqueDocumentIdentifier::MetadataStorageName(existing_par_identifier), &state_manager, Some(username.clone()), ) .await?; let upload = queue_upload_pars_job(form_data, state_manager, username).await?; Ok(warp::reply::json(&UpdateResponse { delete, upload })) } async fn upload_pars_handler( form_data: FormData, state_manager: StateManager, username: String, ) -> Result<impl Reply, Rejection> { let job_ids = queue_upload_pars_job(form_data, state_manager, username).await?; Ok(warp::reply::json(&UploadResponse { job_ids })) } async fn queue_upload_pars_job( form_data: FormData, state_manager: StateManager, username: String, ) -> Result<Vec<Uuid>, Rejection> { let request_id = Uuid::new_v4(); let span = tracing::info_span!("PARS upload", request_id = request_id.to_string().as_str()); let _enter = span.enter(); tracing::debug!("Received PARS submission"); tracing::info!("Uploader email: {}", username); Ok(queue_pars_upload(form_data, username, state_manager).await?) } #[derive(Debug, Serialize)] struct UploadResponse { job_ids: Vec<Uuid>, } #[derive(Debug, Serialize)] struct UpdateResponse { delete: JobStatusResponse, upload: Vec<Uuid>, } async fn read_pars_upload( form_data: FormData, ) -> Result<(Vec<BlobMetadata>, Vec<u8>), SubmissionError> { let fields = collect_fields(form_data) .await .map_err(|error| SubmissionError::UploadError { error })?; let GroupedFields { products, file_name, file_data, } = groups_fields_by_product(fields)?; let metadatas = products .into_iter() .map(|fields| product_form_data_to_blob_metadata(file_name.clone(), fields)) .collect::<Result<_, _>>()?; Ok((metadatas, file_data)) } #[derive(Debug)] struct GroupedFields { products: Vec<Vec<Field>>, file_name: String, file_data: Vec<u8>, } fn groups_fields_by_product(fields: Vec<Field>) -> Result<GroupedFields, SubmissionError>
products.push(group); } } } let file_name = file_field .as_ref() .and_then(|field| field.file_name()) .ok_or(SubmissionError::MissingField { name: "file" })? .to_string(); let file_data = file_field .and_then(|field| field.into_file_data()) .ok_or(SubmissionError::MissingField { name: "file" })?; Ok(GroupedFields { products, file_name, file_data, }) } fn product_form_data_to_blob_metadata( file_name: String, fields: Vec<Field>, ) -> Result<BlobMetadata, SubmissionError> { let product_name = get_field_as_uppercase_string(&fields, "product_name")?; let product_names = vec![product_name]; let title = get_field_as_uppercase_string(&fields, "title")?; let pl_number = get_field_as_uppercase_string(&fields, "licence_number")?; let active_substances = fields .iter() .filter(|field| field.name == "active_substance") .filter_map(|field| field.value.value()) .map(|s| s.to_uppercase()) .collect::<Vec<String>>(); let territory = fields .iter() .find(|field| field.name == "territory") .and_then(|field| field.value.value()) .map(|s| TerritoryType::from_str(s)) .transpose()?; let author = "".to_string(); Ok(BlobMetadata::new( file_name, DocumentType::Par, title, pl_number, territory, product_names, active_substances, author, None, )) } fn get_field_as_uppercase_string( fields: &[Field], field_name: &'static str, ) -> Result<String, SubmissionError> { fields .iter() .find(|field| field.name == field_name) .and_then(|field| field.value.value()) .ok_or(SubmissionError::MissingField { name: field_name }) .map(|s| s.to_uppercase()) } #[derive(Debug)] enum SubmissionError { UploadError { error: anyhow::Error, }, BlobStorageError { message: String, // should maybe be StorageClientError but that is not // Send + Sync so then we can't implement warp::reject::Reject }, MissingField { name: &'static str, }, UnknownTerritoryType { error: TerritoryTypeParseError, }, } impl From<TerritoryTypeParseError> for SubmissionError { fn from(error: TerritoryTypeParseError) -> Self { SubmissionError::UnknownTerritoryType { error } } } impl warp::reject::Reject for SubmissionError {} #[derive(Debug, Serialize, Deserialize)] struct Claims { sub: String, preferred_username: String, } #[cfg(test)] mod tests { use super::*; use crate::multipart_form_data::UploadFieldValue; use pretty_assertions::assert_eq; fn text_field(name: &str, value: &str) -> Field { Field { name: name.into(), value: UploadFieldValue::Text { value: value.into(), }, } } #[test] fn converts_form_data_to_metadata() { let file_name = "file"; let result = product_form_data_to_blob_metadata( file_name.into(), vec![ text_field("product_name", "Feel good pills"), text_field("active_substance", "Ibuprofen"), text_field("active_substance", "Temazepam"), text_field( "title", "Feel good pills Really Strong High Dose THR 12345/1234", ), text_field("licence_number", "THR 12345/1234"), text_field("territory", "UK"), ], ) .unwrap(); assert_eq!( result, BlobMetadata { file_name: file_name.into(), doc_type: DocumentType::Par, title: "FEEL GOOD PILLS REALLY STRONG HIGH DOSE THR 12345/1234".into(), pl_number: "THR 12345/1234".into(), territory: Some(TerritoryType::UK), product_names: vec!["FEEL GOOD PILLS".into()].into(), active_substances: vec!["IBUPROFEN".into(), "TEMAZEPAM".into()].into(), author: "".into(), keywords: None } ) } }
{ let mut products = Vec::new(); let mut file_field = None; for field in fields { if field.name == "file" { file_field = Some(field.value); continue; } if field.name == "product_name" { products.push(vec![]); } match products.last_mut() { Some(group) => { group.push(field); } None => { let group = vec![field];
identifier_body
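The file handling in `groups_fields_by_product` reads the file name through a shared borrow (`as_ref()` + `and_then`) before consuming the same `Option` to take ownership of the bytes with `into_file_data()`. A small hedged sketch of that borrow-then-consume pattern; `Upload` and its methods are hypothetical stand-ins for the crate's multipart value type.

```rust
// Hypothetical stand-in for the multipart upload value held in `file_field`.
struct Upload {
    file_name: Option<String>,
    bytes: Vec<u8>,
}

impl Upload {
    // Borrowing accessor, analogous to Field::file_name() in the real code.
    fn file_name(&self) -> Option<&str> {
        self.file_name.as_deref()
    }
    // Consuming accessor, analogous to Field::into_file_data().
    fn into_file_data(self) -> Option<Vec<u8>> {
        Some(self.bytes)
    }
}

fn split_upload(file_field: Option<Upload>) -> Result<(String, Vec<u8>), &'static str> {
    // First read the name through a shared borrow and copy it out...
    let file_name = file_field
        .as_ref()
        .and_then(|f| f.file_name())
        .ok_or("missing file")?
        .to_string();
    // ...then consume the Option to take ownership of the data.
    let file_data = file_field
        .and_then(|f| f.into_file_data())
        .ok_or("missing file")?;
    Ok((file_name, file_data))
}

fn main() {
    let upload = Upload {
        file_name: Some("report.pdf".to_string()),
        bytes: vec![1, 2, 3],
    };
    let (name, data) = split_upload(Some(upload)).unwrap();
    println!("{} ({} bytes)", name, data.len());
}
```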
pars_upload.rs
use crate::{ create_manager::models::BlobMetadata, document_manager::{accept_job, check_in_document_handler, delete_document_handler}, models::{Document, FileSource, JobStatusResponse, UniqueDocumentIdentifier}, multipart_form_data::{collect_fields, Field}, state_manager::{with_state, JobStatusClient, StateManager}, storage_client::{models::StorageFile, AzureBlobStorage, StorageClient}, }; use search_client::models::{DocumentType, TerritoryType, TerritoryTypeParseError}; use serde::{Deserialize, Serialize}; use std::{collections::HashMap, str::FromStr}; use uuid::Uuid; use warp::{ http::{header, Method}, multipart::FormData, Filter, Rejection, Reply, }; pub fn update_handler( state_manager: StateManager, pars_origin: &str, ) -> impl Filter<Extract = impl Reply, Error = Rejection> + Clone { let cors = warp::cors() .allow_origin(pars_origin) .allow_headers(&[header::AUTHORIZATION]) .allow_methods(&[Method::POST]) .build(); warp::path!("pars" / String) .and(warp::post()) // Max upload size is set to a very high limit here as the actual limit should be managed using istio .and(warp::multipart::form().max_length(1000 * 1024 * 1024)) .and(with_state(state_manager)) .and(warp::header("username")) .and_then(update_pars_handler) .with(cors) } pub fn handler( state_manager: StateManager, pars_origin: &str, ) -> impl Filter<Extract = impl Reply, Error = Rejection> + Clone { let cors = warp::cors() .allow_origin(pars_origin) .allow_headers(&[ header::AUTHORIZATION, header::HeaderName::from_bytes(b"username").unwrap(), ]) .allow_methods(&[Method::POST]) .build(); warp::path!("pars") .and(warp::post()) // Max upload size is set to a very high limit here as the actual limit should be managed using istio .and(warp::multipart::form().max_length(1000 * 1024 * 1024)) .and(with_state(state_manager)) .and(warp::header("username")) .and_then(upload_pars_handler) .with(cors) } async fn add_file_to_temporary_blob_storage( _job_id: Uuid, file_data: &[u8], licence_number: &str, ) -> Result<StorageFile, SubmissionError> { let storage_client = AzureBlobStorage::temporary(); let storage_file = storage_client .add_file(file_data, licence_number, HashMap::new()) .await .map_err(|e| SubmissionError::BlobStorageError { message: format!("Problem talking to temporary blob storage: {:?}", e), })?; Ok(storage_file) } fn document_from_form_data(storage_file: StorageFile, metadata: BlobMetadata) -> Document { Document { id: metadata.file_name.to_string(), name: metadata.title.to_string(), document_type: DocumentType::Par, author: metadata.author.to_string(), products: metadata.product_names.to_vec_string(), keywords: match metadata.keywords { Some(a) => Some(a.to_vec_string()), None => None, }, pl_number: metadata.pl_number, territory: metadata.territory, active_substances: metadata.active_substances.to_vec_string(), file_source: FileSource::TemporaryAzureBlobStorage, file_path: storage_file.name, } } async fn queue_pars_upload( form_data: FormData, uploader_email: String, state_manager: impl JobStatusClient, ) -> Result<Vec<Uuid>, Rejection> { let (metadatas, file_data) = read_pars_upload(form_data).await.map_err(|e| { tracing::debug!("Error reading PARS upload: {:?}", e); warp::reject::custom(e) })?; let mut job_ids = Vec::with_capacity(metadatas.len()); for metadata in metadatas { let job_id = accept_job(&state_manager).await?.id; job_ids.push(job_id); let storage_file = add_file_to_temporary_blob_storage(job_id, &file_data, &metadata.pl_number) .await .map_err(warp::reject::custom)?; let document = 
document_from_form_data(storage_file, metadata); check_in_document_handler(document, &state_manager, Some(uploader_email.clone())).await?; } Ok(job_ids) } async fn update_pars_handler( existing_par_identifier: String, form_data: FormData, state_manager: StateManager, username: String, ) -> Result<impl Reply, Rejection> { let delete = delete_document_handler( UniqueDocumentIdentifier::MetadataStorageName(existing_par_identifier), &state_manager, Some(username.clone()), ) .await?; let upload = queue_upload_pars_job(form_data, state_manager, username).await?; Ok(warp::reply::json(&UpdateResponse { delete, upload })) } async fn upload_pars_handler( form_data: FormData, state_manager: StateManager, username: String, ) -> Result<impl Reply, Rejection> { let job_ids = queue_upload_pars_job(form_data, state_manager, username).await?; Ok(warp::reply::json(&UploadResponse { job_ids })) } async fn queue_upload_pars_job( form_data: FormData, state_manager: StateManager, username: String, ) -> Result<Vec<Uuid>, Rejection> { let request_id = Uuid::new_v4(); let span = tracing::info_span!("PARS upload", request_id = request_id.to_string().as_str()); let _enter = span.enter(); tracing::debug!("Received PARS submission"); tracing::info!("Uploader email: {}", username); Ok(queue_pars_upload(form_data, username, state_manager).await?) } #[derive(Debug, Serialize)] struct UploadResponse { job_ids: Vec<Uuid>, } #[derive(Debug, Serialize)] struct UpdateResponse { delete: JobStatusResponse, upload: Vec<Uuid>, } async fn read_pars_upload( form_data: FormData, ) -> Result<(Vec<BlobMetadata>, Vec<u8>), SubmissionError> { let fields = collect_fields(form_data) .await .map_err(|error| SubmissionError::UploadError { error })?; let GroupedFields { products, file_name, file_data, } = groups_fields_by_product(fields)?; let metadatas = products .into_iter() .map(|fields| product_form_data_to_blob_metadata(file_name.clone(), fields)) .collect::<Result<_, _>>()?; Ok((metadatas, file_data)) } #[derive(Debug)] struct GroupedFields { products: Vec<Vec<Field>>, file_name: String, file_data: Vec<u8>, } fn groups_fields_by_product(fields: Vec<Field>) -> Result<GroupedFields, SubmissionError> { let mut products = Vec::new(); let mut file_field = None; for field in fields { if field.name == "file" { file_field = Some(field.value); continue; } if field.name == "product_name" { products.push(vec![]); } match products.last_mut() { Some(group) =>
None => { let group = vec![field]; products.push(group); } } } let file_name = file_field .as_ref() .and_then(|field| field.file_name()) .ok_or(SubmissionError::MissingField { name: "file" })? .to_string(); let file_data = file_field .and_then(|field| field.into_file_data()) .ok_or(SubmissionError::MissingField { name: "file" })?; Ok(GroupedFields { products, file_name, file_data, }) } fn product_form_data_to_blob_metadata( file_name: String, fields: Vec<Field>, ) -> Result<BlobMetadata, SubmissionError> { let product_name = get_field_as_uppercase_string(&fields, "product_name")?; let product_names = vec![product_name]; let title = get_field_as_uppercase_string(&fields, "title")?; let pl_number = get_field_as_uppercase_string(&fields, "licence_number")?; let active_substances = fields .iter() .filter(|field| field.name == "active_substance") .filter_map(|field| field.value.value()) .map(|s| s.to_uppercase()) .collect::<Vec<String>>(); let territory = fields .iter() .find(|field| field.name == "territory") .and_then(|field| field.value.value()) .map(|s| TerritoryType::from_str(s)) .transpose()?; let author = "".to_string(); Ok(BlobMetadata::new( file_name, DocumentType::Par, title, pl_number, territory, product_names, active_substances, author, None, )) } fn get_field_as_uppercase_string( fields: &[Field], field_name: &'static str, ) -> Result<String, SubmissionError> { fields .iter() .find(|field| field.name == field_name) .and_then(|field| field.value.value()) .ok_or(SubmissionError::MissingField { name: field_name }) .map(|s| s.to_uppercase()) } #[derive(Debug)] enum SubmissionError { UploadError { error: anyhow::Error, }, BlobStorageError { message: String, // should maybe be StorageClientError but that is not // Send + Sync so then we can't implement warp::reject::Reject }, MissingField { name: &'static str, }, UnknownTerritoryType { error: TerritoryTypeParseError, }, } impl From<TerritoryTypeParseError> for SubmissionError { fn from(error: TerritoryTypeParseError) -> Self { SubmissionError::UnknownTerritoryType { error } } } impl warp::reject::Reject for SubmissionError {} #[derive(Debug, Serialize, Deserialize)] struct Claims { sub: String, preferred_username: String, } #[cfg(test)] mod tests { use super::*; use crate::multipart_form_data::UploadFieldValue; use pretty_assertions::assert_eq; fn text_field(name: &str, value: &str) -> Field { Field { name: name.into(), value: UploadFieldValue::Text { value: value.into(), }, } } #[test] fn converts_form_data_to_metadata() { let file_name = "file"; let result = product_form_data_to_blob_metadata( file_name.into(), vec![ text_field("product_name", "Feel good pills"), text_field("active_substance", "Ibuprofen"), text_field("active_substance", "Temazepam"), text_field( "title", "Feel good pills Really Strong High Dose THR 12345/1234", ), text_field("licence_number", "THR 12345/1234"), text_field("territory", "UK"), ], ) .unwrap(); assert_eq!( result, BlobMetadata { file_name: file_name.into(), doc_type: DocumentType::Par, title: "FEEL GOOD PILLS REALLY STRONG HIGH DOSE THR 12345/1234".into(), pl_number: "THR 12345/1234".into(), territory: Some(TerritoryType::UK), product_names: vec!["FEEL GOOD PILLS".into()].into(), active_substances: vec!["IBUPROFEN".into(), "TEMAZEPAM".into()].into(), author: "".into(), keywords: None } ) } }
{ group.push(field); }
conditional_block
pars_upload.rs
use crate::{ create_manager::models::BlobMetadata, document_manager::{accept_job, check_in_document_handler, delete_document_handler}, models::{Document, FileSource, JobStatusResponse, UniqueDocumentIdentifier}, multipart_form_data::{collect_fields, Field}, state_manager::{with_state, JobStatusClient, StateManager}, storage_client::{models::StorageFile, AzureBlobStorage, StorageClient}, }; use search_client::models::{DocumentType, TerritoryType, TerritoryTypeParseError}; use serde::{Deserialize, Serialize}; use std::{collections::HashMap, str::FromStr}; use uuid::Uuid; use warp::{ http::{header, Method}, multipart::FormData, Filter, Rejection, Reply, }; pub fn update_handler( state_manager: StateManager, pars_origin: &str, ) -> impl Filter<Extract = impl Reply, Error = Rejection> + Clone { let cors = warp::cors() .allow_origin(pars_origin) .allow_headers(&[header::AUTHORIZATION]) .allow_methods(&[Method::POST]) .build(); warp::path!("pars" / String) .and(warp::post()) // Max upload size is set to a very high limit here as the actual limit should be managed using istio .and(warp::multipart::form().max_length(1000 * 1024 * 1024)) .and(with_state(state_manager)) .and(warp::header("username")) .and_then(update_pars_handler) .with(cors) } pub fn handler( state_manager: StateManager, pars_origin: &str, ) -> impl Filter<Extract = impl Reply, Error = Rejection> + Clone { let cors = warp::cors() .allow_origin(pars_origin) .allow_headers(&[ header::AUTHORIZATION, header::HeaderName::from_bytes(b"username").unwrap(), ]) .allow_methods(&[Method::POST]) .build(); warp::path!("pars") .and(warp::post()) // Max upload size is set to a very high limit here as the actual limit should be managed using istio .and(warp::multipart::form().max_length(1000 * 1024 * 1024)) .and(with_state(state_manager)) .and(warp::header("username")) .and_then(upload_pars_handler) .with(cors) } async fn add_file_to_temporary_blob_storage( _job_id: Uuid, file_data: &[u8], licence_number: &str, ) -> Result<StorageFile, SubmissionError> { let storage_client = AzureBlobStorage::temporary(); let storage_file = storage_client .add_file(file_data, licence_number, HashMap::new()) .await .map_err(|e| SubmissionError::BlobStorageError { message: format!("Problem talking to temporary blob storage: {:?}", e), })?; Ok(storage_file) } fn document_from_form_data(storage_file: StorageFile, metadata: BlobMetadata) -> Document { Document { id: metadata.file_name.to_string(), name: metadata.title.to_string(), document_type: DocumentType::Par, author: metadata.author.to_string(), products: metadata.product_names.to_vec_string(), keywords: match metadata.keywords { Some(a) => Some(a.to_vec_string()), None => None, }, pl_number: metadata.pl_number, territory: metadata.territory, active_substances: metadata.active_substances.to_vec_string(), file_source: FileSource::TemporaryAzureBlobStorage, file_path: storage_file.name, } } async fn queue_pars_upload( form_data: FormData, uploader_email: String, state_manager: impl JobStatusClient, ) -> Result<Vec<Uuid>, Rejection> { let (metadatas, file_data) = read_pars_upload(form_data).await.map_err(|e| { tracing::debug!("Error reading PARS upload: {:?}", e); warp::reject::custom(e) })?; let mut job_ids = Vec::with_capacity(metadatas.len()); for metadata in metadatas { let job_id = accept_job(&state_manager).await?.id; job_ids.push(job_id); let storage_file = add_file_to_temporary_blob_storage(job_id, &file_data, &metadata.pl_number) .await .map_err(warp::reject::custom)?; let document = 
document_from_form_data(storage_file, metadata); check_in_document_handler(document, &state_manager, Some(uploader_email.clone())).await?; } Ok(job_ids) } async fn update_pars_handler( existing_par_identifier: String, form_data: FormData, state_manager: StateManager, username: String, ) -> Result<impl Reply, Rejection> { let delete = delete_document_handler( UniqueDocumentIdentifier::MetadataStorageName(existing_par_identifier), &state_manager, Some(username.clone()), ) .await?; let upload = queue_upload_pars_job(form_data, state_manager, username).await?; Ok(warp::reply::json(&UpdateResponse { delete, upload })) } async fn upload_pars_handler( form_data: FormData, state_manager: StateManager, username: String, ) -> Result<impl Reply, Rejection> { let job_ids = queue_upload_pars_job(form_data, state_manager, username).await?; Ok(warp::reply::json(&UploadResponse { job_ids })) } async fn queue_upload_pars_job( form_data: FormData, state_manager: StateManager, username: String, ) -> Result<Vec<Uuid>, Rejection> { let request_id = Uuid::new_v4(); let span = tracing::info_span!("PARS upload", request_id = request_id.to_string().as_str()); let _enter = span.enter(); tracing::debug!("Received PARS submission"); tracing::info!("Uploader email: {}", username); Ok(queue_pars_upload(form_data, username, state_manager).await?) } #[derive(Debug, Serialize)] struct
{ job_ids: Vec<Uuid>, } #[derive(Debug, Serialize)] struct UpdateResponse { delete: JobStatusResponse, upload: Vec<Uuid>, } async fn read_pars_upload( form_data: FormData, ) -> Result<(Vec<BlobMetadata>, Vec<u8>), SubmissionError> { let fields = collect_fields(form_data) .await .map_err(|error| SubmissionError::UploadError { error })?; let GroupedFields { products, file_name, file_data, } = groups_fields_by_product(fields)?; let metadatas = products .into_iter() .map(|fields| product_form_data_to_blob_metadata(file_name.clone(), fields)) .collect::<Result<_, _>>()?; Ok((metadatas, file_data)) } #[derive(Debug)] struct GroupedFields { products: Vec<Vec<Field>>, file_name: String, file_data: Vec<u8>, } fn groups_fields_by_product(fields: Vec<Field>) -> Result<GroupedFields, SubmissionError> { let mut products = Vec::new(); let mut file_field = None; for field in fields { if field.name == "file" { file_field = Some(field.value); continue; } if field.name == "product_name" { products.push(vec![]); } match products.last_mut() { Some(group) => { group.push(field); } None => { let group = vec![field]; products.push(group); } } } let file_name = file_field .as_ref() .and_then(|field| field.file_name()) .ok_or(SubmissionError::MissingField { name: "file" })? .to_string(); let file_data = file_field .and_then(|field| field.into_file_data()) .ok_or(SubmissionError::MissingField { name: "file" })?; Ok(GroupedFields { products, file_name, file_data, }) } fn product_form_data_to_blob_metadata( file_name: String, fields: Vec<Field>, ) -> Result<BlobMetadata, SubmissionError> { let product_name = get_field_as_uppercase_string(&fields, "product_name")?; let product_names = vec![product_name]; let title = get_field_as_uppercase_string(&fields, "title")?; let pl_number = get_field_as_uppercase_string(&fields, "licence_number")?; let active_substances = fields .iter() .filter(|field| field.name == "active_substance") .filter_map(|field| field.value.value()) .map(|s| s.to_uppercase()) .collect::<Vec<String>>(); let territory = fields .iter() .find(|field| field.name == "territory") .and_then(|field| field.value.value()) .map(|s| TerritoryType::from_str(s)) .transpose()?; let author = "".to_string(); Ok(BlobMetadata::new( file_name, DocumentType::Par, title, pl_number, territory, product_names, active_substances, author, None, )) } fn get_field_as_uppercase_string( fields: &[Field], field_name: &'static str, ) -> Result<String, SubmissionError> { fields .iter() .find(|field| field.name == field_name) .and_then(|field| field.value.value()) .ok_or(SubmissionError::MissingField { name: field_name }) .map(|s| s.to_uppercase()) } #[derive(Debug)] enum SubmissionError { UploadError { error: anyhow::Error, }, BlobStorageError { message: String, // should maybe be StorageClientError but that is not // Send + Sync so then we can't implement warp::reject::Reject }, MissingField { name: &'static str, }, UnknownTerritoryType { error: TerritoryTypeParseError, }, } impl From<TerritoryTypeParseError> for SubmissionError { fn from(error: TerritoryTypeParseError) -> Self { SubmissionError::UnknownTerritoryType { error } } } impl warp::reject::Reject for SubmissionError {} #[derive(Debug, Serialize, Deserialize)] struct Claims { sub: String, preferred_username: String, } #[cfg(test)] mod tests { use super::*; use crate::multipart_form_data::UploadFieldValue; use pretty_assertions::assert_eq; fn text_field(name: &str, value: &str) -> Field { Field { name: name.into(), value: UploadFieldValue::Text { value: value.into(), }, } } 
#[test] fn converts_form_data_to_metadata() { let file_name = "file"; let result = product_form_data_to_blob_metadata( file_name.into(), vec![ text_field("product_name", "Feel good pills"), text_field("active_substance", "Ibuprofen"), text_field("active_substance", "Temazepam"), text_field( "title", "Feel good pills Really Strong High Dose THR 12345/1234", ), text_field("licence_number", "THR 12345/1234"), text_field("territory", "UK"), ], ) .unwrap(); assert_eq!( result, BlobMetadata { file_name: file_name.into(), doc_type: DocumentType::Par, title: "FEEL GOOD PILLS REALLY STRONG HIGH DOSE THR 12345/1234".into(), pl_number: "THR 12345/1234".into(), territory: Some(TerritoryType::UK), product_names: vec!["FEEL GOOD PILLS".into()].into(), active_substances: vec!["IBUPROFEN".into(), "TEMAZEPAM".into()].into(), author: "".into(), keywords: None } ) } }
UploadResponse
identifier_name
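The optional `territory` field above is parsed with `.map(|s| TerritoryType::from_str(s)).transpose()?`, which flips `Option<Result<_, _>>` into `Result<Option<_>, _>`: a missing field becomes `Ok(None)`, while a present but invalid value becomes an error. A short sketch of the same pattern, assuming a made-up `Territory` enum (the real `TerritoryType` and `TerritoryTypeParseError` come from the search_client crate).

```rust
use std::str::FromStr;

// Hypothetical stand-in for search_client's TerritoryType.
#[derive(Debug, PartialEq)]
enum Territory {
    UK,
    GB,
    NI,
}

#[derive(Debug)]
struct TerritoryParseError(String);

impl FromStr for Territory {
    type Err = TerritoryParseError;
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "UK" => Ok(Territory::UK),
            "GB" => Ok(Territory::GB),
            "NI" => Ok(Territory::NI),
            other => Err(TerritoryParseError(other.to_string())),
        }
    }
}

// Missing field -> Ok(None); present but invalid -> Err(...).
fn parse_optional_territory(raw: Option<&str>) -> Result<Option<Territory>, TerritoryParseError> {
    raw.map(Territory::from_str).transpose()
}

fn main() {
    assert_eq!(parse_optional_territory(None).unwrap(), None);
    assert_eq!(parse_optional_territory(Some("UK")).unwrap(), Some(Territory::UK));
    assert!(parse_optional_territory(Some("??")).is_err());
}
```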
main.rs
/// easiGrow /// /// by Paul White (Nov 2014--2017) /// written in rust (www.rust-lang.org) /// /// A program to match crack growth predictions to measurements. /// /// The program calculates fatigue crack growth rates and finds the /// optimum parameters of a crack growth model to match predictions /// with measurements. /// /// **easiGrow** is a standalone program but most of the calculations /// are done through calls to the associated **fatigue** library which /// is included. The main program is for doing anything that /// explicitly uses the command line flags inlcuding the optimisation /// module. These flages are used to build the **EasiOptions** data /// structure which is then used to generate the crack growth /// history. The optimisation generates a crack growth curve which it /// compares with a fractography file. It finds the error between /// these measurements and tries to minimise the sum errors through /// minimisation routines. /// /// Currently, none of the models has a memory effect, so it is ok to /// just start growing the crack from an iniital crack size that is /// smaller than the initial fracto data. The struct `grow::CrackState` /// also contains parameters that are passed along with the applied /// loading _kmin_ and _kmax_, so any memory variables should be added to /// this struct and will be availabe to be used by the _da/dn_ equation. /// The simplest memory effect that is included in the `CrackState` /// data is the plastic zone size, but there are no dadn equations /// currently using this. The memory effect does not appear to be /// strong in AA7050 material. /// /// Think of the program flow as /// /// 1. Read in data /// 2. Filter the sequence (turning point, rainflow, risefall, deadband etc.) and convert to cycles /// 3. Filter the list of cycles /// 4. If required, optimise any parameters /// 5. Perform a crack growth calculation /// 6. Write out requested output #[macro_use] extern crate clap; extern crate fatigue; extern crate log; extern crate env_logger; use std::f64::consts::FRAC_PI_2; use std::process; use std::collections::BTreeSet; use fatigue::{beta, cycle, dadn, fracto, grow, io, material, table, tag}; use options_clap::get_options_clap; use options::{OptimMethod, TerminatingOutput}; use fatigue::dadn::DaDn; use fatigue::COMMENT; use std::error::Error; use std::fs::File; use std::path::Path; use log::error; use std::io::Write; mod list; mod optimise; mod sweep; mod factors; mod options; mod options_clap; mod nelder; mod numbers; mod vector; #[cfg(feature = "GSL")] mod optimise_gsl; fn main() { env_logger::init(); // get all the data let materials = material::get_all_dadns(); let mut options = options::get_default_options(); get_options_clap("", &mut options); println!("{}easiGrow: version {}", COMMENT, crate_version!()); println!("{}", COMMENT); if options.verbosity == options::Verbosity::Verbose { println!("{}Options: ", COMMENT); println!("{}", options); } options::read_all_files(&mut options); // process all the modifications to the sequence options.sequence = cycle::process_seq_mods(&options.sequence, &options.seq_mods); // Get the cycles from either the external sequence file, command line or the cycle file. if options.cycle_infile!= "" && options.seq_infile!= "" { error!("Error: you have specified a sequence file '{}' as well as a cycle file '{}'. 
Specify only one.", options.seq_infile, options.cycle_infile); std::process::exit(2) } let unclosed = if options.cycle_infile == "" { let (cycles, left) = cycle::cycles_from_sequence(&options.sequence, &options.cycle_method); options.cycles = cycles; left } else { Vec::new() }; // process all the modifications to the cycles options.cycles = cycle::process_cycle_mods(&options.cycles, &options.cycle_mods); // Only keep those cycles that remain after filtering the cycles // and mark the turning points associated with those cycles. This // section is only for writing out the modified sequence, since // the filtered cycles are all that is used for crack growth. if options.seq_mods.cycles { let mut keep = vec![false; options.sequence.len()]; for cycle in &options.cycles { keep[cycle.max.index] = true; keep[cycle.min.index] = true; } options.sequence.retain(|s| keep[s.index]) } // Any request for file or info output will result in program // termination. This policy is to reduce the complexity for the // user as to what the program does. // Write out the sequence file. if let Some(outfile) = options.seq_mods.outfile { io::write_sequence(&outfile, &options.sequence); std::process::exit(0); } // Write out the cycles file. if let Some(outfile) = options.cycle_mods.outfile { io::write_cycles(&outfile, &options.cycles); std::process::exit(0); } // write out the beta by converting to a beta table. This can be // then read back in using the file: option for beta selection. if options.beta_outfile!= "" { let beta = beta::get_beta_fn(&options.beta, &options.component); let table_beta = beta.as_table(); // need to write to file let path = Path::new(&options.beta_outfile); let display = path.display(); let mut file = match File::create(&path) { // The `description` method of `io::Error` returns a string that // describes the error Err(why) => { error!( "Error: could not create the file '{}': {}.", display, Error::description(&why) ); std::process::exit(1) } Ok(file) => file, }; let _ = write!(file, "{}", table_beta); std::process::exit(0); } // write out summary information of the sequence match options.output { TerminatingOutput::Summary => { let seq_source = if options.seq_infile!= "" { options.seq_infile } else { // This is a little vague as the sequence could be either // the default sequence or overwritten with a supplied sequence. String::from("(Used specified sequence)") }; cycle::summarise_sequence(&seq_source, &options.sequence, &options.seq_mods); let cycle_source = if options.cycle_infile!= "" { options.cycle_infile } else { format!( "(Obtained from sequence using '{:?}' method)", options.cycle_method ) }; cycle::summarise_cycles( &cycle_source, &options.cycles, &unclosed, &options.cycle_mods, ); std::process::exit(0) } // write out extended list of options and methods TerminatingOutput::List => { list::print_list(); std::process::exit(0); } _ => (), } // get the correct material parameters for the dadn equation or // from the command line. If the params are not given, then get the // dadn material constants from the internal database. 
let mut params = options.params.clone(); if params.is_empty() { // extract out the appropriate material parameters from a file params = if options.dadn.starts_with("file:") { let filename = options.dadn.trim_start_matches("file:"); println!( "{}No parameters given, using the dk values in the dadn file {}", COMMENT, filename ); let table = table::Table::read_file(filename, true); // collapse down the dks and use these as the parameters for optimising table.variables() // or from the internal database. } else { println!( "{}No parameters given, obtaining from material library for {}", COMMENT, options.dadn ); match materials.iter().find(|m| options.dadn.starts_with(m.name)) { Some(m) => m.eqn.variables(), None => { error!("Error: Unknown dadn model {}", options.dadn); process::exit(1); } } } }; // Optimise the parameters to match the predicted crack growth // rates with the associated measured crack growth rates. if options.optimise.file!= "" { // optimisation scaling factors options.params = params.clone(); println!( "{}Now starting the optimisation with params {:?}...", COMMENT, options.params ); let mut factors = vec![1.0; params.len()]; // non-dimensionalised factors used for optimisation optimise_error(&options, &mut factors); println!("{}...finished the optimisation. ", COMMENT); println!("{}The normalised factors are {:?}", COMMENT, factors); // Rescale the parameters to include the optimised factors params = options .params .iter() .zip(factors) .map(|(p, f)| p * f) .collect::<Vec<f64>>(); println!("{}The scaled optimised factors are: {:?}", COMMENT, params); if options.scale == 0.0 { std::process::exit(0); // not an error if we have performed an optimisation } } // Grow the crack let history_all = generate_crack_history(&options, &params); // Lastly, now that we've grown the crack, check if we need to // generate and write out a pseudo image. if options.image.file!= "" { println!("Making a pseudo image..."); if options.image.file.ends_with(".svg") { fracto::write_svg_pseudo_image(&history_all, &options.image); println!("Image written to file '{}'", options.image.file); } else { error!("Error: Currently easigo can only generate svg. Please use a '.svg' suffix"); } } } #[cfg(not(feature = "GSL"))] fn optimise_error(options: &options::EasiOptions, mut factors: &mut [f64]) { match options.optimise.method { OptimMethod::Sweep => sweep::sweep(options, &mut factors), OptimMethod::Nelder => optimise::nelder_match_crack(options, &mut factors), OptimMethod::All => { sweep::sweep(options, &mut factors); optimise::nelder_match_crack(options, &mut factors) } }; } #[cfg(feature = "GSL")] fn optimise_error(options: &options::EasiOptions, mut factors: &mut [f64])
// Finally grow the crack with the current parameters which may have been optimised. // We exit here if the scale has not been set. Otherwise we // would go through and do a default calculation which confuses // people if they just want to start the program to see how to get // help. fn generate_crack_history(options: &options::EasiOptions, params: &[f64]) -> Vec<grow::History> { let dadn_eqn = dadn::make_model(&options.dadn, &params, String::from("unknown")); println!("{}da/dN equation: {}", COMMENT, dadn_eqn); let beta = beta::get_beta_fn(&options.beta, &options.component); if options.scale == 0.0 { error!( "Error: The sequence scale factor is 0. You need to set the scale factor (i.e. load or stress level) in order to perform a crack growth calculation. Try\n easigrow --help" ); std::process::exit(1); } if options.cycles.is_empty() { println!("Error: There are no closed cycles in sequence. Perhaps try the re-order sequence option -r"); std::process::exit(1); } // We define the initial state. If any memory effect is to be // included in the crack growth model, the meory should be in this // data structure. let init_crack = grow::CrackState::new(options.a.clone()); let mut history_all = Vec::new(); grow::display_history_header(&options.output_vars); // Non-dimensional ratios for beta factor let c = options.a[options.a.len() - 1]; let a_on_c = options.a[0] / c; let a_on_d = options.a[0] / options.component.forward; let c_on_b = c / options.component.sideways; let a_on_r = options.a[0] / options.component.radius; // phis is a vector of angles around the crack front. It depends // on the beta whether any or all of the angles are used. Most // just use the first and some use the last as well. let phis = vec![0.0, FRAC_PI_2]; // Initialise the history let init_history = grow::History { block: 0.0, stress: 0.0, cycle: cycle::Cycle { max: tag::Tag { value: 0.0, index: 0, }, min: tag::Tag { value: 0.0, index: 0, }, }, k: vec![0.0, 0.0], dk: vec![0.0, 0.0], beta: beta.beta(a_on_d, a_on_c, c_on_b, a_on_r, &phis), da: vec![0.0, 0.0], crack: init_crack, }; grow::display_history_line(&init_history, &options.output_vars, &options.component); let component = grow::FatigueTest { history: init_history, component: options.component.clone(), scale: options.scale, cycles: options.cycles.clone(), a_limit: options.a_limit.clone(), block_limit: options.block_limit, next_cycle: 0, dadn: dadn_eqn, beta, output_vars: options.output_vars.clone(), }; // make a hash set of the lines that are required for output let mut output_lines: BTreeSet<usize> = options.output_lines.iter().cloned().collect(); // if there are no lines in the output then put in the line for the first cycle if options .cycles .iter() .filter(|c| output_lines.contains(&c.max.index) || output_lines.contains(&c.min.index)) .count() == 0 { println!("output_lines {:?}", output_lines); println!( " Warning: There are no sequence lines in the cycle list and so there will be no crack growth output. Consider closing up cycles with re-order to use all sequence lines or include specific sequence lines that are in the cycle. Meanwhile, the output will be for the squence line in the first cycle at line {}.", options.cycles[0].max.index ); output_lines.insert(options.cycles[0].max.index); } // Start the crack growth. This loop steps through each cycle // repeating the cycles until a terminating condition stops the // growth and ends the for loop. 
for (cycle_no, history) in component.enumerate() { if grow::output_cycle_history(&history, options.output_every, &output_lines, cycle_no) { grow::display_history_line(&history, &options.output_vars, &options.component); } // Only keep the history if we are producing a fracto image. if options.image.file != "" { history_all.push(history); } } history_all }
{ match options.optimise.method { OptimMethod::Sweep => sweep::sweep(options, &mut factors), OptimMethod::Nelder => optimise::nelder_match_crack(&options, &mut factors), OptimMethod::Levenberg => optimise_gsl::gsl_match_crack(&options, &mut factors), OptimMethod::All => { sweep::sweep(options, &mut factors); optimise::nelder_match_crack(options, &mut factors); optimise_gsl::gsl_match_crack(options, &mut factors) } }; }
identifier_body
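The optimisation section of main.rs works on non-dimensional factors that all start at 1.0 and only rescales the real da/dN parameters afterwards with `params.iter().zip(factors).map(|(p, f)| p * f)`. The toy sketch below illustrates that idea with a made-up one-parameter growth model, invented measurement pairs and a plain sweep; it is not the program's da/dN equations, fractography data or Nelder-Mead/GSL routines.

```rust
// Toy model: predicted crack size after n blocks, with a single rate parameter.
// Purely illustrative; not the program's da/dN equations.
fn predict(rate: f64, blocks: f64, a0: f64) -> f64 {
    a0 * (rate * blocks).exp()
}

// Sum of squared errors between predictions and (invented) measurements.
fn error(params: &[f64], factors: &[f64], measured: &[(f64, f64)], a0: f64) -> f64 {
    let rate = params[0] * factors[0]; // rescale: parameter * non-dimensional factor
    measured
        .iter()
        .map(|&(blocks, a)| (predict(rate, blocks, a0) - a).powi(2))
        .sum()
}

fn main() {
    let params = vec![1.0e-3]; // starting parameter value (illustrative)
    let measured = [(100.0, 0.0123), (200.0, 0.0151), (300.0, 0.0185)]; // invented data
    let a0 = 0.01;

    // Sweep the normalised factor around 1.0 and keep the best one.
    let mut best = (1.0, f64::INFINITY);
    let mut f = 0.5;
    while f <= 2.0 {
        let e = error(&params, &[f], &measured, a0);
        if e < best.1 {
            best = (f, e);
        }
        f += 0.01;
    }

    // Rescale the parameter the same way the program does: p * f.
    let scaled: Vec<f64> = params
        .iter()
        .zip(std::iter::once(best.0))
        .map(|(p, f)| p * f)
        .collect();
    println!("best factor {:.2}, scaled params {:?}", best.0, scaled);
}
```

Keeping the search variables dimensionless means the same optimiser settings work whether the underlying parameter is of order 1e-3 or 1e-10.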
main.rs
/// easiGrow /// /// by Paul White (Nov 2014--2017) /// written in rust (www.rust-lang.org) /// /// A program to match crack growth predictions to measurements. /// /// The program calculates fatigue crack growth rates and finds the /// optimum parameters of a crack growth model to match predictions /// with measurements. /// /// **easiGrow** is a standalone program but most of the calculations /// are done through calls to the associated **fatigue** library which /// is included. The main program is for doing anything that /// explicitly uses the command line flags inlcuding the optimisation /// module. These flages are used to build the **EasiOptions** data /// structure which is then used to generate the crack growth /// history. The optimisation generates a crack growth curve which it /// compares with a fractography file. It finds the error between /// these measurements and tries to minimise the sum errors through /// minimisation routines. /// /// Currently, none of the models has a memory effect, so it is ok to /// just start growing the crack from an iniital crack size that is /// smaller than the initial fracto data. The struct `grow::CrackState` /// also contains parameters that are passed along with the applied /// loading _kmin_ and _kmax_, so any memory variables should be added to /// this struct and will be availabe to be used by the _da/dn_ equation. /// The simplest memory effect that is included in the `CrackState` /// data is the plastic zone size, but there are no dadn equations /// currently using this. The memory effect does not appear to be /// strong in AA7050 material. /// /// Think of the program flow as /// /// 1. Read in data /// 2. Filter the sequence (turning point, rainflow, risefall, deadband etc.) and convert to cycles /// 3. Filter the list of cycles /// 4. If required, optimise any parameters /// 5. Perform a crack growth calculation /// 6. Write out requested output #[macro_use] extern crate clap; extern crate fatigue; extern crate log; extern crate env_logger; use std::f64::consts::FRAC_PI_2; use std::process; use std::collections::BTreeSet; use fatigue::{beta, cycle, dadn, fracto, grow, io, material, table, tag}; use options_clap::get_options_clap; use options::{OptimMethod, TerminatingOutput}; use fatigue::dadn::DaDn; use fatigue::COMMENT; use std::error::Error; use std::fs::File; use std::path::Path; use log::error; use std::io::Write; mod list; mod optimise; mod sweep; mod factors; mod options; mod options_clap; mod nelder; mod numbers; mod vector; #[cfg(feature = "GSL")] mod optimise_gsl; fn main() { env_logger::init(); // get all the data let materials = material::get_all_dadns(); let mut options = options::get_default_options(); get_options_clap("", &mut options); println!("{}easiGrow: version {}", COMMENT, crate_version!()); println!("{}", COMMENT); if options.verbosity == options::Verbosity::Verbose { println!("{}Options: ", COMMENT); println!("{}", options); } options::read_all_files(&mut options); // process all the modifications to the sequence options.sequence = cycle::process_seq_mods(&options.sequence, &options.seq_mods); // Get the cycles from either the external sequence file, command line or the cycle file. if options.cycle_infile!= "" && options.seq_infile!= "" { error!("Error: you have specified a sequence file '{}' as well as a cycle file '{}'. 
Specify only one.", options.seq_infile, options.cycle_infile); std::process::exit(2) } let unclosed = if options.cycle_infile == "" { let (cycles, left) = cycle::cycles_from_sequence(&options.sequence, &options.cycle_method); options.cycles = cycles; left } else { Vec::new() }; // process all the modifications to the cycles options.cycles = cycle::process_cycle_mods(&options.cycles, &options.cycle_mods); // Only keep those cycles that remain after filtering the cycles // and mark the turning points associated with those cycles. This // section is only for writing out the modified sequence, since // the filtered cycles are all that is used for crack growth. if options.seq_mods.cycles { let mut keep = vec![false; options.sequence.len()]; for cycle in &options.cycles { keep[cycle.max.index] = true; keep[cycle.min.index] = true; } options.sequence.retain(|s| keep[s.index]) } // Any request for file or info output will result in program // termination. This policy is to reduce the complexity for the // user as to what the program does. // Write out the sequence file. if let Some(outfile) = options.seq_mods.outfile { io::write_sequence(&outfile, &options.sequence); std::process::exit(0); } // Write out the cycles file. if let Some(outfile) = options.cycle_mods.outfile { io::write_cycles(&outfile, &options.cycles); std::process::exit(0); } // write out the beta by converting to a beta table. This can be // then read back in using the file: option for beta selection. if options.beta_outfile!= "" { let beta = beta::get_beta_fn(&options.beta, &options.component); let table_beta = beta.as_table(); // need to write to file let path = Path::new(&options.beta_outfile); let display = path.display(); let mut file = match File::create(&path) { // The `description` method of `io::Error` returns a string that // describes the error Err(why) => { error!( "Error: could not create the file '{}': {}.", display, Error::description(&why) ); std::process::exit(1) } Ok(file) => file, }; let _ = write!(file, "{}", table_beta); std::process::exit(0); } // write out summary information of the sequence match options.output { TerminatingOutput::Summary => { let seq_source = if options.seq_infile!= "" { options.seq_infile } else { // This is a little vague as the sequence could be either // the default sequence or overwritten with a supplied sequence. String::from("(Used specified sequence)") }; cycle::summarise_sequence(&seq_source, &options.sequence, &options.seq_mods); let cycle_source = if options.cycle_infile!= "" { options.cycle_infile } else { format!( "(Obtained from sequence using '{:?}' method)", options.cycle_method ) }; cycle::summarise_cycles( &cycle_source, &options.cycles, &unclosed, &options.cycle_mods, ); std::process::exit(0) } // write out extended list of options and methods TerminatingOutput::List => { list::print_list(); std::process::exit(0); } _ => (), } // get the correct material parameters for the dadn equation or // from the command line. If the params are not given, then get the // dadn material constants from the internal database. 
let mut params = options.params.clone(); if params.is_empty() { // extract out the appropriate material parameters from a file params = if options.dadn.starts_with("file:") { let filename = options.dadn.trim_start_matches("file:"); println!( "{}No parameters given, using the dk values in the dadn file {}", COMMENT, filename ); let table = table::Table::read_file(filename, true); // collapse down the dks and use these as the parameters for optimising table.variables() // or from the internal database. } else { println!( "{}No parameters given, obtaining from material library for {}", COMMENT, options.dadn ); match materials.iter().find(|m| options.dadn.starts_with(m.name)) { Some(m) => m.eqn.variables(), None => { error!("Error: Unknown dadn model {}", options.dadn); process::exit(1); } } } }; // Optimise the parameters to match the predicted crack growth // rates with the associated measured crack growth rates. if options.optimise.file!= "" { // optimisation scaling factors options.params = params.clone(); println!( "{}Now starting the optimisation with params {:?}...", COMMENT, options.params ); let mut factors = vec![1.0; params.len()]; // non-dimensionalised factors used for optimisation optimise_error(&options, &mut factors); println!("{}...finished the optimisation. ", COMMENT); println!("{}The normalised factors are {:?}", COMMENT, factors); // Rescale the parameters to include the optimised factors params = options .params .iter() .zip(factors) .map(|(p, f)| p * f) .collect::<Vec<f64>>(); println!("{}The scaled optimised factors are: {:?}", COMMENT, params); if options.scale == 0.0 { std::process::exit(0); // not an error if we have performed an optimisation } } // Grow the crack let history_all = generate_crack_history(&options, &params); // Lastly, now that we've grown the crack, check if we need to // generate and write out a pseudo image. if options.image.file!= "" { println!("Making a pseudo image..."); if options.image.file.ends_with(".svg") { fracto::write_svg_pseudo_image(&history_all, &options.image); println!("Image written to file '{}'", options.image.file); } else { error!("Error: Currently easigo can only generate svg. Please use a '.svg' suffix"); } } } #[cfg(not(feature = "GSL"))] fn optimise_error(options: &options::EasiOptions, mut factors: &mut [f64]) { match options.optimise.method { OptimMethod::Sweep => sweep::sweep(options, &mut factors), OptimMethod::Nelder => optimise::nelder_match_crack(options, &mut factors), OptimMethod::All => { sweep::sweep(options, &mut factors); optimise::nelder_match_crack(options, &mut factors) } }; } #[cfg(feature = "GSL")]
OptimMethod::Nelder => optimise::nelder_match_crack(&options, &mut factors), OptimMethod::Levenberg => optimise_gsl::gsl_match_crack(&options, &mut factors), OptimMethod::All => { sweep::sweep(options, &mut factors); optimise::nelder_match_crack(options, &mut factors); optimise_gsl::gsl_match_crack(options, &mut factors) } }; } // Finally grow the crack with the current parameters which may have been optimised. // We exit here if the scale has not been set. Otherwise we // would go through and do a default calculation which confuses // people if they just want to start the program to see how to get // help. fn generate_crack_history(options: &options::EasiOptions, params: &[f64]) -> Vec<grow::History> { let dadn_eqn = dadn::make_model(&options.dadn, &params, String::from("unknown")); println!("{}da/dN equation: {}", COMMENT, dadn_eqn); let beta = beta::get_beta_fn(&options.beta, &options.component); if options.scale == 0.0 { error!( "Error: The sequence scale factor is 0. You need to set the scale factor (i.e. load or stress level) in order to perform a crack growth calculation. Try\n easigrow --help" ); std::process::exit(1); } if options.cycles.is_empty() { println!("Error: There are no closed cycles in sequence. Perhaps try the re-order sequence option -r"); std::process::exit(1); } // We define the initial state. If any memory effect is to be // included in the crack growth model, the meory should be in this // data structure. let init_crack = grow::CrackState::new(options.a.clone()); let mut history_all = Vec::new(); grow::display_history_header(&options.output_vars); // Non-dimensional ratios for beta factor let c = options.a[options.a.len() - 1]; let a_on_c = options.a[0] / c; let a_on_d = options.a[0] / options.component.forward; let c_on_b = c / options.component.sideways; let a_on_r = options.a[0] / options.component.radius; // phis is a vector of angles around the crack front. It depends // on the beta whether any or all of the angles are used. Most // just use the first and some use the last as well. let phis = vec![0.0, FRAC_PI_2]; // Initialise the history let init_history = grow::History { block: 0.0, stress: 0.0, cycle: cycle::Cycle { max: tag::Tag { value: 0.0, index: 0, }, min: tag::Tag { value: 0.0, index: 0, }, }, k: vec![0.0, 0.0], dk: vec![0.0, 0.0], beta: beta.beta(a_on_d, a_on_c, c_on_b, a_on_r, &phis), da: vec![0.0, 0.0], crack: init_crack, }; grow::display_history_line(&init_history, &options.output_vars, &options.component); let component = grow::FatigueTest { history: init_history, component: options.component.clone(), scale: options.scale, cycles: options.cycles.clone(), a_limit: options.a_limit.clone(), block_limit: options.block_limit, next_cycle: 0, dadn: dadn_eqn, beta, output_vars: options.output_vars.clone(), }; // make a hash set of the lines that are required for output let mut output_lines: BTreeSet<usize> = options.output_lines.iter().cloned().collect(); // if there are no lines in the output then put in the line for the first cycle if options .cycles .iter() .filter(|c| output_lines.contains(&c.max.index) || output_lines.contains(&c.min.index)) .count() == 0 { println!("output_lines {:?}", output_lines); println!( " Warning: There are no sequence lines in the cycle list and so there will be no crack growth output. Consider closing up cycles with re-order to use all sequence lines or include specific sequence lines that are in the cycle. 
Meanwhile, the output will be for the sequence line in the first cycle at line {}.", options.cycles[0].max.index ); output_lines.insert(options.cycles[0].max.index); } // Start the crack growth. This loop steps through each cycle // repeating the cycles until a terminating condition stops the // growth and ends the for loop. for (cycle_no, history) in component.enumerate() { if grow::output_cycle_history(&history, options.output_every, &output_lines, cycle_no) { grow::display_history_line(&history, &options.output_vars, &options.component); } // Only keep the history if we are producing a fracto image. if options.image.file != "" { history_all.push(history); } } history_all }
fn optimise_error(options: &options::EasiOptions, mut factors: &mut [f64]) { match options.optimise.method { OptimMethod::Sweep => sweep::sweep(options, &mut factors),
random_line_split
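main.rs provides two bodies for `optimise_error`, selected at compile time with `#[cfg(feature = "GSL")]` and `#[cfg(not(feature = "GSL"))]`, so the Levenberg option only exists when the GSL bindings are built in. A minimal hedged sketch of that pattern with a hypothetical `fancy` feature; the feature name would have to be declared in the crate's Cargo.toml for the gate to ever be enabled.

```rust
// Compile-time selection between two implementations of the same function,
// mirroring the GSL / non-GSL split above. "fancy" is a hypothetical feature
// that would need a `[features] fancy = []` entry in Cargo.toml.
#[cfg(feature = "fancy")]
fn solve(x: f64) -> f64 {
    // Pretend this delegates to an optional native library.
    x.sqrt()
}

#[cfg(not(feature = "fancy"))]
fn solve(x: f64) -> f64 {
    // Fallback used when the feature is not enabled.
    x / 2.0
}

fn main() {
    // Exactly one of the two definitions above is compiled in.
    println!("solve(9.0) = {}", solve(9.0));
}
```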
main.rs
/// easiGrow /// /// by Paul White (Nov 2014--2017) /// written in rust (www.rust-lang.org) /// /// A program to match crack growth predictions to measurements. /// /// The program calculates fatigue crack growth rates and finds the /// optimum parameters of a crack growth model to match predictions /// with measurements. /// /// **easiGrow** is a standalone program but most of the calculations /// are done through calls to the associated **fatigue** library which /// is included. The main program is for doing anything that /// explicitly uses the command line flags inlcuding the optimisation /// module. These flages are used to build the **EasiOptions** data /// structure which is then used to generate the crack growth /// history. The optimisation generates a crack growth curve which it /// compares with a fractography file. It finds the error between /// these measurements and tries to minimise the sum errors through /// minimisation routines. /// /// Currently, none of the models has a memory effect, so it is ok to /// just start growing the crack from an iniital crack size that is /// smaller than the initial fracto data. The struct `grow::CrackState` /// also contains parameters that are passed along with the applied /// loading _kmin_ and _kmax_, so any memory variables should be added to /// this struct and will be availabe to be used by the _da/dn_ equation. /// The simplest memory effect that is included in the `CrackState` /// data is the plastic zone size, but there are no dadn equations /// currently using this. The memory effect does not appear to be /// strong in AA7050 material. /// /// Think of the program flow as /// /// 1. Read in data /// 2. Filter the sequence (turning point, rainflow, risefall, deadband etc.) and convert to cycles /// 3. Filter the list of cycles /// 4. If required, optimise any parameters /// 5. Perform a crack growth calculation /// 6. Write out requested output #[macro_use] extern crate clap; extern crate fatigue; extern crate log; extern crate env_logger; use std::f64::consts::FRAC_PI_2; use std::process; use std::collections::BTreeSet; use fatigue::{beta, cycle, dadn, fracto, grow, io, material, table, tag}; use options_clap::get_options_clap; use options::{OptimMethod, TerminatingOutput}; use fatigue::dadn::DaDn; use fatigue::COMMENT; use std::error::Error; use std::fs::File; use std::path::Path; use log::error; use std::io::Write; mod list; mod optimise; mod sweep; mod factors; mod options; mod options_clap; mod nelder; mod numbers; mod vector; #[cfg(feature = "GSL")] mod optimise_gsl; fn main() { env_logger::init(); // get all the data let materials = material::get_all_dadns(); let mut options = options::get_default_options(); get_options_clap("", &mut options); println!("{}easiGrow: version {}", COMMENT, crate_version!()); println!("{}", COMMENT); if options.verbosity == options::Verbosity::Verbose { println!("{}Options: ", COMMENT); println!("{}", options); } options::read_all_files(&mut options); // process all the modifications to the sequence options.sequence = cycle::process_seq_mods(&options.sequence, &options.seq_mods); // Get the cycles from either the external sequence file, command line or the cycle file. if options.cycle_infile!= "" && options.seq_infile!= "" { error!("Error: you have specified a sequence file '{}' as well as a cycle file '{}'. 
Specify only one.", options.seq_infile, options.cycle_infile); std::process::exit(2) } let unclosed = if options.cycle_infile == "" { let (cycles, left) = cycle::cycles_from_sequence(&options.sequence, &options.cycle_method); options.cycles = cycles; left } else { Vec::new() }; // process all the modifications to the cycles options.cycles = cycle::process_cycle_mods(&options.cycles, &options.cycle_mods); // Only keep those cycles that remain after filtering the cycles // and mark the turning points associated with those cycles. This // section is only for writing out the modified sequence, since // the filtered cycles are all that is used for crack growth. if options.seq_mods.cycles { let mut keep = vec![false; options.sequence.len()]; for cycle in &options.cycles { keep[cycle.max.index] = true; keep[cycle.min.index] = true; } options.sequence.retain(|s| keep[s.index]) } // Any request for file or info output will result in program // termination. This policy is to reduce the complexity for the // user as to what the program does. // Write out the sequence file. if let Some(outfile) = options.seq_mods.outfile { io::write_sequence(&outfile, &options.sequence); std::process::exit(0); } // Write out the cycles file. if let Some(outfile) = options.cycle_mods.outfile { io::write_cycles(&outfile, &options.cycles); std::process::exit(0); } // write out the beta by converting to a beta table. This can be // then read back in using the file: option for beta selection. if options.beta_outfile!= "" { let beta = beta::get_beta_fn(&options.beta, &options.component); let table_beta = beta.as_table(); // need to write to file let path = Path::new(&options.beta_outfile); let display = path.display(); let mut file = match File::create(&path) { // The `description` method of `io::Error` returns a string that // describes the error Err(why) => { error!( "Error: could not create the file '{}': {}.", display, Error::description(&why) ); std::process::exit(1) } Ok(file) => file, }; let _ = write!(file, "{}", table_beta); std::process::exit(0); } // write out summary information of the sequence match options.output { TerminatingOutput::Summary => { let seq_source = if options.seq_infile!= "" { options.seq_infile } else { // This is a little vague as the sequence could be either // the default sequence or overwritten with a supplied sequence. String::from("(Used specified sequence)") }; cycle::summarise_sequence(&seq_source, &options.sequence, &options.seq_mods); let cycle_source = if options.cycle_infile!= "" { options.cycle_infile } else { format!( "(Obtained from sequence using '{:?}' method)", options.cycle_method ) }; cycle::summarise_cycles( &cycle_source, &options.cycles, &unclosed, &options.cycle_mods, ); std::process::exit(0) } // write out extended list of options and methods TerminatingOutput::List => { list::print_list(); std::process::exit(0); } _ => (), } // get the correct material parameters for the dadn equation or // from the command line. If the params are not given, then get the // dadn material constants from the internal database. 
let mut params = options.params.clone(); if params.is_empty() { // extract out the appropriate material parameters from a file params = if options.dadn.starts_with("file:") { let filename = options.dadn.trim_start_matches("file:"); println!( "{}No parameters given, using the dk values in the dadn file {}", COMMENT, filename ); let table = table::Table::read_file(filename, true); // collapse down the dks and use these as the parameters for optimising table.variables() // or from the internal database. } else { println!( "{}No parameters given, obtaining from material library for {}", COMMENT, options.dadn ); match materials.iter().find(|m| options.dadn.starts_with(m.name)) { Some(m) => m.eqn.variables(), None => { error!("Error: Unknown dadn model {}", options.dadn); process::exit(1); } } } }; // Optimise the parameters to match the predicted crack growth // rates with the associated measured crack growth rates. if options.optimise.file!= "" { // optimisation scaling factors options.params = params.clone(); println!( "{}Now starting the optimisation with params {:?}...", COMMENT, options.params ); let mut factors = vec![1.0; params.len()]; // non-dimensionalised factors used for optimisation optimise_error(&options, &mut factors); println!("{}...finished the optimisation. ", COMMENT); println!("{}The normalised factors are {:?}", COMMENT, factors); // Rescale the parameters to include the optimised factors params = options .params .iter() .zip(factors) .map(|(p, f)| p * f) .collect::<Vec<f64>>(); println!("{}The scaled optimised factors are: {:?}", COMMENT, params); if options.scale == 0.0 { std::process::exit(0); // not an error if we have performed an optimisation } } // Grow the crack let history_all = generate_crack_history(&options, &params); // Lastly, now that we've grown the crack, check if we need to // generate and write out a pseudo image. if options.image.file!= "" { println!("Making a pseudo image..."); if options.image.file.ends_with(".svg") { fracto::write_svg_pseudo_image(&history_all, &options.image); println!("Image written to file '{}'", options.image.file); } else { error!("Error: Currently easigo can only generate svg. Please use a '.svg' suffix"); } } } #[cfg(not(feature = "GSL"))] fn optimise_error(options: &options::EasiOptions, mut factors: &mut [f64]) { match options.optimise.method { OptimMethod::Sweep => sweep::sweep(options, &mut factors), OptimMethod::Nelder => optimise::nelder_match_crack(options, &mut factors), OptimMethod::All => { sweep::sweep(options, &mut factors); optimise::nelder_match_crack(options, &mut factors) } }; } #[cfg(feature = "GSL")] fn optimise_error(options: &options::EasiOptions, mut factors: &mut [f64]) { match options.optimise.method { OptimMethod::Sweep => sweep::sweep(options, &mut factors), OptimMethod::Nelder => optimise::nelder_match_crack(&options, &mut factors), OptimMethod::Levenberg => optimise_gsl::gsl_match_crack(&options, &mut factors), OptimMethod::All =>
}; } // Finally grow the crack with the current parameters which may have been optimised. // We exit here if the scale has not been set. Otherwise we // would go through and do a default calculation which confuses // people if they just want to start the program to see how to get // help. fn generate_crack_history(options: &options::EasiOptions, params: &[f64]) -> Vec<grow::History> { let dadn_eqn = dadn::make_model(&options.dadn, &params, String::from("unknown")); println!("{}da/dN equation: {}", COMMENT, dadn_eqn); let beta = beta::get_beta_fn(&options.beta, &options.component); if options.scale == 0.0 { error!( "Error: The sequence scale factor is 0. You need to set the scale factor (i.e. load or stress level) in order to perform a crack growth calculation. Try\n easigrow --help" ); std::process::exit(1); } if options.cycles.is_empty() { println!("Error: There are no closed cycles in sequence. Perhaps try the re-order sequence option -r"); std::process::exit(1); } // We define the initial state. If any memory effect is to be // included in the crack growth model, the meory should be in this // data structure. let init_crack = grow::CrackState::new(options.a.clone()); let mut history_all = Vec::new(); grow::display_history_header(&options.output_vars); // Non-dimensional ratios for beta factor let c = options.a[options.a.len() - 1]; let a_on_c = options.a[0] / c; let a_on_d = options.a[0] / options.component.forward; let c_on_b = c / options.component.sideways; let a_on_r = options.a[0] / options.component.radius; // phis is a vector of angles around the crack front. It depends // on the beta whether any or all of the angles are used. Most // just use the first and some use the last as well. let phis = vec![0.0, FRAC_PI_2]; // Initialise the history let init_history = grow::History { block: 0.0, stress: 0.0, cycle: cycle::Cycle { max: tag::Tag { value: 0.0, index: 0, }, min: tag::Tag { value: 0.0, index: 0, }, }, k: vec![0.0, 0.0], dk: vec![0.0, 0.0], beta: beta.beta(a_on_d, a_on_c, c_on_b, a_on_r, &phis), da: vec![0.0, 0.0], crack: init_crack, }; grow::display_history_line(&init_history, &options.output_vars, &options.component); let component = grow::FatigueTest { history: init_history, component: options.component.clone(), scale: options.scale, cycles: options.cycles.clone(), a_limit: options.a_limit.clone(), block_limit: options.block_limit, next_cycle: 0, dadn: dadn_eqn, beta, output_vars: options.output_vars.clone(), }; // make a hash set of the lines that are required for output let mut output_lines: BTreeSet<usize> = options.output_lines.iter().cloned().collect(); // if there are no lines in the output then put in the line for the first cycle if options .cycles .iter() .filter(|c| output_lines.contains(&c.max.index) || output_lines.contains(&c.min.index)) .count() == 0 { println!("output_lines {:?}", output_lines); println!( " Warning: There are no sequence lines in the cycle list and so there will be no crack growth output. Consider closing up cycles with re-order to use all sequence lines or include specific sequence lines that are in the cycle. Meanwhile, the output will be for the squence line in the first cycle at line {}.", options.cycles[0].max.index ); output_lines.insert(options.cycles[0].max.index); } // Start the crack growth. This loop steps through each cycle // repeating the cycles until a terminating condition stops the // growth and ends the for loop. 
for (cycle_no, history) in component.enumerate() { if grow::output_cycle_history(&history, options.output_every, &output_lines, cycle_no) { grow::display_history_line(&history, &options.output_vars, &options.component); } // Only keep the history if we are producing a fracto image. if options.image.file != "" { history_all.push(history); } } history_all }
{ sweep::sweep(options, &mut factors); optimise::nelder_match_crack(options, &mut factors); optimise_gsl::gsl_match_crack(options, &mut factors) }
conditional_block
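// The row above feature-gates the optimiser: `optimise_error` is compiled twice,
// once under `#[cfg(not(feature = "GSL"))]` and once under `#[cfg(feature = "GSL")]`,
// and only the GSL build offers the Levenberg-Marquardt path. A minimal sketch of
// that build-time selection, using a hypothetical feature name "gsl" rather than the
// crate's real configuration:
#[cfg(feature = "gsl")]
fn refine(factors: &mut [f64]) {
    // when the optional dependency is compiled in, a richer optimiser would run here
    println!("gsl refinement of {} factors", factors.len());
}

#[cfg(not(feature = "gsl"))]
fn refine(factors: &mut [f64]) {
    // fallback used when the optional dependency is not built
    println!("built-in refinement of {} factors", factors.len());
}

fn main() {
    // exactly one of the two definitions above exists in any given build,
    // e.g. `cargo run` vs `cargo run --features gsl` (assuming the feature is declared)
    let mut factors = [1.0_f64; 2];
    refine(&mut factors);
}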
main.rs
/// easiGrow /// /// by Paul White (Nov 2014--2017) /// written in rust (www.rust-lang.org) /// /// A program to match crack growth predictions to measurements. /// /// The program calculates fatigue crack growth rates and finds the /// optimum parameters of a crack growth model to match predictions /// with measurements. /// /// **easiGrow** is a standalone program but most of the calculations /// are done through calls to the associated **fatigue** library which /// is included. The main program is for doing anything that /// explicitly uses the command line flags inlcuding the optimisation /// module. These flages are used to build the **EasiOptions** data /// structure which is then used to generate the crack growth /// history. The optimisation generates a crack growth curve which it /// compares with a fractography file. It finds the error between /// these measurements and tries to minimise the sum errors through /// minimisation routines. /// /// Currently, none of the models has a memory effect, so it is ok to /// just start growing the crack from an iniital crack size that is /// smaller than the initial fracto data. The struct `grow::CrackState` /// also contains parameters that are passed along with the applied /// loading _kmin_ and _kmax_, so any memory variables should be added to /// this struct and will be availabe to be used by the _da/dn_ equation. /// The simplest memory effect that is included in the `CrackState` /// data is the plastic zone size, but there are no dadn equations /// currently using this. The memory effect does not appear to be /// strong in AA7050 material. /// /// Think of the program flow as /// /// 1. Read in data /// 2. Filter the sequence (turning point, rainflow, risefall, deadband etc.) and convert to cycles /// 3. Filter the list of cycles /// 4. If required, optimise any parameters /// 5. Perform a crack growth calculation /// 6. Write out requested output #[macro_use] extern crate clap; extern crate fatigue; extern crate log; extern crate env_logger; use std::f64::consts::FRAC_PI_2; use std::process; use std::collections::BTreeSet; use fatigue::{beta, cycle, dadn, fracto, grow, io, material, table, tag}; use options_clap::get_options_clap; use options::{OptimMethod, TerminatingOutput}; use fatigue::dadn::DaDn; use fatigue::COMMENT; use std::error::Error; use std::fs::File; use std::path::Path; use log::error; use std::io::Write; mod list; mod optimise; mod sweep; mod factors; mod options; mod options_clap; mod nelder; mod numbers; mod vector; #[cfg(feature = "GSL")] mod optimise_gsl; fn main() { env_logger::init(); // get all the data let materials = material::get_all_dadns(); let mut options = options::get_default_options(); get_options_clap("", &mut options); println!("{}easiGrow: version {}", COMMENT, crate_version!()); println!("{}", COMMENT); if options.verbosity == options::Verbosity::Verbose { println!("{}Options: ", COMMENT); println!("{}", options); } options::read_all_files(&mut options); // process all the modifications to the sequence options.sequence = cycle::process_seq_mods(&options.sequence, &options.seq_mods); // Get the cycles from either the external sequence file, command line or the cycle file. if options.cycle_infile!= "" && options.seq_infile!= "" { error!("Error: you have specified a sequence file '{}' as well as a cycle file '{}'. 
Specify only one.", options.seq_infile, options.cycle_infile); std::process::exit(2) } let unclosed = if options.cycle_infile == "" { let (cycles, left) = cycle::cycles_from_sequence(&options.sequence, &options.cycle_method); options.cycles = cycles; left } else { Vec::new() }; // process all the modifications to the cycles options.cycles = cycle::process_cycle_mods(&options.cycles, &options.cycle_mods); // Only keep those cycles that remain after filtering the cycles // and mark the turning points associated with those cycles. This // section is only for writing out the modified sequence, since // the filtered cycles are all that is used for crack growth. if options.seq_mods.cycles { let mut keep = vec![false; options.sequence.len()]; for cycle in &options.cycles { keep[cycle.max.index] = true; keep[cycle.min.index] = true; } options.sequence.retain(|s| keep[s.index]) } // Any request for file or info output will result in program // termination. This policy is to reduce the complexity for the // user as to what the program does. // Write out the sequence file. if let Some(outfile) = options.seq_mods.outfile { io::write_sequence(&outfile, &options.sequence); std::process::exit(0); } // Write out the cycles file. if let Some(outfile) = options.cycle_mods.outfile { io::write_cycles(&outfile, &options.cycles); std::process::exit(0); } // write out the beta by converting to a beta table. This can be // then read back in using the file: option for beta selection. if options.beta_outfile!= "" { let beta = beta::get_beta_fn(&options.beta, &options.component); let table_beta = beta.as_table(); // need to write to file let path = Path::new(&options.beta_outfile); let display = path.display(); let mut file = match File::create(&path) { // The `description` method of `io::Error` returns a string that // describes the error Err(why) => { error!( "Error: could not create the file '{}': {}.", display, Error::description(&why) ); std::process::exit(1) } Ok(file) => file, }; let _ = write!(file, "{}", table_beta); std::process::exit(0); } // write out summary information of the sequence match options.output { TerminatingOutput::Summary => { let seq_source = if options.seq_infile!= "" { options.seq_infile } else { // This is a little vague as the sequence could be either // the default sequence or overwritten with a supplied sequence. String::from("(Used specified sequence)") }; cycle::summarise_sequence(&seq_source, &options.sequence, &options.seq_mods); let cycle_source = if options.cycle_infile!= "" { options.cycle_infile } else { format!( "(Obtained from sequence using '{:?}' method)", options.cycle_method ) }; cycle::summarise_cycles( &cycle_source, &options.cycles, &unclosed, &options.cycle_mods, ); std::process::exit(0) } // write out extended list of options and methods TerminatingOutput::List => { list::print_list(); std::process::exit(0); } _ => (), } // get the correct material parameters for the dadn equation or // from the command line. If the params are not given, then get the // dadn material constants from the internal database. 
let mut params = options.params.clone(); if params.is_empty() { // extract out the appropriate material parameters from a file params = if options.dadn.starts_with("file:") { let filename = options.dadn.trim_start_matches("file:"); println!( "{}No parameters given, using the dk values in the dadn file {}", COMMENT, filename ); let table = table::Table::read_file(filename, true); // collapse down the dks and use these as the parameters for optimising table.variables() // or from the internal database. } else { println!( "{}No parameters given, obtaining from material library for {}", COMMENT, options.dadn ); match materials.iter().find(|m| options.dadn.starts_with(m.name)) { Some(m) => m.eqn.variables(), None => { error!("Error: Unknown dadn model {}", options.dadn); process::exit(1); } } } }; // Optimise the parameters to match the predicted crack growth // rates with the associated measured crack growth rates. if options.optimise.file!= "" { // optimisation scaling factors options.params = params.clone(); println!( "{}Now starting the optimisation with params {:?}...", COMMENT, options.params ); let mut factors = vec![1.0; params.len()]; // non-dimensionalised factors used for optimisation optimise_error(&options, &mut factors); println!("{}...finished the optimisation. ", COMMENT); println!("{}The normalised factors are {:?}", COMMENT, factors); // Rescale the parameters to include the optimised factors params = options .params .iter() .zip(factors) .map(|(p, f)| p * f) .collect::<Vec<f64>>(); println!("{}The scaled optimised factors are: {:?}", COMMENT, params); if options.scale == 0.0 { std::process::exit(0); // not an error if we have performed an optimisation } } // Grow the crack let history_all = generate_crack_history(&options, &params); // Lastly, now that we've grown the crack, check if we need to // generate and write out a pseudo image. if options.image.file!= "" { println!("Making a pseudo image..."); if options.image.file.ends_with(".svg") { fracto::write_svg_pseudo_image(&history_all, &options.image); println!("Image written to file '{}'", options.image.file); } else { error!("Error: Currently easigo can only generate svg. Please use a '.svg' suffix"); } } } #[cfg(not(feature = "GSL"))] fn optimise_error(options: &options::EasiOptions, mut factors: &mut [f64]) { match options.optimise.method { OptimMethod::Sweep => sweep::sweep(options, &mut factors), OptimMethod::Nelder => optimise::nelder_match_crack(options, &mut factors), OptimMethod::All => { sweep::sweep(options, &mut factors); optimise::nelder_match_crack(options, &mut factors) } }; } #[cfg(feature = "GSL")] fn
(options: &options::EasiOptions, mut factors: &mut [f64]) { match options.optimise.method { OptimMethod::Sweep => sweep::sweep(options, &mut factors), OptimMethod::Nelder => optimise::nelder_match_crack(&options, &mut factors), OptimMethod::Levenberg => optimise_gsl::gsl_match_crack(&options, &mut factors), OptimMethod::All => { sweep::sweep(options, &mut factors); optimise::nelder_match_crack(options, &mut factors); optimise_gsl::gsl_match_crack(options, &mut factors) } }; } // Finally grow the crack with the current parameters which may have been optimised. // We exit here if the scale has not been set. Otherwise we // would go through and do a default calculation which confuses // people if they just want to start the program to see how to get // help. fn generate_crack_history(options: &options::EasiOptions, params: &[f64]) -> Vec<grow::History> { let dadn_eqn = dadn::make_model(&options.dadn, &params, String::from("unknown")); println!("{}da/dN equation: {}", COMMENT, dadn_eqn); let beta = beta::get_beta_fn(&options.beta, &options.component); if options.scale == 0.0 { error!( "Error: The sequence scale factor is 0. You need to set the scale factor (i.e. load or stress level) in order to perform a crack growth calculation. Try\n easigrow --help" ); std::process::exit(1); } if options.cycles.is_empty() { println!("Error: There are no closed cycles in sequence. Perhaps try the re-order sequence option -r"); std::process::exit(1); } // We define the initial state. If any memory effect is to be // included in the crack growth model, the meory should be in this // data structure. let init_crack = grow::CrackState::new(options.a.clone()); let mut history_all = Vec::new(); grow::display_history_header(&options.output_vars); // Non-dimensional ratios for beta factor let c = options.a[options.a.len() - 1]; let a_on_c = options.a[0] / c; let a_on_d = options.a[0] / options.component.forward; let c_on_b = c / options.component.sideways; let a_on_r = options.a[0] / options.component.radius; // phis is a vector of angles around the crack front. It depends // on the beta whether any or all of the angles are used. Most // just use the first and some use the last as well. let phis = vec![0.0, FRAC_PI_2]; // Initialise the history let init_history = grow::History { block: 0.0, stress: 0.0, cycle: cycle::Cycle { max: tag::Tag { value: 0.0, index: 0, }, min: tag::Tag { value: 0.0, index: 0, }, }, k: vec![0.0, 0.0], dk: vec![0.0, 0.0], beta: beta.beta(a_on_d, a_on_c, c_on_b, a_on_r, &phis), da: vec![0.0, 0.0], crack: init_crack, }; grow::display_history_line(&init_history, &options.output_vars, &options.component); let component = grow::FatigueTest { history: init_history, component: options.component.clone(), scale: options.scale, cycles: options.cycles.clone(), a_limit: options.a_limit.clone(), block_limit: options.block_limit, next_cycle: 0, dadn: dadn_eqn, beta, output_vars: options.output_vars.clone(), }; // make a hash set of the lines that are required for output let mut output_lines: BTreeSet<usize> = options.output_lines.iter().cloned().collect(); // if there are no lines in the output then put in the line for the first cycle if options .cycles .iter() .filter(|c| output_lines.contains(&c.max.index) || output_lines.contains(&c.min.index)) .count() == 0 { println!("output_lines {:?}", output_lines); println!( " Warning: There are no sequence lines in the cycle list and so there will be no crack growth output. 
Consider closing up cycles with re-order to use all sequence lines or include specific sequence lines that are in the cycle. Meanwhile, the output will be for the sequence line in the first cycle at line {}.", options.cycles[0].max.index ); output_lines.insert(options.cycles[0].max.index); } // Start the crack growth. This loop steps through each cycle // repeating the cycles until a terminating condition stops the // growth and ends the for loop. for (cycle_no, history) in component.enumerate() { if grow::output_cycle_history(&history, options.output_every, &output_lines, cycle_no) { grow::display_history_line(&history, &options.output_vars, &options.component); } // Only keep the history if we are producing a fracto image. if options.image.file != "" { history_all.push(history); } } history_all }
optimise_error
identifier_name
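// A small self-contained sketch of the rescaling step shown above: the optimiser
// works on non-dimensional factors that all start at 1.0, and the result is folded
// back into the da/dN parameters with an element-wise multiply
// (`params.iter().zip(factors).map(|(p, f)| p * f)`). The numbers below are made up,
// not real material constants.
fn rescale(params: &[f64], factors: &[f64]) -> Vec<f64> {
    params.iter().zip(factors.iter()).map(|(p, f)| p * f).collect()
}

fn main() {
    let params = vec![1.0e-10, 3.2, 10.0]; // hypothetical dadn constants
    let factors = vec![0.9, 1.05, 1.0]; // normalised factors after optimisation
    let scaled = rescale(&params, &factors);
    assert_eq!(scaled.len(), params.len());
    println!("{:?}", scaled);
}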
aio.rs
::DirectFile; use libaio::raw::{IoOp, Iocontext}; use std::os::unix::io::AsRawFd; use tokio::runtime::current_thread; use tokio_net::util::PollEvented; use libc; use slab::Slab; use log::{info, trace}; #[derive(Debug)] pub enum Message { PRead( DirectFile, usize, usize, BytesMut, oneshot::Sender<io::Result<(BytesMut, Option<io::Error>)>>, ), PWrite( DirectFile, usize, BytesMut, oneshot::Sender<io::Result<(BytesMut, Option<io::Error>)>>, ), } #[derive(Debug)] pub struct Session { pub inner: mpsc::Sender<Message>, thread: JoinHandle<()>, pthread: libc::pthread_t, } #[derive(Debug, Clone)] struct SessionHandle { inner: mpsc::Sender<Message>, } impl Session { pub fn new(max_queue_depth: usize) -> io::Result<Session> { // Users of session interact with us by sending messages. let (tx, rx) = mpsc::channel::<Message>(max_queue_depth); let (tid_tx, tid_rx) = oneshot::channel(); // Spawn a thread with it's own event loop dedicated to AIO let t = thread::spawn(move || { let mut core = current_thread::Runtime::new().unwrap(); // Return the pthread id so the main thread can bind this // thread to a specific core tid_tx.send(unsafe { libc::pthread_self() }).unwrap(); let mut ctx = match Iocontext::<usize, BytesMut, BytesMut>::new(max_queue_depth) { Ok(ctx) => ctx, Err(e) => panic!("could not create Iocontext: {}", e), }; // Using an eventfd, the kernel can notify us when there's // one or more AIO results ready. See'man eventfd' match ctx.get_evfd_stream() { Ok(_) => (), Err(e) => panic!("get_evfd_stream failed: {}", e), }; let evfd = ctx.evfd.as_ref().unwrap().clone(); // Add the eventfd to the file descriptors we are // interested in. This will use epoll under the hood. let source = AioEventFd { inner: evfd }; let stream = PollEvented::new(source); let fut = AioThread { rx: rx, ctx: ctx, stream: stream, handles_pread: Slab::with_capacity(max_queue_depth), handles_pwrite: Slab::with_capacity(max_queue_depth), last_report_ts: SystemTime::now(), stats: AioStats { ..Default::default() }, }; core.spawn(fut); core.run().unwrap(); }); let tid = executor::block_on(tid_rx).unwrap(); Ok(Session { inner: tx, thread: t, pthread: tid, }) } pub fn thread_id(&self) -> libc::pthread_t { self.pthread } } struct AioThread { rx: mpsc::Receiver<Message>, ctx: Iocontext<usize, BytesMut, BytesMut>, stream: PollEvented<AioEventFd>, // Handles to outstanding requests handles_pread: Slab<HandleEntry>, handles_pwrite: Slab<HandleEntry>, last_report_ts: SystemTime, stats: AioStats, } struct HandleEntry { complete: oneshot::Sender<io::Result<(BytesMut, Option<io::Error>)>>, } #[derive(Default)] struct AioStats { curr_polls: u64, curr_preads: u64, curr_pwrites: u64, prev_polls: u64, prev_preads: u64, prev_pwrites: u64, } impl Future for AioThread { type Output = (); fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> { trace!( "============ AioThread.poll (inflight_preads:{} inflight_pwrites:{})", self.handles_pread.len(), self.handles_pwrite.len() ); self.stats.curr_polls += 1; // If there are any responses from the kernel available, read // as many as we can without blocking. let ready = mio::Ready::readable(); if Pin::new(&mut self.stream) .poll_read_ready(cx, ready) .is_ready() { match self.ctx.results(0, 100, None) { Ok(res) => { trace!(" got {} AIO responses", res.len()); for (op, result) in res.into_iter() { match op { IoOp::Pread(retbuf, token) => { trace!( " got pread response, token {}, is error? 
{}", token, result.is_err() ); match result { Ok(_) => { let entry = self.handles_pread.remove(token); //?.unwrap(); //let elapsed = entry.timestamp.elapsed().expect("Time drift!"); //trace!("pread returned in {} us", ((elapsed.as_secs() * 1_000_000_000) + elapsed.subsec_nanos() as u64) / 1000); //entry.complete.send(Ok((retbuf, None))).expect("Could not send AioSession response"); entry.complete.send(Ok((retbuf, None))); } Err(e) => panic!("pread error {:?}", e), } } IoOp::Pwrite(retbuf, token) => { trace!( " got pwrite response, token {}, is error? {}", token, result.is_err() ); match result { Ok(_) => { let entry = self.handles_pwrite.remove(token); //?.unwrap(); entry.complete.send(Ok((retbuf, None))); } Err(e) => panic!("pwrite error {:?}", e), } } _ => (), } } } Err(e) => panic!("ctx.results failed: {:?}", e), } }; // Read all available incoming requests, enqueue in AIO batch loop { let msg = match Pin::new(&mut self.rx).poll_next(cx) { Poll::Ready(Some(msg)) => msg, Poll::Ready(None) => break, Poll::Pending => break, // AioThread.poll is automatically scheduled }; match msg { Message::PRead(file, offset, len, buf, complete) => { self.stats.curr_preads += 1; // The self is a Pin<&mut Self>. Obtaining mutable references to the fields // will require going through DerefMut, which requires unique borrow. // You can avoid the issue by dereferencing self once on entry to the method // let this = &mut *self, and then continue accessing it // through this. // The basic idea is that each access to self.deref_mut() // basically will create a new mutable reference to self, if // you do it multiple times you get the error, so by // effectively calling deref_mut by hand I can save the // reference once and use it when needed. let this = &mut *self; let entry = this.handles_pread.vacant_entry(); let key = entry.key(); match this.ctx.pread(&file, buf, offset as i64, len, key) { Ok(()) => { entry.insert(HandleEntry { complete: complete }); } Err((buf, _token)) => { complete .send(Ok(( buf, Some(io::Error::new(io::ErrorKind::Other, "pread failed")), ))) .expect("Could not send AioThread error response"); } }; } Message::PWrite(file, offset, buf, complete) => { self.stats.curr_pwrites += 1; let this = &mut *self; let entry = this.handles_pwrite.vacant_entry(); let key = entry.key(); match this.ctx.pwrite(&file, buf, offset as i64, key) { Ok(()) => { entry.insert(HandleEntry { complete: complete }); } Err((buf, _token)) => { complete .send(Ok(( buf, Some(io::Error::new(io::ErrorKind::Other, "pread failed")), ))) .expect("Could not send AioThread error response"); } } } } // TODO: If max queue depth is reached, do not receive any // more messages, will cause clients to block } // TODO: Need busywait for submit timeout trace!(" batch size {}", self.ctx.batched()); while self.ctx.batched() > 0 { if let Err(e) = self.ctx.submit() { panic!("batch submit failed {:?}", e); } } let need_read = self.handles_pread.len() > 0 || self.handles_pwrite.len() > 0; if need_read { // Not sure I totally understand how the old need_read works vs the // new clear_read_ready call. 
trace!(" calling stream.clear_read_ready()"); Pin::new(&mut self.stream).clear_read_ready(cx, ready); } // Print some useful stats if self.stats.curr_polls % 10000 == 0 { let elapsed = self.last_report_ts.elapsed().expect("Time drift!"); let elapsed_ms = ((elapsed.as_secs() * 1_000_000_000) as f64 + elapsed.subsec_nanos() as f64) / 1000000.0; let polls = self.stats.curr_polls - self.stats.prev_polls; let preads = self.stats.curr_preads - self.stats.prev_preads; let pwrites = self.stats.curr_pwrites - self.stats.prev_pwrites; let preads_inflight = self.handles_pread.len(); let pwrites_inflight = self.handles_pwrite.len(); let thread_id = unsafe { libc::pthread_self() }; info!("threadid:{} polls:{:.0}/sec preads:{:.0}/sec pwrites:{:.0}/sec, inflight:({},{}) reqs/poll:{:.2}", thread_id, polls as f64 / elapsed_ms * 1000.0, preads as f64 / elapsed_ms * 1000.0, pwrites as f64 / elapsed_ms * 1000.0, preads_inflight, pwrites_inflight, (preads as f64 + pwrites as f64) / polls as f64); self.stats.prev_polls = self.stats.curr_polls; self.stats.prev_preads = self.stats.curr_preads; self.stats.prev_pwrites = self.stats.curr_pwrites; self.last_report_ts = SystemTime::now(); } // Run forever Poll::Pending } } // Register the eventfd with mio struct AioEventFd { inner: EventFD, } impl mio::Evented for AioEventFd { fn register( &self, poll: &mio::Poll, token: mio::Token, interest: mio::Ready, opts: mio::PollOpt, ) -> io::Result<()> { trace!("AioEventFd.register"); mio::unix::EventedFd(&self.inner.as_raw_fd()).register(poll, token, interest, opts) } fn reregister( &self, poll: &mio::Poll, token: mio::Token, interest: mio::Ready, opts: mio::PollOpt, ) -> io::Result<()> { trace!("AioEventFd.reregister"); mio::unix::EventedFd(&self.inner.as_raw_fd()).reregister(poll, token, interest, opts) } fn deregister(&self, poll: &mio::Poll) -> io::Result<()> { trace!("AioEventFd.deregister"); mio::unix::EventedFd(&self.inner.as_raw_fd()).deregister(poll) } } #[cfg(test)] mod tests { extern crate env_logger; extern crate tempdir; extern crate uuid; use self::tempdir::TempDir; use byteorder::{BigEndian, ByteOrder}; use std::fs::File; use std::io; use std::io::Write; use std::path::Path; use aio::{Message, Session}; use bytes::{Buf, BufMut, BytesMut, IntoBuf}; use libaio::directio::{DirectFile, FileAccess, Mode}; use futures::channel::oneshot; use futures::{stream, Future, Sink, Stream}; #[test] fn test_init() { let session = Session::new(512); assert!(session.is_ok()); } // TODO: Test max queue depth #[test] fn test_pread() { env_logger::init().unwrap(); let path = new_file_with_sequential_u64("pread", 1024); let file = DirectFile::open(path, Mode::Open, FileAccess::Read, 4096).unwrap(); let session = Session::new(2).unwrap(); let mut buf = BytesMut::with_capacity(512); unsafe { buf.set_len(512) }; let (tx, rx) = oneshot::channel(); let fut = session.inner.send(Message::PRead(file, 0, 512, buf, tx)); fut.wait(); let res = rx.wait(); assert!(res.is_ok()); let res = res.unwrap(); assert!(res.is_ok()); let (mut buf, err) = res.unwrap(); assert!(err.is_none()); for i in 0..(512 / 8) { assert_eq!(i, buf.split_to(8).into_buf().get_u64::<BigEndian>()); } assert_eq!(0, buf.len()); } #[test] fn test_pread_many() { //env_logger::init().unwrap(); let path = new_file_with_sequential_u64("pread", 10240); let session = Session::new(4).unwrap(); //let handle1 = session.handle(); //let handle2 = session.handle();
// let file = DirectFile::open(path.clone(), Mode::Open, FileAccess::Read, 4096).unwrap(); // let mut buf = BytesMut::with_capacity(512); // unsafe { buf.set_len(512) }; // let (tx, rx) = oneshot::channel(); // session.inner.send(Message::PRead(file, 0, 512, buf, tx)) // }); // let file1 = DirectFile::open(path.clone(), Mode::Open, FileAccess::Read, 4096).unwrap(); // let file2 = DirectFile::open(path.clone(), Mode::Open, FileAccess::Read, 4096).unwrap(); // let mut buf1 = BytesMut::with_capacity(512); // let mut buf2 = BytesMut::with_capacity(512); // unsafe { buf1.set_len(512) }; // unsafe { buf2.set_len(512) }; // let req1 = handle1.pread(file1, 0, 512, buf1); // let req2 = handle2.pread(file2, 0, 512, buf2); // //session.inner.clone().send(Message::PRead(file2, 0, 512, buf2, tx2)); // let res = req1.wait(); //let stream: Stream<Item=Message, Error=io::Error> = stream::iter(reads); //let stream: Stream<Item=Message, Error=io::Error> = stream::iter((0..5).map(Ok)); //let responses = session.inner.send
// let reads = (0..5).map(|_| { // println!("foo");
random_line_split
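// A std-only, synchronous sketch (an assumed simplification, not the aio.rs code) of
// the request/reply shape used by `Session` and `AioThread` above: callers push a
// message onto an mpsc channel, and each message carries its own reply sender so the
// worker thread can answer the right caller. The real module uses futures-style
// mpsc/oneshot channels and an AIO event loop; `Request` and its fields are invented.
use std::sync::mpsc;
use std::thread;

enum Request {
    Read { offset: usize, reply: mpsc::Sender<Vec<u8>> },
}

fn main() {
    let (tx, rx) = mpsc::channel::<Request>();

    // dedicated worker thread, in the spirit of the thread spawned by Session::new
    let worker = thread::spawn(move || {
        for req in rx {
            match req {
                Request::Read { offset, reply } => {
                    // pretend to read 4 bytes at `offset`
                    let data = vec![offset as u8; 4];
                    let _ = reply.send(data);
                }
            }
        }
    });

    // a caller builds a private reply channel and blocks on it
    let (reply_tx, reply_rx) = mpsc::channel();
    tx.send(Request::Read { offset: 7, reply: reply_tx }).unwrap();
    assert_eq!(reply_rx.recv().unwrap(), vec![7u8; 4]);

    drop(tx); // closing the request channel ends the worker loop
    worker.join().unwrap();
}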
aio.rs
DirectFile; use libaio::raw::{IoOp, Iocontext}; use std::os::unix::io::AsRawFd; use tokio::runtime::current_thread; use tokio_net::util::PollEvented; use libc; use slab::Slab; use log::{info, trace}; #[derive(Debug)] pub enum Message { PRead( DirectFile, usize, usize, BytesMut, oneshot::Sender<io::Result<(BytesMut, Option<io::Error>)>>, ), PWrite( DirectFile, usize, BytesMut, oneshot::Sender<io::Result<(BytesMut, Option<io::Error>)>>, ), } #[derive(Debug)] pub struct Session { pub inner: mpsc::Sender<Message>, thread: JoinHandle<()>, pthread: libc::pthread_t, } #[derive(Debug, Clone)] struct SessionHandle { inner: mpsc::Sender<Message>, } impl Session { pub fn new(max_queue_depth: usize) -> io::Result<Session> { // Users of session interact with us by sending messages. let (tx, rx) = mpsc::channel::<Message>(max_queue_depth); let (tid_tx, tid_rx) = oneshot::channel(); // Spawn a thread with it's own event loop dedicated to AIO let t = thread::spawn(move || { let mut core = current_thread::Runtime::new().unwrap(); // Return the pthread id so the main thread can bind this // thread to a specific core tid_tx.send(unsafe { libc::pthread_self() }).unwrap(); let mut ctx = match Iocontext::<usize, BytesMut, BytesMut>::new(max_queue_depth) { Ok(ctx) => ctx, Err(e) => panic!("could not create Iocontext: {}", e), }; // Using an eventfd, the kernel can notify us when there's // one or more AIO results ready. See'man eventfd' match ctx.get_evfd_stream() { Ok(_) => (), Err(e) => panic!("get_evfd_stream failed: {}", e), }; let evfd = ctx.evfd.as_ref().unwrap().clone(); // Add the eventfd to the file descriptors we are // interested in. This will use epoll under the hood. let source = AioEventFd { inner: evfd }; let stream = PollEvented::new(source); let fut = AioThread { rx: rx, ctx: ctx, stream: stream, handles_pread: Slab::with_capacity(max_queue_depth), handles_pwrite: Slab::with_capacity(max_queue_depth), last_report_ts: SystemTime::now(), stats: AioStats { ..Default::default() }, }; core.spawn(fut); core.run().unwrap(); }); let tid = executor::block_on(tid_rx).unwrap(); Ok(Session { inner: tx, thread: t, pthread: tid, }) } pub fn thread_id(&self) -> libc::pthread_t { self.pthread } } struct AioThread { rx: mpsc::Receiver<Message>, ctx: Iocontext<usize, BytesMut, BytesMut>, stream: PollEvented<AioEventFd>, // Handles to outstanding requests handles_pread: Slab<HandleEntry>, handles_pwrite: Slab<HandleEntry>, last_report_ts: SystemTime, stats: AioStats, } struct HandleEntry { complete: oneshot::Sender<io::Result<(BytesMut, Option<io::Error>)>>, } #[derive(Default)] struct
{ curr_polls: u64, curr_preads: u64, curr_pwrites: u64, prev_polls: u64, prev_preads: u64, prev_pwrites: u64, } impl Future for AioThread { type Output = (); fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> { trace!( "============ AioThread.poll (inflight_preads:{} inflight_pwrites:{})", self.handles_pread.len(), self.handles_pwrite.len() ); self.stats.curr_polls += 1; // If there are any responses from the kernel available, read // as many as we can without blocking. let ready = mio::Ready::readable(); if Pin::new(&mut self.stream) .poll_read_ready(cx, ready) .is_ready() { match self.ctx.results(0, 100, None) { Ok(res) => { trace!(" got {} AIO responses", res.len()); for (op, result) in res.into_iter() { match op { IoOp::Pread(retbuf, token) => { trace!( " got pread response, token {}, is error? {}", token, result.is_err() ); match result { Ok(_) => { let entry = self.handles_pread.remove(token); //?.unwrap(); //let elapsed = entry.timestamp.elapsed().expect("Time drift!"); //trace!("pread returned in {} us", ((elapsed.as_secs() * 1_000_000_000) + elapsed.subsec_nanos() as u64) / 1000); //entry.complete.send(Ok((retbuf, None))).expect("Could not send AioSession response"); entry.complete.send(Ok((retbuf, None))); } Err(e) => panic!("pread error {:?}", e), } } IoOp::Pwrite(retbuf, token) => { trace!( " got pwrite response, token {}, is error? {}", token, result.is_err() ); match result { Ok(_) => { let entry = self.handles_pwrite.remove(token); //?.unwrap(); entry.complete.send(Ok((retbuf, None))); } Err(e) => panic!("pwrite error {:?}", e), } } _ => (), } } } Err(e) => panic!("ctx.results failed: {:?}", e), } }; // Read all available incoming requests, enqueue in AIO batch loop { let msg = match Pin::new(&mut self.rx).poll_next(cx) { Poll::Ready(Some(msg)) => msg, Poll::Ready(None) => break, Poll::Pending => break, // AioThread.poll is automatically scheduled }; match msg { Message::PRead(file, offset, len, buf, complete) => { self.stats.curr_preads += 1; // The self is a Pin<&mut Self>. Obtaining mutable references to the fields // will require going through DerefMut, which requires unique borrow. // You can avoid the issue by dereferencing self once on entry to the method // let this = &mut *self, and then continue accessing it // through this. // The basic idea is that each access to self.deref_mut() // basically will create a new mutable reference to self, if // you do it multiple times you get the error, so by // effectively calling deref_mut by hand I can save the // reference once and use it when needed. 
let this = &mut *self; let entry = this.handles_pread.vacant_entry(); let key = entry.key(); match this.ctx.pread(&file, buf, offset as i64, len, key) { Ok(()) => { entry.insert(HandleEntry { complete: complete }); } Err((buf, _token)) => { complete .send(Ok(( buf, Some(io::Error::new(io::ErrorKind::Other, "pread failed")), ))) .expect("Could not send AioThread error response"); } }; } Message::PWrite(file, offset, buf, complete) => { self.stats.curr_pwrites += 1; let this = &mut *self; let entry = this.handles_pwrite.vacant_entry(); let key = entry.key(); match this.ctx.pwrite(&file, buf, offset as i64, key) { Ok(()) => { entry.insert(HandleEntry { complete: complete }); } Err((buf, _token)) => { complete .send(Ok(( buf, Some(io::Error::new(io::ErrorKind::Other, "pread failed")), ))) .expect("Could not send AioThread error response"); } } } } // TODO: If max queue depth is reached, do not receive any // more messages, will cause clients to block } // TODO: Need busywait for submit timeout trace!(" batch size {}", self.ctx.batched()); while self.ctx.batched() > 0 { if let Err(e) = self.ctx.submit() { panic!("batch submit failed {:?}", e); } } let need_read = self.handles_pread.len() > 0 || self.handles_pwrite.len() > 0; if need_read { // Not sure I totally understand how the old need_read works vs the // new clear_read_ready call. trace!(" calling stream.clear_read_ready()"); Pin::new(&mut self.stream).clear_read_ready(cx, ready); } // Print some useful stats if self.stats.curr_polls % 10000 == 0 { let elapsed = self.last_report_ts.elapsed().expect("Time drift!"); let elapsed_ms = ((elapsed.as_secs() * 1_000_000_000) as f64 + elapsed.subsec_nanos() as f64) / 1000000.0; let polls = self.stats.curr_polls - self.stats.prev_polls; let preads = self.stats.curr_preads - self.stats.prev_preads; let pwrites = self.stats.curr_pwrites - self.stats.prev_pwrites; let preads_inflight = self.handles_pread.len(); let pwrites_inflight = self.handles_pwrite.len(); let thread_id = unsafe { libc::pthread_self() }; info!("threadid:{} polls:{:.0}/sec preads:{:.0}/sec pwrites:{:.0}/sec, inflight:({},{}) reqs/poll:{:.2}", thread_id, polls as f64 / elapsed_ms * 1000.0, preads as f64 / elapsed_ms * 1000.0, pwrites as f64 / elapsed_ms * 1000.0, preads_inflight, pwrites_inflight, (preads as f64 + pwrites as f64) / polls as f64); self.stats.prev_polls = self.stats.curr_polls; self.stats.prev_preads = self.stats.curr_preads; self.stats.prev_pwrites = self.stats.curr_pwrites; self.last_report_ts = SystemTime::now(); } // Run forever Poll::Pending } } // Register the eventfd with mio struct AioEventFd { inner: EventFD, } impl mio::Evented for AioEventFd { fn register( &self, poll: &mio::Poll, token: mio::Token, interest: mio::Ready, opts: mio::PollOpt, ) -> io::Result<()> { trace!("AioEventFd.register"); mio::unix::EventedFd(&self.inner.as_raw_fd()).register(poll, token, interest, opts) } fn reregister( &self, poll: &mio::Poll, token: mio::Token, interest: mio::Ready, opts: mio::PollOpt, ) -> io::Result<()> { trace!("AioEventFd.reregister"); mio::unix::EventedFd(&self.inner.as_raw_fd()).reregister(poll, token, interest, opts) } fn deregister(&self, poll: &mio::Poll) -> io::Result<()> { trace!("AioEventFd.deregister"); mio::unix::EventedFd(&self.inner.as_raw_fd()).deregister(poll) } } #[cfg(test)] mod tests { extern crate env_logger; extern crate tempdir; extern crate uuid; use self::tempdir::TempDir; use byteorder::{BigEndian, ByteOrder}; use std::fs::File; use std::io; use std::io::Write; use std::path::Path; use 
aio::{Message, Session}; use bytes::{Buf, BufMut, BytesMut, IntoBuf}; use libaio::directio::{DirectFile, FileAccess, Mode}; use futures::channel::oneshot; use futures::{stream, Future, Sink, Stream}; #[test] fn test_init() { let session = Session::new(512); assert!(session.is_ok()); } // TODO: Test max queue depth #[test] fn test_pread() { env_logger::init().unwrap(); let path = new_file_with_sequential_u64("pread", 1024); let file = DirectFile::open(path, Mode::Open, FileAccess::Read, 4096).unwrap(); let session = Session::new(2).unwrap(); let mut buf = BytesMut::with_capacity(512); unsafe { buf.set_len(512) }; let (tx, rx) = oneshot::channel(); let fut = session.inner.send(Message::PRead(file, 0, 512, buf, tx)); fut.wait(); let res = rx.wait(); assert!(res.is_ok()); let res = res.unwrap(); assert!(res.is_ok()); let (mut buf, err) = res.unwrap(); assert!(err.is_none()); for i in 0..(512 / 8) { assert_eq!(i, buf.split_to(8).into_buf().get_u64::<BigEndian>()); } assert_eq!(0, buf.len()); } #[test] fn test_pread_many() { //env_logger::init().unwrap(); let path = new_file_with_sequential_u64("pread", 10240); let session = Session::new(4).unwrap(); //let handle1 = session.handle(); //let handle2 = session.handle(); // let reads = (0..5).map(|_| { // println!("foo"); // let file = DirectFile::open(path.clone(), Mode::Open, FileAccess::Read, 4096).unwrap(); // let mut buf = BytesMut::with_capacity(512); // unsafe { buf.set_len(512) }; // let (tx, rx) = oneshot::channel(); // session.inner.send(Message::PRead(file, 0, 512, buf, tx)) // }); // let file1 = DirectFile::open(path.clone(), Mode::Open, FileAccess::Read, 4096).unwrap(); // let file2 = DirectFile::open(path.clone(), Mode::Open, FileAccess::Read, 4096).unwrap(); // let mut buf1 = BytesMut::with_capacity(512); // let mut buf2 = BytesMut::with_capacity(512); // unsafe { buf1.set_len(512) }; // unsafe { buf2.set_len(512) }; // let req1 = handle1.pread(file1, 0, 512, buf1); // let req2 = handle2.pread(file2, 0, 512, buf2); // //session.inner.clone().send(Message::PRead(file2, 0, 512, buf2, tx2)); // let res = req1.wait(); //let stream: Stream<Item=Message, Error=io::Error> = stream::iter(reads); //let stream: Stream<Item=Message, Error=io::Error> = stream::iter((0..5).map(Ok)); //let responses = session.inner
AioStats
identifier_name
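// A minimal sketch of the borrow pattern described in the long comment above: with
// `self: Pin<&mut Self>`, every field access through `self` goes via DerefMut, so the
// code reborrows once with `let this = &mut *self;` and then uses `this` for all
// fields. This is not the aio.rs future; it is a toy `Future`, and the reborrow only
// compiles because the struct is Unpin. It assumes the `futures` crate's executor,
// which this file already uses via `executor::block_on`.
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};

struct Counter {
    polls: u64,
    pending: Vec<u8>,
}

impl Future for Counter {
    type Output = u64;

    fn poll(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<u64> {
        // one reborrow, then several fields can be borrowed through `this` together
        let this = &mut *self;
        this.polls += 1;
        this.pending.clear();
        Poll::Ready(this.polls)
    }
}

fn main() {
    let n = futures::executor::block_on(Counter { polls: 0, pending: vec![1, 2, 3] });
    assert_eq!(n, 1);
}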
handshake.rs
// Copyright (c) The Libra Core Contributors // SPDX-License-Identifier: Apache-2.0 //! The handshake module implements the handshake part of the protocol. //! This module also implements additional anti-DoS mitigation, //! by including a timestamp in each handshake initialization message. //! Refer to the module's documentation for more information. //! A successful handshake returns a `NoiseStream` which is defined in [socket] module. //! //! [socket]: network::noise_wrapper::socket use crate::noise_wrapper::stream::NoiseStream; use futures::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}; use libra_config::config::NetworkPeerInfo; use libra_crypto::{noise, x25519}; use libra_types::PeerId; use netcore::transport::ConnectionOrigin; use std::{ collections::HashMap, io, sync::{Arc, RwLock}, time, }; /// In a mutually authenticated network, a client message is accompanied with a timestamp. /// This is in order to prevent replay attacks, where the attacker does not know the client's static key, /// but can still replay a handshake message in order to force a peer into performing a few Diffie-Hellman key exchange operations. /// /// Thus, to prevent replay attacks a responder will always check if the timestamp is strictly increasing, /// effectively considering it as a stateful counter. /// /// If the client timestamp has been seen before, or is not strictly increasing, /// we can abort the handshake early and avoid heavy Diffie-Hellman computations. /// If the client timestamp is valid, we store it. #[derive(Default)] pub struct AntiReplayTimestamps(HashMap<x25519::PublicKey, u64>); impl AntiReplayTimestamps { /// Returns true if the timestamp has already been observed for this peer /// or if it's an old timestamp pub fn is_replay(&self, pubkey: x25519::PublicKey, timestamp: u64) -> bool { if let Some(last_timestamp) = self.0.get(&pubkey) { &timestamp <= last_timestamp } else { false } } /// Stores the timestamp pub fn store_timestamp(&mut self, pubkey: x25519::PublicKey, timestamp: u64) { self.0 .entry(pubkey) .and_modify(|last_timestamp| *last_timestamp = timestamp) .or_insert(timestamp); } } /// The timestamp is sent as a payload, so that it is encrypted. /// Note that a millisecond value is a 16-byte value in rust, /// but as we use it to store a duration since UNIX_EPOCH we will never use more than 8 bytes. const PAYLOAD_SIZE: usize = 8; // Noise Wrapper // ------------- // Noise by default is not aware of the above or lower protocol layers, // We thus need to build this wrapper around Noise to both: // // - fragment messages that need to be encrypted by noise (due to its maximum 65535-byte messages) // - understand how long noise messages we send and receive are, // in order to pass them to the noise implementaiton // /// The Noise configuration to be used to perform a protocol upgrade on an underlying socket. pub struct NoiseWrapper(noise::NoiseConfig); impl NoiseWrapper { /// Create a new NoiseConfig with the provided keypair pub fn new(key: x25519::PrivateKey) -> Self { Self(noise::NoiseConfig::new(key)) } /// Perform a protocol upgrade on an underlying connection. In addition perform the noise IX /// handshake to establish a noise stream and exchange static public keys. Upon success, /// returns the static public key of the remote as well as a NoiseStream. 
// TODO(mimoo, philp9): this code could be inlined in transport.rs once the monolithic network is done pub async fn upgrade_connection<TSocket>( &self, socket: TSocket, origin: ConnectionOrigin, anti_replay_timestamps: Option<Arc<RwLock<AntiReplayTimestamps>>>, remote_public_key: Option<x25519::PublicKey>, trusted_peers: Option<&Arc<RwLock<HashMap<PeerId, NetworkPeerInfo>>>>, ) -> io::Result<(x25519::PublicKey, NoiseStream<TSocket>)> where TSocket: AsyncRead + AsyncWrite + Unpin, { // perform the noise handshake let socket = match origin { ConnectionOrigin::Outbound => { let remote_public_key = match remote_public_key { Some(key) => key, None if cfg!(any(test, feature = "fuzzing")) => unreachable!(), None => { return Err(std::io::Error::new( std::io::ErrorKind::Other, "noise: SHOULD NOT HAPPEN: missing server's key when dialing", )); } }; self.dial(socket, anti_replay_timestamps.is_some(), remote_public_key) .await? } ConnectionOrigin::Inbound => { self.accept(socket, anti_replay_timestamps, trusted_peers) .await? } }; // return remote public key with a socket including the noise stream let remote_public_key = socket.get_remote_static(); Ok((remote_public_key, socket)) } pub async fn dial<TSocket>( &self, mut socket: TSocket, mutual_authentication: bool, remote_public_key: x25519::PublicKey, ) -> io::Result<NoiseStream<TSocket>> where TSocket: AsyncRead + AsyncWrite + Unpin, { // in mutual authenticated networks, send a payload of the current timestamp (in milliseconds) let payload = if mutual_authentication { let now: u64 = time::SystemTime::now() .duration_since(time::UNIX_EPOCH) .expect("system clock should work") .as_millis() as u64; // e.g. [157, 126, 253, 97, 114, 1, 0, 0] let now = now.to_le_bytes().to_vec(); Some(now) } else { None }; // create first handshake message (-> e, es, s, ss) let mut rng = rand::rngs::OsRng; let mut first_message = [0u8; noise::handshake_init_msg_len(PAYLOAD_SIZE)]; let initiator_state = self .0 .initiate_connection( &mut rng, &[], remote_public_key, payload.as_ref().map(|x| &x[..]), &mut first_message, ) .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?; // write the first handshake message socket.write_all(&first_message).await?; // flush socket.flush().await?; // receive the server's response (<- e, ee, se) let mut server_response = [0u8; noise::handshake_resp_msg_len(0)]; socket.read_exact(&mut server_response).await?; // parse the server's response // TODO: security logging here? 
(mimoo) let (_, session) = self .0 .finalize_connection(initiator_state, &server_response) .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?; // finalize the connection Ok(NoiseStream::new(socket, session)) } pub async fn accept<TSocket>( &self, mut socket: TSocket, anti_replay_timestamps: Option<Arc<RwLock<AntiReplayTimestamps>>>, trusted_peers: Option<&Arc<RwLock<HashMap<PeerId, NetworkPeerInfo>>>>, ) -> io::Result<NoiseStream<TSocket>> where TSocket: AsyncRead + AsyncWrite + Unpin, { // receive the initiation message let mut client_init_message = [0u8; noise::handshake_init_msg_len(PAYLOAD_SIZE)]; socket.read_exact(&mut client_init_message).await?; // parse it let (their_public_key, handshake_state, payload) = self .0 .parse_client_init_message(&[], &client_init_message) .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?; // make sure the public key is a validator before continuing (if we're in the validator network) if let Some(trusted_peers) = trusted_peers { let found = trusted_peers .read() .map_err(|_| { io::Error::new( io::ErrorKind::Other, "noise: unable to read trusted_peers lock", ) })? .iter() .any(|(_peer_id, public_keys)| public_keys.identity_public_key == their_public_key); if !found {
their_public_key ), )); } } // if on a mutually authenticated network if let Some(anti_replay_timestamps) = &anti_replay_timestamps { // check that the payload received as the client timestamp (in seconds) if payload.len()!= PAYLOAD_SIZE { // TODO: security logging (mimoo) return Err(io::Error::new( io::ErrorKind::InvalidData, "noise: client initiated connection without an 8-byte timestamp", )); } let mut client_timestamp = [0u8; PAYLOAD_SIZE]; client_timestamp.copy_from_slice(&payload); let client_timestamp = u64::from_le_bytes(client_timestamp); // check the timestamp is not a replay let mut anti_replay_timestamps = anti_replay_timestamps.write().map_err(|_| { io::Error::new( io::ErrorKind::Other, "noise: unable to read anti_replay_timestamps lock", ) })?; if anti_replay_timestamps.is_replay(their_public_key, client_timestamp) { // TODO: security logging the ip + blocking the ip? (mimoo) return Err(io::Error::new( io::ErrorKind::InvalidData, format!( "noise: client initiated connection with a timestamp already seen before: {}", client_timestamp ), )); } // store the timestamp anti_replay_timestamps.store_timestamp(their_public_key, client_timestamp); } // construct the response let mut rng = rand::rngs::OsRng; let mut server_response = [0u8; noise::handshake_resp_msg_len(0)]; let session = self .0 .respond_to_client(&mut rng, handshake_state, None, &mut server_response) .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?; // send the response socket.write_all(&server_response).await?; // finalize the connection Ok(NoiseStream::new(socket, session)) } } // // Tests // ----- // #[cfg(test)] mod test { use super::*; use futures::{executor::block_on, future::join}; use libra_crypto::test_utils::TEST_SEED; use memsocket::MemorySocket; use std::{ collections::HashMap, io, sync::{Arc, RwLock}, }; use libra_crypto::traits::Uniform as _; use rand::SeedableRng as _; /// helper to setup two testing peers fn build_peers() -> ( (NoiseWrapper, x25519::PublicKey), (NoiseWrapper, x25519::PublicKey), ) { let mut rng = ::rand::rngs::StdRng::from_seed(TEST_SEED); let client_private = x25519::PrivateKey::generate(&mut rng); let client_public = client_private.public_key(); let server_private = x25519::PrivateKey::generate(&mut rng); let server_public = server_private.public_key(); let client = NoiseWrapper::new(client_private); let server = NoiseWrapper::new(server_private); ((client, client_public), (server, server_public)) } /// helper to perform a noise handshake with two peers fn perform_handshake( client: NoiseWrapper, server_public_key: x25519::PublicKey, server: NoiseWrapper, trusted_peers: Option<&Arc<RwLock<HashMap<PeerId, NetworkPeerInfo>>>>, ) -> io::Result<(NoiseStream<MemorySocket>, NoiseStream<MemorySocket>)> { // create an in-memory socket for testing let (dialer_socket, listener_socket) = MemorySocket::new_pair(); let anti_replay_timestamps = Arc::new(RwLock::new(AntiReplayTimestamps::default())); // perform the handshake let (client_session, server_session) = block_on(join( client.dial(dialer_socket, true, server_public_key), server.accept(listener_socket, Some(anti_replay_timestamps), trusted_peers), )); // Ok((client_session?, server_session?)) } #[test] fn test_handshake() { // perform handshake with two testing peers let ((client, client_public), (server, server_public)) = build_peers(); let (client, server) = perform_handshake(client, server_public, server, None).unwrap(); assert_eq!(client.get_remote_static(), server_public,); assert_eq!(server.get_remote_static(), client_public,); 
} }
// TODO: security logging (mimoo) return Err(io::Error::new( io::ErrorKind::InvalidData, format!( "noise: client connecting to us with an unknown public key: {}",
random_line_split
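// A hedged, self-contained sketch of the anti-replay rule implemented above, with a
// plain 32-byte array standing in for `x25519::PublicKey` (the real type comes from
// libra_crypto): a timestamp is a replay unless it is strictly greater than the last
// value stored for that key, and accepted values are recorded. It also shows the
// 8-byte little-endian round trip used for the timestamp payload.
use std::collections::HashMap;

#[derive(Default)]
struct AntiReplay(HashMap<[u8; 32], u64>);

impl AntiReplay {
    fn is_replay(&self, key: [u8; 32], timestamp: u64) -> bool {
        match self.0.get(&key) {
            Some(last) => timestamp <= *last,
            None => false,
        }
    }

    fn store(&mut self, key: [u8; 32], timestamp: u64) {
        self.0.insert(key, timestamp);
    }
}

fn main() {
    let key = [7u8; 32];
    let mut seen = AntiReplay::default();

    // the dialer sends the millisecond timestamp as 8 little-endian bytes
    let now: u64 = 1_234_567;
    let decoded = u64::from_le_bytes(now.to_le_bytes());
    assert_eq!(now, decoded);

    assert!(!seen.is_replay(key, decoded));
    seen.store(key, decoded);
    assert!(seen.is_replay(key, decoded)); // same timestamp again: rejected
    assert!(!seen.is_replay(key, decoded + 1)); // strictly increasing: accepted
}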
handshake.rs
// Copyright (c) The Libra Core Contributors // SPDX-License-Identifier: Apache-2.0 //! The handshake module implements the handshake part of the protocol. //! This module also implements additional anti-DoS mitigation, //! by including a timestamp in each handshake initialization message. //! Refer to the module's documentation for more information. //! A successful handshake returns a `NoiseStream` which is defined in [socket] module. //! //! [socket]: network::noise_wrapper::socket use crate::noise_wrapper::stream::NoiseStream; use futures::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}; use libra_config::config::NetworkPeerInfo; use libra_crypto::{noise, x25519}; use libra_types::PeerId; use netcore::transport::ConnectionOrigin; use std::{ collections::HashMap, io, sync::{Arc, RwLock}, time, }; /// In a mutually authenticated network, a client message is accompanied with a timestamp. /// This is in order to prevent replay attacks, where the attacker does not know the client's static key, /// but can still replay a handshake message in order to force a peer into performing a few Diffie-Hellman key exchange operations. /// /// Thus, to prevent replay attacks a responder will always check if the timestamp is strictly increasing, /// effectively considering it as a stateful counter. /// /// If the client timestamp has been seen before, or is not strictly increasing, /// we can abort the handshake early and avoid heavy Diffie-Hellman computations. /// If the client timestamp is valid, we store it. #[derive(Default)] pub struct AntiReplayTimestamps(HashMap<x25519::PublicKey, u64>); impl AntiReplayTimestamps { /// Returns true if the timestamp has already been observed for this peer /// or if it's an old timestamp pub fn is_replay(&self, pubkey: x25519::PublicKey, timestamp: u64) -> bool { if let Some(last_timestamp) = self.0.get(&pubkey) { &timestamp <= last_timestamp } else { false } } /// Stores the timestamp pub fn store_timestamp(&mut self, pubkey: x25519::PublicKey, timestamp: u64) { self.0 .entry(pubkey) .and_modify(|last_timestamp| *last_timestamp = timestamp) .or_insert(timestamp); } } /// The timestamp is sent as a payload, so that it is encrypted. /// Note that a millisecond value is a 16-byte value in rust, /// but as we use it to store a duration since UNIX_EPOCH we will never use more than 8 bytes. const PAYLOAD_SIZE: usize = 8; // Noise Wrapper // ------------- // Noise by default is not aware of the above or lower protocol layers, // We thus need to build this wrapper around Noise to both: // // - fragment messages that need to be encrypted by noise (due to its maximum 65535-byte messages) // - understand how long noise messages we send and receive are, // in order to pass them to the noise implementaiton // /// The Noise configuration to be used to perform a protocol upgrade on an underlying socket. pub struct NoiseWrapper(noise::NoiseConfig); impl NoiseWrapper { /// Create a new NoiseConfig with the provided keypair pub fn new(key: x25519::PrivateKey) -> Self { Self(noise::NoiseConfig::new(key)) } /// Perform a protocol upgrade on an underlying connection. In addition perform the noise IX /// handshake to establish a noise stream and exchange static public keys. Upon success, /// returns the static public key of the remote as well as a NoiseStream. 
// TODO(mimoo, philp9): this code could be inlined in transport.rs once the monolithic network is done pub async fn upgrade_connection<TSocket>( &self, socket: TSocket, origin: ConnectionOrigin, anti_replay_timestamps: Option<Arc<RwLock<AntiReplayTimestamps>>>, remote_public_key: Option<x25519::PublicKey>, trusted_peers: Option<&Arc<RwLock<HashMap<PeerId, NetworkPeerInfo>>>>, ) -> io::Result<(x25519::PublicKey, NoiseStream<TSocket>)> where TSocket: AsyncRead + AsyncWrite + Unpin, { // perform the noise handshake let socket = match origin { ConnectionOrigin::Outbound => { let remote_public_key = match remote_public_key { Some(key) => key, None if cfg!(any(test, feature = "fuzzing")) => unreachable!(), None =>
}; self.dial(socket, anti_replay_timestamps.is_some(), remote_public_key) .await? } ConnectionOrigin::Inbound => { self.accept(socket, anti_replay_timestamps, trusted_peers) .await? } }; // return remote public key with a socket including the noise stream let remote_public_key = socket.get_remote_static(); Ok((remote_public_key, socket)) } pub async fn dial<TSocket>( &self, mut socket: TSocket, mutual_authentication: bool, remote_public_key: x25519::PublicKey, ) -> io::Result<NoiseStream<TSocket>> where TSocket: AsyncRead + AsyncWrite + Unpin, { // in mutual authenticated networks, send a payload of the current timestamp (in milliseconds) let payload = if mutual_authentication { let now: u64 = time::SystemTime::now() .duration_since(time::UNIX_EPOCH) .expect("system clock should work") .as_millis() as u64; // e.g. [157, 126, 253, 97, 114, 1, 0, 0] let now = now.to_le_bytes().to_vec(); Some(now) } else { None }; // create first handshake message (-> e, es, s, ss) let mut rng = rand::rngs::OsRng; let mut first_message = [0u8; noise::handshake_init_msg_len(PAYLOAD_SIZE)]; let initiator_state = self .0 .initiate_connection( &mut rng, &[], remote_public_key, payload.as_ref().map(|x| &x[..]), &mut first_message, ) .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?; // write the first handshake message socket.write_all(&first_message).await?; // flush socket.flush().await?; // receive the server's response (<- e, ee, se) let mut server_response = [0u8; noise::handshake_resp_msg_len(0)]; socket.read_exact(&mut server_response).await?; // parse the server's response // TODO: security logging here? (mimoo) let (_, session) = self .0 .finalize_connection(initiator_state, &server_response) .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?; // finalize the connection Ok(NoiseStream::new(socket, session)) } pub async fn accept<TSocket>( &self, mut socket: TSocket, anti_replay_timestamps: Option<Arc<RwLock<AntiReplayTimestamps>>>, trusted_peers: Option<&Arc<RwLock<HashMap<PeerId, NetworkPeerInfo>>>>, ) -> io::Result<NoiseStream<TSocket>> where TSocket: AsyncRead + AsyncWrite + Unpin, { // receive the initiation message let mut client_init_message = [0u8; noise::handshake_init_msg_len(PAYLOAD_SIZE)]; socket.read_exact(&mut client_init_message).await?; // parse it let (their_public_key, handshake_state, payload) = self .0 .parse_client_init_message(&[], &client_init_message) .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?; // make sure the public key is a validator before continuing (if we're in the validator network) if let Some(trusted_peers) = trusted_peers { let found = trusted_peers .read() .map_err(|_| { io::Error::new( io::ErrorKind::Other, "noise: unable to read trusted_peers lock", ) })? 
.iter() .any(|(_peer_id, public_keys)| public_keys.identity_public_key == their_public_key); if!found { // TODO: security logging (mimoo) return Err(io::Error::new( io::ErrorKind::InvalidData, format!( "noise: client connecting to us with an unknown public key: {}", their_public_key ), )); } } // if on a mutually authenticated network if let Some(anti_replay_timestamps) = &anti_replay_timestamps { // check that the payload received as the client timestamp (in seconds) if payload.len()!= PAYLOAD_SIZE { // TODO: security logging (mimoo) return Err(io::Error::new( io::ErrorKind::InvalidData, "noise: client initiated connection without an 8-byte timestamp", )); } let mut client_timestamp = [0u8; PAYLOAD_SIZE]; client_timestamp.copy_from_slice(&payload); let client_timestamp = u64::from_le_bytes(client_timestamp); // check the timestamp is not a replay let mut anti_replay_timestamps = anti_replay_timestamps.write().map_err(|_| { io::Error::new( io::ErrorKind::Other, "noise: unable to read anti_replay_timestamps lock", ) })?; if anti_replay_timestamps.is_replay(their_public_key, client_timestamp) { // TODO: security logging the ip + blocking the ip? (mimoo) return Err(io::Error::new( io::ErrorKind::InvalidData, format!( "noise: client initiated connection with a timestamp already seen before: {}", client_timestamp ), )); } // store the timestamp anti_replay_timestamps.store_timestamp(their_public_key, client_timestamp); } // construct the response let mut rng = rand::rngs::OsRng; let mut server_response = [0u8; noise::handshake_resp_msg_len(0)]; let session = self .0 .respond_to_client(&mut rng, handshake_state, None, &mut server_response) .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?; // send the response socket.write_all(&server_response).await?; // finalize the connection Ok(NoiseStream::new(socket, session)) } } // // Tests // ----- // #[cfg(test)] mod test { use super::*; use futures::{executor::block_on, future::join}; use libra_crypto::test_utils::TEST_SEED; use memsocket::MemorySocket; use std::{ collections::HashMap, io, sync::{Arc, RwLock}, }; use libra_crypto::traits::Uniform as _; use rand::SeedableRng as _; /// helper to setup two testing peers fn build_peers() -> ( (NoiseWrapper, x25519::PublicKey), (NoiseWrapper, x25519::PublicKey), ) { let mut rng = ::rand::rngs::StdRng::from_seed(TEST_SEED); let client_private = x25519::PrivateKey::generate(&mut rng); let client_public = client_private.public_key(); let server_private = x25519::PrivateKey::generate(&mut rng); let server_public = server_private.public_key(); let client = NoiseWrapper::new(client_private); let server = NoiseWrapper::new(server_private); ((client, client_public), (server, server_public)) } /// helper to perform a noise handshake with two peers fn perform_handshake( client: NoiseWrapper, server_public_key: x25519::PublicKey, server: NoiseWrapper, trusted_peers: Option<&Arc<RwLock<HashMap<PeerId, NetworkPeerInfo>>>>, ) -> io::Result<(NoiseStream<MemorySocket>, NoiseStream<MemorySocket>)> { // create an in-memory socket for testing let (dialer_socket, listener_socket) = MemorySocket::new_pair(); let anti_replay_timestamps = Arc::new(RwLock::new(AntiReplayTimestamps::default())); // perform the handshake let (client_session, server_session) = block_on(join( client.dial(dialer_socket, true, server_public_key), server.accept(listener_socket, Some(anti_replay_timestamps), trusted_peers), )); // Ok((client_session?, server_session?)) } #[test] fn test_handshake() { // perform handshake with two testing peers let 
((client, client_public), (server, server_public)) = build_peers(); let (client, server) = perform_handshake(client, server_public, server, None).unwrap(); assert_eq!(client.get_remote_static(), server_public,); assert_eq!(server.get_remote_static(), client_public,); } }
{ return Err(std::io::Error::new( std::io::ErrorKind::Other, "noise: SHOULD NOT HAPPEN: missing server's key when dialing", )); }
conditional_block
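A sketch of the key-selection rule in `upgrade_connection`: an outbound dial must be handed the server's static public key up front, while an inbound accept needs none. `Origin` is a local stand-in for `netcore::transport::ConnectionOrigin` (assumed to have the same two variants), and the 32-byte array stands in for `x25519::PublicKey`.

use std::io;

/// Stand-in for the connection origin (assumption: same two variants as the real enum).
enum Origin {
    Outbound,
    Inbound,
}

/// Outbound dials require the remote static key; inbound accepts do not.
fn required_remote_key(
    origin: &Origin,
    remote_key: Option<[u8; 32]>,
) -> io::Result<Option<[u8; 32]>> {
    match origin {
        Origin::Outbound => match remote_key {
            Some(key) => Ok(Some(key)),
            None => Err(io::Error::new(
                io::ErrorKind::Other,
                "noise: missing server's key when dialing",
            )),
        },
        Origin::Inbound => Ok(None),
    }
}

fn main() {
    assert!(required_remote_key(&Origin::Outbound, None).is_err());
    assert!(required_remote_key(&Origin::Inbound, None).is_ok());
}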
handshake.rs
// Copyright (c) The Libra Core Contributors // SPDX-License-Identifier: Apache-2.0 //! The handshake module implements the handshake part of the protocol. //! This module also implements additional anti-DoS mitigation, //! by including a timestamp in each handshake initialization message. //! Refer to the module's documentation for more information. //! A successful handshake returns a `NoiseStream` which is defined in [socket] module. //! //! [socket]: network::noise_wrapper::socket use crate::noise_wrapper::stream::NoiseStream; use futures::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}; use libra_config::config::NetworkPeerInfo; use libra_crypto::{noise, x25519}; use libra_types::PeerId; use netcore::transport::ConnectionOrigin; use std::{ collections::HashMap, io, sync::{Arc, RwLock}, time, }; /// In a mutually authenticated network, a client message is accompanied with a timestamp. /// This is in order to prevent replay attacks, where the attacker does not know the client's static key, /// but can still replay a handshake message in order to force a peer into performing a few Diffie-Hellman key exchange operations. /// /// Thus, to prevent replay attacks a responder will always check if the timestamp is strictly increasing, /// effectively considering it as a stateful counter. /// /// If the client timestamp has been seen before, or is not strictly increasing, /// we can abort the handshake early and avoid heavy Diffie-Hellman computations. /// If the client timestamp is valid, we store it. #[derive(Default)] pub struct AntiReplayTimestamps(HashMap<x25519::PublicKey, u64>); impl AntiReplayTimestamps { /// Returns true if the timestamp has already been observed for this peer /// or if it's an old timestamp pub fn
(&self, pubkey: x25519::PublicKey, timestamp: u64) -> bool { if let Some(last_timestamp) = self.0.get(&pubkey) { &timestamp <= last_timestamp } else { false } } /// Stores the timestamp pub fn store_timestamp(&mut self, pubkey: x25519::PublicKey, timestamp: u64) { self.0 .entry(pubkey) .and_modify(|last_timestamp| *last_timestamp = timestamp) .or_insert(timestamp); } } /// The timestamp is sent as a payload, so that it is encrypted. /// Note that a millisecond value is a 16-byte value in rust, /// but as we use it to store a duration since UNIX_EPOCH we will never use more than 8 bytes. const PAYLOAD_SIZE: usize = 8; // Noise Wrapper // ------------- // Noise by default is not aware of the above or lower protocol layers, // We thus need to build this wrapper around Noise to both: // // - fragment messages that need to be encrypted by noise (due to its maximum 65535-byte messages) // - understand how long noise messages we send and receive are, // in order to pass them to the noise implementaiton // /// The Noise configuration to be used to perform a protocol upgrade on an underlying socket. pub struct NoiseWrapper(noise::NoiseConfig); impl NoiseWrapper { /// Create a new NoiseConfig with the provided keypair pub fn new(key: x25519::PrivateKey) -> Self { Self(noise::NoiseConfig::new(key)) } /// Perform a protocol upgrade on an underlying connection. In addition perform the noise IX /// handshake to establish a noise stream and exchange static public keys. Upon success, /// returns the static public key of the remote as well as a NoiseStream. // TODO(mimoo, philp9): this code could be inlined in transport.rs once the monolithic network is done pub async fn upgrade_connection<TSocket>( &self, socket: TSocket, origin: ConnectionOrigin, anti_replay_timestamps: Option<Arc<RwLock<AntiReplayTimestamps>>>, remote_public_key: Option<x25519::PublicKey>, trusted_peers: Option<&Arc<RwLock<HashMap<PeerId, NetworkPeerInfo>>>>, ) -> io::Result<(x25519::PublicKey, NoiseStream<TSocket>)> where TSocket: AsyncRead + AsyncWrite + Unpin, { // perform the noise handshake let socket = match origin { ConnectionOrigin::Outbound => { let remote_public_key = match remote_public_key { Some(key) => key, None if cfg!(any(test, feature = "fuzzing")) => unreachable!(), None => { return Err(std::io::Error::new( std::io::ErrorKind::Other, "noise: SHOULD NOT HAPPEN: missing server's key when dialing", )); } }; self.dial(socket, anti_replay_timestamps.is_some(), remote_public_key) .await? } ConnectionOrigin::Inbound => { self.accept(socket, anti_replay_timestamps, trusted_peers) .await? } }; // return remote public key with a socket including the noise stream let remote_public_key = socket.get_remote_static(); Ok((remote_public_key, socket)) } pub async fn dial<TSocket>( &self, mut socket: TSocket, mutual_authentication: bool, remote_public_key: x25519::PublicKey, ) -> io::Result<NoiseStream<TSocket>> where TSocket: AsyncRead + AsyncWrite + Unpin, { // in mutual authenticated networks, send a payload of the current timestamp (in milliseconds) let payload = if mutual_authentication { let now: u64 = time::SystemTime::now() .duration_since(time::UNIX_EPOCH) .expect("system clock should work") .as_millis() as u64; // e.g. 
[157, 126, 253, 97, 114, 1, 0, 0] let now = now.to_le_bytes().to_vec(); Some(now) } else { None }; // create first handshake message (-> e, es, s, ss) let mut rng = rand::rngs::OsRng; let mut first_message = [0u8; noise::handshake_init_msg_len(PAYLOAD_SIZE)]; let initiator_state = self .0 .initiate_connection( &mut rng, &[], remote_public_key, payload.as_ref().map(|x| &x[..]), &mut first_message, ) .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?; // write the first handshake message socket.write_all(&first_message).await?; // flush socket.flush().await?; // receive the server's response (<- e, ee, se) let mut server_response = [0u8; noise::handshake_resp_msg_len(0)]; socket.read_exact(&mut server_response).await?; // parse the server's response // TODO: security logging here? (mimoo) let (_, session) = self .0 .finalize_connection(initiator_state, &server_response) .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?; // finalize the connection Ok(NoiseStream::new(socket, session)) } pub async fn accept<TSocket>( &self, mut socket: TSocket, anti_replay_timestamps: Option<Arc<RwLock<AntiReplayTimestamps>>>, trusted_peers: Option<&Arc<RwLock<HashMap<PeerId, NetworkPeerInfo>>>>, ) -> io::Result<NoiseStream<TSocket>> where TSocket: AsyncRead + AsyncWrite + Unpin, { // receive the initiation message let mut client_init_message = [0u8; noise::handshake_init_msg_len(PAYLOAD_SIZE)]; socket.read_exact(&mut client_init_message).await?; // parse it let (their_public_key, handshake_state, payload) = self .0 .parse_client_init_message(&[], &client_init_message) .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?; // make sure the public key is a validator before continuing (if we're in the validator network) if let Some(trusted_peers) = trusted_peers { let found = trusted_peers .read() .map_err(|_| { io::Error::new( io::ErrorKind::Other, "noise: unable to read trusted_peers lock", ) })? .iter() .any(|(_peer_id, public_keys)| public_keys.identity_public_key == their_public_key); if!found { // TODO: security logging (mimoo) return Err(io::Error::new( io::ErrorKind::InvalidData, format!( "noise: client connecting to us with an unknown public key: {}", their_public_key ), )); } } // if on a mutually authenticated network if let Some(anti_replay_timestamps) = &anti_replay_timestamps { // check that the payload received as the client timestamp (in seconds) if payload.len()!= PAYLOAD_SIZE { // TODO: security logging (mimoo) return Err(io::Error::new( io::ErrorKind::InvalidData, "noise: client initiated connection without an 8-byte timestamp", )); } let mut client_timestamp = [0u8; PAYLOAD_SIZE]; client_timestamp.copy_from_slice(&payload); let client_timestamp = u64::from_le_bytes(client_timestamp); // check the timestamp is not a replay let mut anti_replay_timestamps = anti_replay_timestamps.write().map_err(|_| { io::Error::new( io::ErrorKind::Other, "noise: unable to read anti_replay_timestamps lock", ) })?; if anti_replay_timestamps.is_replay(their_public_key, client_timestamp) { // TODO: security logging the ip + blocking the ip? 
(mimoo) return Err(io::Error::new( io::ErrorKind::InvalidData, format!( "noise: client initiated connection with a timestamp already seen before: {}", client_timestamp ), )); } // store the timestamp anti_replay_timestamps.store_timestamp(their_public_key, client_timestamp); } // construct the response let mut rng = rand::rngs::OsRng; let mut server_response = [0u8; noise::handshake_resp_msg_len(0)]; let session = self .0 .respond_to_client(&mut rng, handshake_state, None, &mut server_response) .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?; // send the response socket.write_all(&server_response).await?; // finalize the connection Ok(NoiseStream::new(socket, session)) } } // // Tests // ----- // #[cfg(test)] mod test { use super::*; use futures::{executor::block_on, future::join}; use libra_crypto::test_utils::TEST_SEED; use memsocket::MemorySocket; use std::{ collections::HashMap, io, sync::{Arc, RwLock}, }; use libra_crypto::traits::Uniform as _; use rand::SeedableRng as _; /// helper to setup two testing peers fn build_peers() -> ( (NoiseWrapper, x25519::PublicKey), (NoiseWrapper, x25519::PublicKey), ) { let mut rng = ::rand::rngs::StdRng::from_seed(TEST_SEED); let client_private = x25519::PrivateKey::generate(&mut rng); let client_public = client_private.public_key(); let server_private = x25519::PrivateKey::generate(&mut rng); let server_public = server_private.public_key(); let client = NoiseWrapper::new(client_private); let server = NoiseWrapper::new(server_private); ((client, client_public), (server, server_public)) } /// helper to perform a noise handshake with two peers fn perform_handshake( client: NoiseWrapper, server_public_key: x25519::PublicKey, server: NoiseWrapper, trusted_peers: Option<&Arc<RwLock<HashMap<PeerId, NetworkPeerInfo>>>>, ) -> io::Result<(NoiseStream<MemorySocket>, NoiseStream<MemorySocket>)> { // create an in-memory socket for testing let (dialer_socket, listener_socket) = MemorySocket::new_pair(); let anti_replay_timestamps = Arc::new(RwLock::new(AntiReplayTimestamps::default())); // perform the handshake let (client_session, server_session) = block_on(join( client.dial(dialer_socket, true, server_public_key), server.accept(listener_socket, Some(anti_replay_timestamps), trusted_peers), )); // Ok((client_session?, server_session?)) } #[test] fn test_handshake() { // perform handshake with two testing peers let ((client, client_public), (server, server_public)) = build_peers(); let (client, server) = perform_handshake(client, server_public, server, None).unwrap(); assert_eq!(client.get_remote_static(), server_public,); assert_eq!(server.get_remote_static(), client_public,); } }
is_replay
identifier_name
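A sketch of the timestamp payload exchanged by `dial` and `accept`: the current time in milliseconds, truncated to a `u64` and sent as 8 little-endian bytes, which the responder rejects unless it is exactly `PAYLOAD_SIZE` bytes long. The helper names below are illustrative, not part of the module.

use std::convert::TryInto;
use std::time::{SystemTime, UNIX_EPOCH};

const PAYLOAD_SIZE: usize = 8;

/// Encode the current time in milliseconds as 8 little-endian bytes.
fn encode_timestamp() -> [u8; PAYLOAD_SIZE] {
    let now = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("system clock should work")
        .as_millis() as u64;
    now.to_le_bytes()
}

/// Decode the payload; reject anything that is not exactly 8 bytes.
fn decode_timestamp(payload: &[u8]) -> Option<u64> {
    let bytes: [u8; PAYLOAD_SIZE] = payload.try_into().ok()?;
    Some(u64::from_le_bytes(bytes))
}

fn main() {
    let payload = encode_timestamp();
    let decoded = decode_timestamp(&payload).unwrap();
    println!("timestamp (ms): {}", decoded);
    assert!(decode_timestamp(&payload[..4]).is_none());
}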
acpi.rs
//! This module provides access to ACPI. use core::convert::TryInto; use crate::utils; use crate::{Error, Ptr}; /// Signature of the RSDP structure. const ACPI_RSDP_SIGNATURE: &[u8] = b"RSD PTR "; /// Size of the SDT header. const ACPI_SDT_SIZE: usize = core::mem::size_of::<AcpiSdtHeader>(); /// Root System Description Pointer (RSDP) structure of the ACPI 2.0 and later /// specifications. #[derive(Debug, Clone, Copy)] #[repr(C, packed)] struct AcpiRsdp20 { signature: [u8; 8], checksum: u8, oem_id: [u8; 6], revision: u8, rsdt_addr: u32, length: u32, xsdt_addr: u64, ext_checksum: u8, reserved: [u8; 3], } /// Represents the Root System Description Pointer (RSDP) of ACPI 2.0+. #[derive(Debug)] pub struct Rsdp20 { rsdp20: AcpiRsdp20, } impl Rsdp20 { /// Creates a new `Rsdp20` from a given pointer. /// /// # Errors /// /// This function returns error if the pointer does not point to a valid /// RSDP 2.0+ structure. /// /// # Safety /// /// The `Rsdp20` structure is created using a pointer. Thus, this function /// is considered unsafe. pub unsafe fn new(rsdp20_ptr: Ptr) -> Result<Self, Error> { let rsdp20_ptr = rsdp20_ptr.0 as *const AcpiRsdp20; let rsdp20 = core::ptr::read_unaligned(rsdp20_ptr); // Check table's signature. if rsdp20.signature!= ACPI_RSDP_SIGNATURE { return Err(Error::InvalidSignature); } // Check table's revision. if rsdp20.revision < 2 { return Err(Error::InvalidRevision); } // Check table's checksum. let checksum = utils::add_bytes( &rsdp20 as *const AcpiRsdp20 as *const u8, rsdp20.length as usize, ); if checksum!= 0 { return Err(Error::InvalidCheckSum); } Ok(Rsdp20 { rsdp20 }) } /// Returns the Extended System Description Table (XSDT). pub fn xsdt(&self) -> Result<Xsdt, Error> { // An `Rsdp20` is only created after checking its signature, checksum // and revision. Thus, we assume that the pointer to the XSDT // will be valid. unsafe { Xsdt::new(self.rsdp20.xsdt_addr.try_into()?) } } } /// System Description Table types. enum SdtType { Xsdt, Madt, } impl SdtType { /// Returns the signature of the SDT. fn signature(&self) -> &[u8] { match self { SdtType::Xsdt => b"XSDT", SdtType::Madt => b"APIC", } } } /// System Description Table header of the ACPI specification. It is common to /// all System Description Tables. #[derive(Debug, Clone, Copy)] #[repr(C, packed)] struct AcpiSdtHeader { signature: [u8; 4], length: u32, revision: u8, checksum: u8, oem_id: [u8; 6], oem_table_id: [u8; 8], oem_revision: u32, creator_id: u32, creator_revision: u32, } impl AcpiSdtHeader { /// Creates a new `AcpiSdtHeader` from a given pointer. /// /// # Errors /// /// This function returns error if the signature of the table does not /// match the provided `SdtType` or the checksum is invalid. unsafe fn new(sdt_ptr: Ptr, sdt_type: SdtType) -> Result<Self, Error> { // Parse SDT header. let sdt_ptr = sdt_ptr.0 as *const AcpiSdtHeader; let hdr = core::ptr::read_unaligned(sdt_ptr); // Check SDT header's signature. if hdr.signature!= sdt_type.signature() { return Err(Error::InvalidSignature); } // Check SDT header's checksum. let checksum = utils::add_bytes(sdt_ptr as *const u8, hdr.length as usize); if checksum!= 0 { return Err(Error::InvalidCheckSum); } Ok(hdr) } } /// Maximum number of entries in the XSDT. const ACPI_XSDT_ENTRIES_LEN: usize = 32; /// Represents the Extended System Description Table (XSDT). #[derive(Debug)] pub struct Xsdt { entries: [u64; ACPI_XSDT_ENTRIES_LEN], num_entries: usize, } impl Xsdt { /// Creates a new `Xsdt` from a given pointer. 
/// /// # Errors /// /// This function returns error if the pointer does not point to a valid /// XSDT. /// /// # Safety /// /// The `Xsdt` structure is created using a pointer. Thus, this function is /// considered unsafe. pub unsafe fn new(xsdt_ptr: Ptr) -> Result<Self, Error> { // Parse header. let hdr = AcpiSdtHeader::new(xsdt_ptr, SdtType::Xsdt)?; // Calculate number of entries. let entries_length = hdr.length as usize - ACPI_SDT_SIZE; if entries_length % 8!= 0 { return Err(Error::InvalidAcpiData); } let num_entries = entries_length / 8; // Check that there is enough room for the entries in the fixed size // array. if num_entries > ACPI_XSDT_ENTRIES_LEN { return Err(Error::BufferTooSmall); } // Parse entries. let mut entries = [0u64; ACPI_XSDT_ENTRIES_LEN]; for (i, it) in entries.iter_mut().take(num_entries).enumerate() { let ptr = (xsdt_ptr.0 as *const u8).add(ACPI_SDT_SIZE + i * 8) as *const u64; *it = core::ptr::read_unaligned(ptr); } Ok(Xsdt { entries, num_entries, }) } /// Returns the Multiple APIC Description Table (MADT). pub fn madt(&self) -> Result<Madt, Error> { // An `Xsdt` is only created after checking its signature and checksum // Thus, we assume that the pointer to the MADT will be valid. for &entry in self.entries.iter().take(self.num_entries) { // Look for a table with the correct signature. let ptr = entry as *const [u8; 4]; let signature = unsafe { core::ptr::read_unaligned(ptr) }; if signature == SdtType::Madt.signature() { return unsafe { Madt::new(entry.try_into()?) }; } } // If we reach this point, the table could not be found. Err(Error::NotFound) } } /// Size of the SDT header. const ACPI_MADT_FIELDS_SIZE: usize = core::mem::size_of::<AcpiMadtFields>(); /// Maximum number of entries in the MADT. const ACPI_MADT_ENTRIES_LEN: usize = 256; /// Extra fields of the Multiple APIC Description Table (MADT) in the ACPI /// specification. #[derive(Debug, Clone, Copy)] #[repr(C, packed)] struct AcpiMadtFields { lapic_addr: u32, flags: u32, } /// Processor Local APIC Structure in the ACPI specification. #[repr(C, packed)] struct AcpiMadtLapic { ty: u8, length: u8, proc_uid: u8, apic_id: u8, flags: u32, } /// Represents a Processor Local APIC Structure. #[derive(Debug, Default, Clone, Copy)] pub struct MadtLapic { proc_uid: u8, apic_id: u8, flags: u32, } impl MadtLapic { /// Processor's UID. pub fn proc_uid(&self) -> u8 { self.proc_uid } /// Processor's local APIC ID. pub fn acpi_id(&self) -> u8 { self.apic_id } /// Local APIC flags. /// /// Bit offset | Bit length | Flag /// ---------- | ---------- | --------------- /// 0 | 1 | Enabled /// 1 | 1 | Online Capable /// 2 | 30 | Reserved (zero) pub fn flags(&self) -> u32 { self.flags } } /// Represents the Multiple APIC Description Table (MADT). #[derive(Debug)] pub struct Madt { fields: AcpiMadtFields, lapic_entries: [MadtLapic; ACPI_MADT_ENTRIES_LEN], num_lapic_entries: usize, } impl Madt { /// Creates a new `Madt` from a given pointer. /// /// # Errors /// /// This function returns error if the pointer does not point to a valid /// MADT. /// /// # Safety /// /// The `Madt` structure is created using a pointer. Thus, this function is /// considered unsafe. pub unsafe fn new(madt_ptr: Ptr) -> Result<Madt, Error> { // Parse header. let hdr = AcpiSdtHeader::new(madt_ptr, SdtType::Madt)?; // Parse fields. let fields = core::ptr::read_unaligned( (madt_ptr.0 as *const u8).add(ACPI_SDT_SIZE) as *const AcpiMadtFields, ); // Parse entries. 
let mut num_lapic_entries = 0; let mut lapic_entries = [MadtLapic::default(); ACPI_MADT_ENTRIES_LEN]; let mut ptr = (madt_ptr.0 as *const u8) .add(ACPI_SDT_SIZE + ACPI_MADT_FIELDS_SIZE); let end = (madt_ptr.0 as *const u8).add(hdr.length as usize); while ptr < end { let ty = core::ptr::read_unaligned(ptr); let length = core::ptr::read_unaligned(ptr.add(1)); // LAPIC. if ty == 0 { if num_lapic_entries >= ACPI_MADT_ENTRIES_LEN { return Err(Error::BufferTooSmall); } let lapic = core::ptr::read_unaligned(ptr as *const AcpiMadtLapic); lapic_entries[num_lapic_entries] = MadtLapic { proc_uid: lapic.proc_uid, apic_id: lapic.apic_id, flags: lapic.flags, }; num_lapic_entries += 1; } ptr = ptr.add(length as usize); } Ok(Madt { fields, lapic_entries, num_lapic_entries, }) } /// Local Interrupt Controller Address. In other words, the 32-bit physical /// address at which each processor can access its local interrupt /// controller. pub fn lapic_addr(&self) -> u32 { self.fields.lapic_addr } /// Multiple ACPI flags. /// /// Bit offset | Bit length | Flag /// ---------- | ---------- | --------------- /// 0 | 1 | PCAT_COMPAT /// 1 | 31 | Reserved (zero) pub fn
(&self) -> u32 { self.fields.flags } /// Returns the detected local APIC structures. pub fn lapic(&self) -> &[MadtLapic] { &self.lapic_entries[..self.num_lapic_entries] } }
flags
identifier_name
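A sketch of the ACPI checksum rule that the RSDP and SDT validation above relies on: every byte of the table, including the checksum byte itself, must sum to zero modulo 256 (this is what the `utils::add_bytes(...) != 0` checks express). The toy table below is made up for illustration.

/// All bytes of an ACPI table, checksum byte included, must sum to zero (mod 256).
fn table_checksum_ok(table: &[u8]) -> bool {
    table.iter().fold(0u8, |acc, b| acc.wrapping_add(*b)) == 0
}

fn main() {
    // A 4-byte toy "table" whose last byte is chosen so the sum wraps to zero.
    let mut table = [0x12u8, 0x34, 0x56, 0x00];
    let sum = table[..3].iter().fold(0u8, |acc, b| acc.wrapping_add(*b));
    table[3] = 0u8.wrapping_sub(sum);
    assert!(table_checksum_ok(&table));

    // Corrupt a byte and the checksum no longer holds.
    table[0] ^= 0xff;
    assert!(!table_checksum_ok(&table));
}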
acpi.rs
//! This module provides access to ACPI. use core::convert::TryInto; use crate::utils; use crate::{Error, Ptr}; /// Signature of the RSDP structure. const ACPI_RSDP_SIGNATURE: &[u8] = b"RSD PTR "; /// Size of the SDT header. const ACPI_SDT_SIZE: usize = core::mem::size_of::<AcpiSdtHeader>(); /// Root System Description Pointer (RSDP) structure of the ACPI 2.0 and later /// specifications. #[derive(Debug, Clone, Copy)] #[repr(C, packed)] struct AcpiRsdp20 { signature: [u8; 8], checksum: u8, oem_id: [u8; 6], revision: u8, rsdt_addr: u32, length: u32, xsdt_addr: u64, ext_checksum: u8, reserved: [u8; 3], } /// Represents the Root System Description Pointer (RSDP) of ACPI 2.0+. #[derive(Debug)] pub struct Rsdp20 { rsdp20: AcpiRsdp20, } impl Rsdp20 { /// Creates a new `Rsdp20` from a given pointer. /// /// # Errors /// /// This function returns error if the pointer does not point to a valid /// RSDP 2.0+ structure. /// /// # Safety /// /// The `Rsdp20` structure is created using a pointer. Thus, this function /// is considered unsafe. pub unsafe fn new(rsdp20_ptr: Ptr) -> Result<Self, Error> { let rsdp20_ptr = rsdp20_ptr.0 as *const AcpiRsdp20; let rsdp20 = core::ptr::read_unaligned(rsdp20_ptr); // Check table's signature. if rsdp20.signature!= ACPI_RSDP_SIGNATURE { return Err(Error::InvalidSignature); } // Check table's revision. if rsdp20.revision < 2 { return Err(Error::InvalidRevision); } // Check table's checksum. let checksum = utils::add_bytes( &rsdp20 as *const AcpiRsdp20 as *const u8, rsdp20.length as usize, ); if checksum!= 0 { return Err(Error::InvalidCheckSum); } Ok(Rsdp20 { rsdp20 }) } /// Returns the Extended System Description Table (XSDT). pub fn xsdt(&self) -> Result<Xsdt, Error> { // An `Rsdp20` is only created after checking its signature, checksum // and revision. Thus, we assume that the pointer to the XSDT // will be valid. unsafe { Xsdt::new(self.rsdp20.xsdt_addr.try_into()?) } } } /// System Description Table types. enum SdtType { Xsdt, Madt, } impl SdtType { /// Returns the signature of the SDT. fn signature(&self) -> &[u8] { match self { SdtType::Xsdt => b"XSDT", SdtType::Madt => b"APIC", } } } /// System Description Table header of the ACPI specification. It is common to /// all System Description Tables. #[derive(Debug, Clone, Copy)] #[repr(C, packed)] struct AcpiSdtHeader { signature: [u8; 4], length: u32, revision: u8, checksum: u8, oem_id: [u8; 6], oem_table_id: [u8; 8], oem_revision: u32, creator_id: u32, creator_revision: u32, } impl AcpiSdtHeader { /// Creates a new `AcpiSdtHeader` from a given pointer. /// /// # Errors /// /// This function returns error if the signature of the table does not /// match the provided `SdtType` or the checksum is invalid. unsafe fn new(sdt_ptr: Ptr, sdt_type: SdtType) -> Result<Self, Error> { // Parse SDT header. let sdt_ptr = sdt_ptr.0 as *const AcpiSdtHeader; let hdr = core::ptr::read_unaligned(sdt_ptr); // Check SDT header's signature. if hdr.signature!= sdt_type.signature() { return Err(Error::InvalidSignature); } // Check SDT header's checksum. let checksum = utils::add_bytes(sdt_ptr as *const u8, hdr.length as usize); if checksum!= 0 { return Err(Error::InvalidCheckSum); } Ok(hdr) } } /// Maximum number of entries in the XSDT. const ACPI_XSDT_ENTRIES_LEN: usize = 32; /// Represents the Extended System Description Table (XSDT). #[derive(Debug)] pub struct Xsdt { entries: [u64; ACPI_XSDT_ENTRIES_LEN], num_entries: usize, } impl Xsdt { /// Creates a new `Xsdt` from a given pointer. 
/// /// # Errors /// /// This function returns error if the pointer does not point to a valid /// XSDT. /// /// # Safety /// /// The `Xsdt` structure is created using a pointer. Thus, this function is /// considered unsafe. pub unsafe fn new(xsdt_ptr: Ptr) -> Result<Self, Error> { // Parse header. let hdr = AcpiSdtHeader::new(xsdt_ptr, SdtType::Xsdt)?; // Calculate number of entries. let entries_length = hdr.length as usize - ACPI_SDT_SIZE; if entries_length % 8!= 0 { return Err(Error::InvalidAcpiData); } let num_entries = entries_length / 8; // Check that there is enough room for the entries in the fixed size // array. if num_entries > ACPI_XSDT_ENTRIES_LEN { return Err(Error::BufferTooSmall); } // Parse entries. let mut entries = [0u64; ACPI_XSDT_ENTRIES_LEN]; for (i, it) in entries.iter_mut().take(num_entries).enumerate() { let ptr = (xsdt_ptr.0 as *const u8).add(ACPI_SDT_SIZE + i * 8) as *const u64; *it = core::ptr::read_unaligned(ptr); } Ok(Xsdt { entries, num_entries, }) } /// Returns the Multiple APIC Description Table (MADT). pub fn madt(&self) -> Result<Madt, Error> { // An `Xsdt` is only created after checking its signature and checksum // Thus, we assume that the pointer to the MADT will be valid. for &entry in self.entries.iter().take(self.num_entries) { // Look for a table with the correct signature. let ptr = entry as *const [u8; 4]; let signature = unsafe { core::ptr::read_unaligned(ptr) }; if signature == SdtType::Madt.signature()
} // If we reach this point, the table could not be found. Err(Error::NotFound) } } /// Size of the SDT header. const ACPI_MADT_FIELDS_SIZE: usize = core::mem::size_of::<AcpiMadtFields>(); /// Maximum number of entries in the MADT. const ACPI_MADT_ENTRIES_LEN: usize = 256; /// Extra fields of the Multiple APIC Description Table (MADT) in the ACPI /// specification. #[derive(Debug, Clone, Copy)] #[repr(C, packed)] struct AcpiMadtFields { lapic_addr: u32, flags: u32, } /// Processor Local APIC Structure in the ACPI specification. #[repr(C, packed)] struct AcpiMadtLapic { ty: u8, length: u8, proc_uid: u8, apic_id: u8, flags: u32, } /// Represents a Processor Local APIC Structure. #[derive(Debug, Default, Clone, Copy)] pub struct MadtLapic { proc_uid: u8, apic_id: u8, flags: u32, } impl MadtLapic { /// Processor's UID. pub fn proc_uid(&self) -> u8 { self.proc_uid } /// Processor's local APIC ID. pub fn acpi_id(&self) -> u8 { self.apic_id } /// Local APIC flags. /// /// Bit offset | Bit length | Flag /// ---------- | ---------- | --------------- /// 0 | 1 | Enabled /// 1 | 1 | Online Capable /// 2 | 30 | Reserved (zero) pub fn flags(&self) -> u32 { self.flags } } /// Represents the Multiple APIC Description Table (MADT). #[derive(Debug)] pub struct Madt { fields: AcpiMadtFields, lapic_entries: [MadtLapic; ACPI_MADT_ENTRIES_LEN], num_lapic_entries: usize, } impl Madt { /// Creates a new `Madt` from a given pointer. /// /// # Errors /// /// This function returns error if the pointer does not point to a valid /// MADT. /// /// # Safety /// /// The `Madt` structure is created using a pointer. Thus, this function is /// considered unsafe. pub unsafe fn new(madt_ptr: Ptr) -> Result<Madt, Error> { // Parse header. let hdr = AcpiSdtHeader::new(madt_ptr, SdtType::Madt)?; // Parse fields. let fields = core::ptr::read_unaligned( (madt_ptr.0 as *const u8).add(ACPI_SDT_SIZE) as *const AcpiMadtFields, ); // Parse entries. let mut num_lapic_entries = 0; let mut lapic_entries = [MadtLapic::default(); ACPI_MADT_ENTRIES_LEN]; let mut ptr = (madt_ptr.0 as *const u8) .add(ACPI_SDT_SIZE + ACPI_MADT_FIELDS_SIZE); let end = (madt_ptr.0 as *const u8).add(hdr.length as usize); while ptr < end { let ty = core::ptr::read_unaligned(ptr); let length = core::ptr::read_unaligned(ptr.add(1)); // LAPIC. if ty == 0 { if num_lapic_entries >= ACPI_MADT_ENTRIES_LEN { return Err(Error::BufferTooSmall); } let lapic = core::ptr::read_unaligned(ptr as *const AcpiMadtLapic); lapic_entries[num_lapic_entries] = MadtLapic { proc_uid: lapic.proc_uid, apic_id: lapic.apic_id, flags: lapic.flags, }; num_lapic_entries += 1; } ptr = ptr.add(length as usize); } Ok(Madt { fields, lapic_entries, num_lapic_entries, }) } /// Local Interrupt Controller Address. In other words, the 32-bit physical /// address at which each processor can access its local interrupt /// controller. pub fn lapic_addr(&self) -> u32 { self.fields.lapic_addr } /// Multiple ACPI flags. /// /// Bit offset | Bit length | Flag /// ---------- | ---------- | --------------- /// 0 | 1 | PCAT_COMPAT /// 1 | 31 | Reserved (zero) pub fn flags(&self) -> u32 { self.fields.flags } /// Returns the detected local APIC structures. pub fn lapic(&self) -> &[MadtLapic] { &self.lapic_entries[..self.num_lapic_entries] } }
{ return unsafe { Madt::new(entry.try_into()?) }; }
conditional_block
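A sketch of how the XSDT entry count is derived in `Xsdt::new`: the bytes after the fixed 36-byte SDT header form an array of 8-byte physical addresses, so the remaining length must divide evenly by 8. The error strings are illustrative only.

/// Size of the on-the-wire SDT header (4 + 4 + 1 + 1 + 6 + 8 + 4 + 4 + 4 bytes).
const SDT_HEADER_SIZE: usize = 36;

/// Number of 8-byte entries following the SDT header in an XSDT of `table_length` bytes.
fn xsdt_entry_count(table_length: usize) -> Result<usize, &'static str> {
    let entries_length = table_length
        .checked_sub(SDT_HEADER_SIZE)
        .ok_or("table shorter than its own header")?;
    if entries_length % 8 != 0 {
        return Err("entry area is not a multiple of 8 bytes");
    }
    Ok(entries_length / 8)
}

fn main() {
    assert_eq!(xsdt_entry_count(36 + 4 * 8), Ok(4));
    assert!(xsdt_entry_count(36 + 12).is_err());
}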
acpi.rs
//! This module provides access to ACPI. use core::convert::TryInto; use crate::utils; use crate::{Error, Ptr}; /// Signature of the RSDP structure. const ACPI_RSDP_SIGNATURE: &[u8] = b"RSD PTR "; /// Size of the SDT header. const ACPI_SDT_SIZE: usize = core::mem::size_of::<AcpiSdtHeader>(); /// Root System Description Pointer (RSDP) structure of the ACPI 2.0 and later /// specifications. #[derive(Debug, Clone, Copy)] #[repr(C, packed)] struct AcpiRsdp20 { signature: [u8; 8], checksum: u8, oem_id: [u8; 6], revision: u8, rsdt_addr: u32, length: u32, xsdt_addr: u64, ext_checksum: u8, reserved: [u8; 3], } /// Represents the Root System Description Pointer (RSDP) of ACPI 2.0+. #[derive(Debug)] pub struct Rsdp20 { rsdp20: AcpiRsdp20, } impl Rsdp20 { /// Creates a new `Rsdp20` from a given pointer. /// /// # Errors /// /// This function returns error if the pointer does not point to a valid /// RSDP 2.0+ structure. /// /// # Safety /// /// The `Rsdp20` structure is created using a pointer. Thus, this function /// is considered unsafe. pub unsafe fn new(rsdp20_ptr: Ptr) -> Result<Self, Error> { let rsdp20_ptr = rsdp20_ptr.0 as *const AcpiRsdp20; let rsdp20 = core::ptr::read_unaligned(rsdp20_ptr); // Check table's signature. if rsdp20.signature!= ACPI_RSDP_SIGNATURE { return Err(Error::InvalidSignature); } // Check table's revision. if rsdp20.revision < 2 { return Err(Error::InvalidRevision); } // Check table's checksum. let checksum = utils::add_bytes( &rsdp20 as *const AcpiRsdp20 as *const u8, rsdp20.length as usize, ); if checksum!= 0 { return Err(Error::InvalidCheckSum); } Ok(Rsdp20 { rsdp20 }) } /// Returns the Extended System Description Table (XSDT). pub fn xsdt(&self) -> Result<Xsdt, Error> { // An `Rsdp20` is only created after checking its signature, checksum // and revision. Thus, we assume that the pointer to the XSDT // will be valid. unsafe { Xsdt::new(self.rsdp20.xsdt_addr.try_into()?) } } } /// System Description Table types. enum SdtType { Xsdt, Madt, } impl SdtType { /// Returns the signature of the SDT. fn signature(&self) -> &[u8] { match self { SdtType::Xsdt => b"XSDT", SdtType::Madt => b"APIC", } } } /// System Description Table header of the ACPI specification. It is common to /// all System Description Tables. #[derive(Debug, Clone, Copy)] #[repr(C, packed)] struct AcpiSdtHeader { signature: [u8; 4], length: u32, revision: u8, checksum: u8, oem_id: [u8; 6], oem_table_id: [u8; 8], oem_revision: u32, creator_id: u32, creator_revision: u32, } impl AcpiSdtHeader { /// Creates a new `AcpiSdtHeader` from a given pointer. /// /// # Errors /// /// This function returns error if the signature of the table does not /// match the provided `SdtType` or the checksum is invalid. unsafe fn new(sdt_ptr: Ptr, sdt_type: SdtType) -> Result<Self, Error> { // Parse SDT header. let sdt_ptr = sdt_ptr.0 as *const AcpiSdtHeader; let hdr = core::ptr::read_unaligned(sdt_ptr); // Check SDT header's signature. if hdr.signature!= sdt_type.signature() { return Err(Error::InvalidSignature); } // Check SDT header's checksum. let checksum = utils::add_bytes(sdt_ptr as *const u8, hdr.length as usize); if checksum!= 0 { return Err(Error::InvalidCheckSum); } Ok(hdr) } } /// Maximum number of entries in the XSDT. const ACPI_XSDT_ENTRIES_LEN: usize = 32; /// Represents the Extended System Description Table (XSDT). #[derive(Debug)] pub struct Xsdt { entries: [u64; ACPI_XSDT_ENTRIES_LEN], num_entries: usize, } impl Xsdt { /// Creates a new `Xsdt` from a given pointer. 
/// /// # Errors /// /// This function returns error if the pointer does not point to a valid /// XSDT. /// /// # Safety /// /// The `Xsdt` structure is created using a pointer. Thus, this function is /// considered unsafe. pub unsafe fn new(xsdt_ptr: Ptr) -> Result<Self, Error> { // Parse header. let hdr = AcpiSdtHeader::new(xsdt_ptr, SdtType::Xsdt)?; // Calculate number of entries. let entries_length = hdr.length as usize - ACPI_SDT_SIZE; if entries_length % 8!= 0 { return Err(Error::InvalidAcpiData); } let num_entries = entries_length / 8; // Check that there is enough room for the entries in the fixed size // array. if num_entries > ACPI_XSDT_ENTRIES_LEN { return Err(Error::BufferTooSmall); } // Parse entries. let mut entries = [0u64; ACPI_XSDT_ENTRIES_LEN]; for (i, it) in entries.iter_mut().take(num_entries).enumerate() { let ptr = (xsdt_ptr.0 as *const u8).add(ACPI_SDT_SIZE + i * 8) as *const u64; *it = core::ptr::read_unaligned(ptr); } Ok(Xsdt { entries, num_entries, }) } /// Returns the Multiple APIC Description Table (MADT). pub fn madt(&self) -> Result<Madt, Error> { // An `Xsdt` is only created after checking its signature and checksum // Thus, we assume that the pointer to the MADT will be valid. for &entry in self.entries.iter().take(self.num_entries) { // Look for a table with the correct signature. let ptr = entry as *const [u8; 4]; let signature = unsafe { core::ptr::read_unaligned(ptr) }; if signature == SdtType::Madt.signature() { return unsafe { Madt::new(entry.try_into()?) }; } } // If we reach this point, the table could not be found. Err(Error::NotFound) } } /// Size of the SDT header. const ACPI_MADT_FIELDS_SIZE: usize = core::mem::size_of::<AcpiMadtFields>(); /// Maximum number of entries in the MADT. const ACPI_MADT_ENTRIES_LEN: usize = 256; /// Extra fields of the Multiple APIC Description Table (MADT) in the ACPI /// specification. #[derive(Debug, Clone, Copy)] #[repr(C, packed)] struct AcpiMadtFields { lapic_addr: u32, flags: u32, } /// Processor Local APIC Structure in the ACPI specification. #[repr(C, packed)] struct AcpiMadtLapic { ty: u8, length: u8, proc_uid: u8, apic_id: u8, flags: u32, } /// Represents a Processor Local APIC Structure. #[derive(Debug, Default, Clone, Copy)] pub struct MadtLapic { proc_uid: u8, apic_id: u8, flags: u32, } impl MadtLapic { /// Processor's UID. pub fn proc_uid(&self) -> u8 { self.proc_uid } /// Processor's local APIC ID. pub fn acpi_id(&self) -> u8 { self.apic_id } /// Local APIC flags. /// /// Bit offset | Bit length | Flag /// ---------- | ---------- | --------------- /// 0 | 1 | Enabled /// 1 | 1 | Online Capable /// 2 | 30 | Reserved (zero) pub fn flags(&self) -> u32 { self.flags } } /// Represents the Multiple APIC Description Table (MADT). #[derive(Debug)] pub struct Madt { fields: AcpiMadtFields, lapic_entries: [MadtLapic; ACPI_MADT_ENTRIES_LEN], num_lapic_entries: usize, } impl Madt { /// Creates a new `Madt` from a given pointer. /// /// # Errors /// /// This function returns error if the pointer does not point to a valid /// MADT. /// /// # Safety /// /// The `Madt` structure is created using a pointer. Thus, this function is /// considered unsafe. pub unsafe fn new(madt_ptr: Ptr) -> Result<Madt, Error>
let length = core::ptr::read_unaligned(ptr.add(1)); // LAPIC. if ty == 0 { if num_lapic_entries >= ACPI_MADT_ENTRIES_LEN { return Err(Error::BufferTooSmall); } let lapic = core::ptr::read_unaligned(ptr as *const AcpiMadtLapic); lapic_entries[num_lapic_entries] = MadtLapic { proc_uid: lapic.proc_uid, apic_id: lapic.apic_id, flags: lapic.flags, }; num_lapic_entries += 1; } ptr = ptr.add(length as usize); } Ok(Madt { fields, lapic_entries, num_lapic_entries, }) } /// Local Interrupt Controller Address. In other words, the 32-bit physical /// address at which each processor can access its local interrupt /// controller. pub fn lapic_addr(&self) -> u32 { self.fields.lapic_addr } /// Multiple ACPI flags. /// /// Bit offset | Bit length | Flag /// ---------- | ---------- | --------------- /// 0 | 1 | PCAT_COMPAT /// 1 | 31 | Reserved (zero) pub fn flags(&self) -> u32 { self.fields.flags } /// Returns the detected local APIC structures. pub fn lapic(&self) -> &[MadtLapic] { &self.lapic_entries[..self.num_lapic_entries] } }
{ // Parse header. let hdr = AcpiSdtHeader::new(madt_ptr, SdtType::Madt)?; // Parse fields. let fields = core::ptr::read_unaligned( (madt_ptr.0 as *const u8).add(ACPI_SDT_SIZE) as *const AcpiMadtFields, ); // Parse entries. let mut num_lapic_entries = 0; let mut lapic_entries = [MadtLapic::default(); ACPI_MADT_ENTRIES_LEN]; let mut ptr = (madt_ptr.0 as *const u8) .add(ACPI_SDT_SIZE + ACPI_MADT_FIELDS_SIZE); let end = (madt_ptr.0 as *const u8).add(hdr.length as usize); while ptr < end { let ty = core::ptr::read_unaligned(ptr);
identifier_body
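A sketch of the MADT entry walk performed in `Madt::new`: interrupt-controller structures are packed back to back, each prefixed by a type byte and a length byte, and only type 0 (Processor Local APIC) entries are collected. The byte layout in `main` is a made-up example.

/// Count type-0 (Processor Local APIC) structures in a MADT entry area.
fn count_lapic_entries(entries: &[u8]) -> usize {
    let mut count = 0;
    let mut offset = 0;
    while offset + 2 <= entries.len() {
        let ty = entries[offset];
        let len = entries[offset + 1] as usize;
        if len < 2 || offset + len > entries.len() {
            break; // malformed entry; stop rather than loop forever
        }
        if ty == 0 {
            count += 1; // type 0 is a Processor Local APIC structure
        }
        offset += len;
    }
    count
}

fn main() {
    // Two toy entries: a type-0 LAPIC (8 bytes) followed by a type-1 I/O APIC (12 bytes).
    let mut entries = vec![0u8, 8, 0, 1, 1, 0, 0, 0];
    entries.extend_from_slice(&[1u8, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
    assert_eq!(count_lapic_entries(&entries), 1);
}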
acpi.rs
//! This module provides access to ACPI. use core::convert::TryInto; use crate::utils; use crate::{Error, Ptr}; /// Signature of the RSDP structure. const ACPI_RSDP_SIGNATURE: &[u8] = b"RSD PTR "; /// Size of the SDT header. const ACPI_SDT_SIZE: usize = core::mem::size_of::<AcpiSdtHeader>(); /// Root System Description Pointer (RSDP) structure of the ACPI 2.0 and later /// specifications. #[derive(Debug, Clone, Copy)] #[repr(C, packed)] struct AcpiRsdp20 { signature: [u8; 8], checksum: u8, oem_id: [u8; 6], revision: u8, rsdt_addr: u32, length: u32, xsdt_addr: u64, ext_checksum: u8, reserved: [u8; 3], } /// Represents the Root System Description Pointer (RSDP) of ACPI 2.0+. #[derive(Debug)] pub struct Rsdp20 { rsdp20: AcpiRsdp20, } impl Rsdp20 { /// Creates a new `Rsdp20` from a given pointer. /// /// # Errors /// /// This function returns error if the pointer does not point to a valid /// RSDP 2.0+ structure. /// /// # Safety /// /// The `Rsdp20` structure is created using a pointer. Thus, this function /// is considered unsafe. pub unsafe fn new(rsdp20_ptr: Ptr) -> Result<Self, Error> { let rsdp20_ptr = rsdp20_ptr.0 as *const AcpiRsdp20; let rsdp20 = core::ptr::read_unaligned(rsdp20_ptr); // Check table's signature. if rsdp20.signature!= ACPI_RSDP_SIGNATURE { return Err(Error::InvalidSignature); } // Check table's revision. if rsdp20.revision < 2 { return Err(Error::InvalidRevision); } // Check table's checksum. let checksum = utils::add_bytes( &rsdp20 as *const AcpiRsdp20 as *const u8, rsdp20.length as usize, ); if checksum!= 0 { return Err(Error::InvalidCheckSum); } Ok(Rsdp20 { rsdp20 }) } /// Returns the Extended System Description Table (XSDT). pub fn xsdt(&self) -> Result<Xsdt, Error> { // An `Rsdp20` is only created after checking its signature, checksum // and revision. Thus, we assume that the pointer to the XSDT // will be valid. unsafe { Xsdt::new(self.rsdp20.xsdt_addr.try_into()?) } } } /// System Description Table types. enum SdtType {
Xsdt, Madt, } impl SdtType { /// Returns the signature of the SDT. fn signature(&self) -> &[u8] { match self { SdtType::Xsdt => b"XSDT", SdtType::Madt => b"APIC", } } } /// System Description Table header of the ACPI specification. It is common to /// all System Description Tables. #[derive(Debug, Clone, Copy)] #[repr(C, packed)] struct AcpiSdtHeader { signature: [u8; 4], length: u32, revision: u8, checksum: u8, oem_id: [u8; 6], oem_table_id: [u8; 8], oem_revision: u32, creator_id: u32, creator_revision: u32, } impl AcpiSdtHeader { /// Creates a new `AcpiSdtHeader` from a given pointer. /// /// # Errors /// /// This function returns error if the signature of the table does not /// match the provided `SdtType` or the checksum is invalid. unsafe fn new(sdt_ptr: Ptr, sdt_type: SdtType) -> Result<Self, Error> { // Parse SDT header. let sdt_ptr = sdt_ptr.0 as *const AcpiSdtHeader; let hdr = core::ptr::read_unaligned(sdt_ptr); // Check SDT header's signature. if hdr.signature!= sdt_type.signature() { return Err(Error::InvalidSignature); } // Check SDT header's checksum. let checksum = utils::add_bytes(sdt_ptr as *const u8, hdr.length as usize); if checksum!= 0 { return Err(Error::InvalidCheckSum); } Ok(hdr) } } /// Maximum number of entries in the XSDT. const ACPI_XSDT_ENTRIES_LEN: usize = 32; /// Represents the Extended System Description Table (XSDT). #[derive(Debug)] pub struct Xsdt { entries: [u64; ACPI_XSDT_ENTRIES_LEN], num_entries: usize, } impl Xsdt { /// Creates a new `Xsdt` from a given pointer. /// /// # Errors /// /// This function returns error if the pointer does not point to a valid /// XSDT. /// /// # Safety /// /// The `Xsdt` structure is created using a pointer. Thus, this function is /// considered unsafe. pub unsafe fn new(xsdt_ptr: Ptr) -> Result<Self, Error> { // Parse header. let hdr = AcpiSdtHeader::new(xsdt_ptr, SdtType::Xsdt)?; // Calculate number of entries. let entries_length = hdr.length as usize - ACPI_SDT_SIZE; if entries_length % 8!= 0 { return Err(Error::InvalidAcpiData); } let num_entries = entries_length / 8; // Check that there is enough room for the entries in the fixed size // array. if num_entries > ACPI_XSDT_ENTRIES_LEN { return Err(Error::BufferTooSmall); } // Parse entries. let mut entries = [0u64; ACPI_XSDT_ENTRIES_LEN]; for (i, it) in entries.iter_mut().take(num_entries).enumerate() { let ptr = (xsdt_ptr.0 as *const u8).add(ACPI_SDT_SIZE + i * 8) as *const u64; *it = core::ptr::read_unaligned(ptr); } Ok(Xsdt { entries, num_entries, }) } /// Returns the Multiple APIC Description Table (MADT). pub fn madt(&self) -> Result<Madt, Error> { // An `Xsdt` is only created after checking its signature and checksum // Thus, we assume that the pointer to the MADT will be valid. for &entry in self.entries.iter().take(self.num_entries) { // Look for a table with the correct signature. let ptr = entry as *const [u8; 4]; let signature = unsafe { core::ptr::read_unaligned(ptr) }; if signature == SdtType::Madt.signature() { return unsafe { Madt::new(entry.try_into()?) }; } } // If we reach this point, the table could not be found. Err(Error::NotFound) } } /// Size of the SDT header. const ACPI_MADT_FIELDS_SIZE: usize = core::mem::size_of::<AcpiMadtFields>(); /// Maximum number of entries in the MADT. const ACPI_MADT_ENTRIES_LEN: usize = 256; /// Extra fields of the Multiple APIC Description Table (MADT) in the ACPI /// specification. 
#[derive(Debug, Clone, Copy)] #[repr(C, packed)] struct AcpiMadtFields { lapic_addr: u32, flags: u32, } /// Processor Local APIC Structure in the ACPI specification. #[repr(C, packed)] struct AcpiMadtLapic { ty: u8, length: u8, proc_uid: u8, apic_id: u8, flags: u32, } /// Represents a Processor Local APIC Structure. #[derive(Debug, Default, Clone, Copy)] pub struct MadtLapic { proc_uid: u8, apic_id: u8, flags: u32, } impl MadtLapic { /// Processor's UID. pub fn proc_uid(&self) -> u8 { self.proc_uid } /// Processor's local APIC ID. pub fn acpi_id(&self) -> u8 { self.apic_id } /// Local APIC flags. /// /// Bit offset | Bit length | Flag /// ---------- | ---------- | --------------- /// 0 | 1 | Enabled /// 1 | 1 | Online Capable /// 2 | 30 | Reserved (zero) pub fn flags(&self) -> u32 { self.flags } } /// Represents the Multiple APIC Description Table (MADT). #[derive(Debug)] pub struct Madt { fields: AcpiMadtFields, lapic_entries: [MadtLapic; ACPI_MADT_ENTRIES_LEN], num_lapic_entries: usize, } impl Madt { /// Creates a new `Madt` from a given pointer. /// /// # Errors /// /// This function returns error if the pointer does not point to a valid /// MADT. /// /// # Safety /// /// The `Madt` structure is created using a pointer. Thus, this function is /// considered unsafe. pub unsafe fn new(madt_ptr: Ptr) -> Result<Madt, Error> { // Parse header. let hdr = AcpiSdtHeader::new(madt_ptr, SdtType::Madt)?; // Parse fields. let fields = core::ptr::read_unaligned( (madt_ptr.0 as *const u8).add(ACPI_SDT_SIZE) as *const AcpiMadtFields, ); // Parse entries. let mut num_lapic_entries = 0; let mut lapic_entries = [MadtLapic::default(); ACPI_MADT_ENTRIES_LEN]; let mut ptr = (madt_ptr.0 as *const u8) .add(ACPI_SDT_SIZE + ACPI_MADT_FIELDS_SIZE); let end = (madt_ptr.0 as *const u8).add(hdr.length as usize); while ptr < end { let ty = core::ptr::read_unaligned(ptr); let length = core::ptr::read_unaligned(ptr.add(1)); // LAPIC. if ty == 0 { if num_lapic_entries >= ACPI_MADT_ENTRIES_LEN { return Err(Error::BufferTooSmall); } let lapic = core::ptr::read_unaligned(ptr as *const AcpiMadtLapic); lapic_entries[num_lapic_entries] = MadtLapic { proc_uid: lapic.proc_uid, apic_id: lapic.apic_id, flags: lapic.flags, }; num_lapic_entries += 1; } ptr = ptr.add(length as usize); } Ok(Madt { fields, lapic_entries, num_lapic_entries, }) } /// Local Interrupt Controller Address. In other words, the 32-bit physical /// address at which each processor can access its local interrupt /// controller. pub fn lapic_addr(&self) -> u32 { self.fields.lapic_addr } /// Multiple ACPI flags. /// /// Bit offset | Bit length | Flag /// ---------- | ---------- | --------------- /// 0 | 1 | PCAT_COMPAT /// 1 | 31 | Reserved (zero) pub fn flags(&self) -> u32 { self.fields.flags } /// Returns the detected local APIC structures. pub fn lapic(&self) -> &[MadtLapic] { &self.lapic_entries[..self.num_lapic_entries] } }
random_line_split
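A sketch of reading the Local APIC flag bits documented on `MadtLapic::flags`: bit 0 is Enabled and bit 1 is Online Capable. The helper names are illustrative.

/// Bit 0 of the Local APIC flags: the processor is enabled.
fn lapic_enabled(flags: u32) -> bool {
    flags & 0b01 != 0
}

/// Bit 1 of the Local APIC flags: the processor can be brought online later.
fn lapic_online_capable(flags: u32) -> bool {
    flags & 0b10 != 0
}

fn main() {
    assert!(lapic_enabled(0b01));
    assert!(!lapic_enabled(0b10));
    assert!(lapic_online_capable(0b10));
}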
snapshot.rs
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #[cfg(target_family = "unix")] use crate::disk_usage; use crate::{ format_error, image::{Block, Image}, }; use clap::ValueEnum; use elf::{abi::PT_LOAD, endian::NativeEndian, segment::ProgramHeader}; #[cfg(not(target_family = "unix"))] use std::env::consts::OS; use std::{ fs::{metadata, OpenOptions}, num::NonZeroU64, ops::Range, path::{Path, PathBuf}, }; #[derive(thiserror::Error)] pub enum Error { #[error("unable to parse elf structures: {0}")] Elf(elf::ParseError), #[error("locked down /proc/kcore")] LockedDownKcore, #[error( "estimated usage exceeds specified bounds: estimated size:{estimated} bytes. allowed:{allowed} bytes" )] DiskUsageEstimateExceeded { estimated: u64, allowed: u64 }, #[error("unable to create memory snapshot")] UnableToCreateMemorySnapshot(#[from] crate::image::Error), #[error("unable to create memory snapshot from source: {1}")] UnableToCreateSnapshotFromSource(#[source] Box<Error>, Source), #[error("unable to create memory snapshot: {0}")] UnableToCreateSnapshot(String), #[error("{0}: {1}")] Other(&'static str, String), #[error("disk error")] Disk(#[source] std::io::Error), } impl std::fmt::Debug for Error { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { format_error(self, f) } } pub(crate) type Result<T> = std::result::Result<T, Error>; #[derive(Debug, Clone, ValueEnum)] pub enum Source { /// Provides a read-only view of physical memory. Access to memory using /// this device must be paged aligned and read one page at a time. /// /// On RHEL based distributions, this device is frequently provided by /// default. A loadable kernel module version is available as part of /// the Linux utility `crash`: /// <https://github.com/crash-utility/crash/tree/master/memory_driver> #[value(name = "/dev/crash")] DevCrash, /// Provides a read-write view of physical memory, though AVML opens it in a /// read-only fashion. Access to to memory using this device can be /// disabled using the kernel configuration options `CONFIG_STRICT_DEVMEM` /// or `CONFIG_IO_STRICT_DEVMEM`. /// /// With `CONFIG_STRICT_DEVMEM`, only the first 1MB of memory can be /// accessed. #[value(name = "/dev/mem")] DevMem, /// Provides a virtual ELF coredump of kernel memory. This can be used to /// access physical memory. /// /// If LOCKDOWN_KCORE is set in the kernel, then /proc/kcore may exist but /// is either inaccessible or doesn't allow access to all of the kernel /// memory. #[value(name = "/proc/kcore")] ProcKcore, /// User-specified path to a raw memory file #[value(skip)] Raw(PathBuf), } impl std::fmt::Display for Source { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Self::DevCrash => write!(f, "/dev/crash"), Self::DevMem => write!(f, "/dev/mem"), Self::ProcKcore => write!(f, "/proc/kcore"), Self::Raw(path) => write!(f, "{}", path.display()), } } } #[must_use] fn can_open(src: &Path) -> bool { OpenOptions::new().read(true).open(src).is_ok() } // The file /proc/kcore is a pseudo-file in ELF core format that is 4KB+physical // memory in size. // // If LOCKDOWN_KCORE is set in the kernel, then /proc/kcore may exist but is // either inaccessible or doesn't allow access to all of the kernel memory. // // /dev/mem and /dev/crash, if available, are devices, rather than virtual // files. As such, we don't check those for size. 
#[must_use] fn is_kcore_ok() -> bool { metadata(Path::new("/proc/kcore")) .map(|x| x.len() > 0x2000) .unwrap_or(false) && can_open(Path::new("/proc/kcore")) } // try to perform an action, either returning on success, or having the result // of the error in an indented string. // // This special cases `DiskUsageEstimateExceeded` errors, as we want this to // fail fast and bail out of the `try_method` caller. macro_rules! try_method { ($func:expr) => {{ match $func { Ok(x) => return Ok(x), Err(err) => { if matches!( err, Error::UnableToCreateSnapshotFromSource(ref x, _) if matches!(x.as_ref(), Error::DiskUsageEstimateExceeded{..}), ) { return Err(err); } crate::indent(format!("{:?}", err), 4) } } }}; } pub struct Snapshot<'a, 'b> { source: Option<&'b Source>, destination: &'a Path, memory_ranges: Vec<Range<u64>>, version: u32, max_disk_usage: Option<NonZeroU64>, max_disk_usage_percentage: Option<f64>, } impl<'a, 'b> Snapshot<'a, 'b> { /// Create a new memory snapshot. /// /// The default version implements the `LiME` format. #[must_use] pub fn new(destination: &'a Path, memory_ranges: Vec<Range<u64>>) -> Self { Self { source: None, destination, memory_ranges, version: 1, max_disk_usage: None, max_disk_usage_percentage: None, } } /// Specify the maximum disk usage to stay under as a percentage /// /// This is an estimation, calculated at start time #[must_use] pub fn max_disk_usage_percentage(self, max_disk_usage_percentage: Option<f64>) -> Self { Self { max_disk_usage_percentage, ..self } } /// Specify the maximum disk space in MB to use /// /// This is an estimation, calculated at start time #[must_use] pub fn max_disk_usage(self, max_disk_usage: Option<NonZeroU64>) -> Self { Self { max_disk_usage, ..self } } /// Specify the source for creating the snapshot #[must_use] pub fn source(self, source: Option<&'b Source>) -> Self { Self { source,..self } } /// Specify the version of the snapshot format #[must_use] pub fn version(self, version: u32) -> Self { Self { version,..self } } fn create_source(&self, src: &Source) -> Result<()> { match src { Source::ProcKcore => self.kcore(), Source::DevCrash => self.phys(Path::new("/dev/crash")), Source::DevMem => self.phys(Path::new("/dev/mem")), Source::Raw(s) => self.phys(s), } .map_err(|e| Error::UnableToCreateSnapshotFromSource(Box::new(e), src.clone())) } /// Create a memory snapshot pub fn create(&self) -> Result<()> { if let Some(src) = self.source { self.create_source(src)?; } else if self.destination == Path::new("/dev/stdout") { // If we're writing to stdout, we can't start over if reading from a // source fails. As such, we need to do more work to pick a source // rather than just trying all available options. 
if is_kcore_ok() { self.create_source(&Source::ProcKcore)?; } else if can_open(Path::new("/dev/crash")) { self.create_source(&Source::DevCrash)?; } else if can_open(Path::new("/dev/mem")) { self.create_source(&Source::DevMem)?; } else { return Err(Error::UnableToCreateSnapshot( "no source available".to_string(), )); } } else { let crash_err = try_method!(self.create_source(&Source::DevCrash)); let kcore_err = try_method!(self.create_source(&Source::ProcKcore)); let devmem_err = try_method!(self.create_source(&Source::DevMem)); let reason = [String::new(), crash_err, kcore_err, devmem_err].join("\n"); return Err(Error::UnableToCreateSnapshot(crate::indent(reason, 4))); } Ok(()) } // given a set of ranges from iomem and a set of Blocks derived from the // pseudo-elf phys section headers, derive a set of ranges that can be used // to create a snapshot. fn find_kcore_blocks(ranges: &[Range<u64>], headers: &[Block]) -> Vec<Block> { let mut result = vec![]; 'outer: for range in ranges { let mut range = range.clone(); 'inner: for header in headers { match ( header.range.contains(&range.start), // TODO: ranges is currently inclusive, but not a // RangeInclusive. this should be adjusted. header.range.contains(&(range.end - 1)), ) { (true, true) => { let block = Block { offset: header.offset + range.start - header.range.start, range: range.clone(), }; result.push(block); continue 'outer; } (true, false) => { let block = Block { offset: header.offset + range.start - header.range.start, range: range.start..header.range.end, }; result.push(block); range.start = header.range.end; } _ => { continue 'inner; } }; } } result } /// Check disk usage of the destination /// /// NOTE: This requires `Image` because we want to ensure this is called /// after the file is created. #[cfg(target_family = "unix")] fn check_disk_usage(&self, _: &Image) -> Result<()> { disk_usage::check( self.destination, &self.memory_ranges, self.max_disk_usage, self.max_disk_usage_percentage, ) } /// Check disk usage of the destination /// /// On non-Unix platforms, this operation is a no-op. #[cfg(not(target_family = "unix"))] fn check_disk_usage(&self, _: &Image) -> Result<()> { if self.max_disk_usage.is_some() || self.max_disk_usage_percentage.is_some() { return Err(Error::Other( "unable to check disk usage on this platform", format!("os:{OS}"), )); } Ok(()) } fn kcore(&self) -> Result<()> { if!is_kcore_ok() { return Err(Error::LockedDownKcore); } let mut image = Image::new(self.version, Path::new("/proc/kcore"), self.destination)?; self.check_disk_usage(&image)?; let file = elf::ElfStream::<NativeEndian, _>::open_stream(&mut image.src).map_err(Error::Elf)?; let mut segments: Vec<&ProgramHeader> = file .segments() .iter() .filter(|x| x.p_type == PT_LOAD) .collect(); segments.sort_by(|a, b| a.p_vaddr.cmp(&b.p_vaddr)); let first_vaddr = segments .get(0) .ok_or_else(|| Error::UnableToCreateSnapshot("no initial addresses".to_string()))? .p_vaddr; let first_start = self .memory_ranges .get(0) .ok_or_else(|| Error::UnableToCreateSnapshot("no initial memory range".to_string()))? 
.start; let start = first_vaddr - first_start; let mut physical_ranges = vec![]; for phdr in segments { let entry_start = phdr.p_vaddr - start; let entry_end = entry_start + phdr.p_memsz; physical_ranges.push(Block { range: entry_start..entry_end, offset: phdr.p_offset, }); } let blocks = Self::find_kcore_blocks(&self.memory_ranges, &physical_ranges); image.write_blocks(&blocks)?; Ok(()) } fn phys(&self, mem: &Path) -> Result<()> { let is_crash = mem == Path::new("/dev/crash"); let blocks = self .memory_ranges .iter() .map(|x| Block { offset: x.start, range: if is_crash { x.start..((x.end >> 12) << 12) } else { x.start..x.end }, }) .collect::<Vec<_>>(); let mut image = Image::new(self.version, mem, self.destination)?; self.check_disk_usage(&image)?; image.write_blocks(&blocks)?; Ok(()) } } #[cfg(test)]
let ranges = [10..20, 30..35, 45..55]; let core_ranges = [ Block { range: 10..20, offset: 0, }, Block { range: 25..35, offset: 10, }, Block { range: 40..50, offset: 20, }, Block { range: 50..55, offset: 35, }, ]; let expected = vec![ Block { offset: 0, range: 10..20, }, Block { offset: 10 + 5, range: 30..35, }, Block { offset: 25, range: 45..50, }, Block { offset: 35, range: 50..55, }, ]; let result = Snapshot::find_kcore_blocks(&ranges, &core_ranges); assert_eq!(result, expected); } }
mod tests { use super::*; #[test] fn translate_ranges() {
random_line_split
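Editor's note: the snapshot.rs rows above expose a builder-style `Snapshot` API but no caller. Below is a minimal, illustrative usage sketch, assuming it lives in the same module as that code; the destination path, the iomem-style ranges, and the 2048 MB cap are invented for the example.

fn example_capture() -> Result<()> {
    use std::{num::NonZeroU64, path::Path};

    // Made-up physical ranges standing in for what /proc/iomem would report.
    let memory_ranges = vec![0x1000..0x9_fc00, 0x10_0000..0x7fff_0000];

    Snapshot::new(Path::new("/tmp/memory.lime"), memory_ranges)
        .version(1)                            // default version implements the LiME format
        .source(None)                          // let create() probe /dev/crash, /proc/kcore, /dev/mem
        .max_disk_usage(NonZeroU64::new(2048)) // cap in MB, per the setter's doc comment
        .create()
}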
snapshot.rs
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #[cfg(target_family = "unix")] use crate::disk_usage; use crate::{ format_error, image::{Block, Image}, }; use clap::ValueEnum; use elf::{abi::PT_LOAD, endian::NativeEndian, segment::ProgramHeader}; #[cfg(not(target_family = "unix"))] use std::env::consts::OS; use std::{ fs::{metadata, OpenOptions}, num::NonZeroU64, ops::Range, path::{Path, PathBuf}, }; #[derive(thiserror::Error)] pub enum Error { #[error("unable to parse elf structures: {0}")] Elf(elf::ParseError), #[error("locked down /proc/kcore")] LockedDownKcore, #[error( "estimated usage exceeds specified bounds: estimated size:{estimated} bytes. allowed:{allowed} bytes" )] DiskUsageEstimateExceeded { estimated: u64, allowed: u64 }, #[error("unable to create memory snapshot")] UnableToCreateMemorySnapshot(#[from] crate::image::Error), #[error("unable to create memory snapshot from source: {1}")] UnableToCreateSnapshotFromSource(#[source] Box<Error>, Source), #[error("unable to create memory snapshot: {0}")] UnableToCreateSnapshot(String), #[error("{0}: {1}")] Other(&'static str, String), #[error("disk error")] Disk(#[source] std::io::Error), } impl std::fmt::Debug for Error { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { format_error(self, f) } } pub(crate) type Result<T> = std::result::Result<T, Error>; #[derive(Debug, Clone, ValueEnum)] pub enum Source { /// Provides a read-only view of physical memory. Access to memory using /// this device must be paged aligned and read one page at a time. /// /// On RHEL based distributions, this device is frequently provided by /// default. A loadable kernel module version is available as part of /// the Linux utility `crash`: /// <https://github.com/crash-utility/crash/tree/master/memory_driver> #[value(name = "/dev/crash")] DevCrash, /// Provides a read-write view of physical memory, though AVML opens it in a /// read-only fashion. Access to to memory using this device can be /// disabled using the kernel configuration options `CONFIG_STRICT_DEVMEM` /// or `CONFIG_IO_STRICT_DEVMEM`. /// /// With `CONFIG_STRICT_DEVMEM`, only the first 1MB of memory can be /// accessed. #[value(name = "/dev/mem")] DevMem, /// Provides a virtual ELF coredump of kernel memory. This can be used to /// access physical memory. /// /// If LOCKDOWN_KCORE is set in the kernel, then /proc/kcore may exist but /// is either inaccessible or doesn't allow access to all of the kernel /// memory. #[value(name = "/proc/kcore")] ProcKcore, /// User-specified path to a raw memory file #[value(skip)] Raw(PathBuf), } impl std::fmt::Display for Source { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Self::DevCrash => write!(f, "/dev/crash"), Self::DevMem => write!(f, "/dev/mem"), Self::ProcKcore => write!(f, "/proc/kcore"), Self::Raw(path) => write!(f, "{}", path.display()), } } } #[must_use] fn can_open(src: &Path) -> bool { OpenOptions::new().read(true).open(src).is_ok() } // The file /proc/kcore is a pseudo-file in ELF core format that is 4KB+physical // memory in size. // // If LOCKDOWN_KCORE is set in the kernel, then /proc/kcore may exist but is // either inaccessible or doesn't allow access to all of the kernel memory. // // /dev/mem and /dev/crash, if available, are devices, rather than virtual // files. As such, we don't check those for size. #[must_use] fn is_kcore_ok() -> bool
// try to perform an action, either returning on success, or having the result // of the error in an indented string. // // This special cases `DiskUsageEstimateExceeded` errors, as we want this to // fail fast and bail out of the `try_method` caller. macro_rules! try_method { ($func:expr) => {{ match $func { Ok(x) => return Ok(x), Err(err) => { if matches!( err, Error::UnableToCreateSnapshotFromSource(ref x, _) if matches!(x.as_ref(), Error::DiskUsageEstimateExceeded{..}), ) { return Err(err); } crate::indent(format!("{:?}", err), 4) } } }}; } pub struct Snapshot<'a, 'b> { source: Option<&'b Source>, destination: &'a Path, memory_ranges: Vec<Range<u64>>, version: u32, max_disk_usage: Option<NonZeroU64>, max_disk_usage_percentage: Option<f64>, } impl<'a, 'b> Snapshot<'a, 'b> { /// Create a new memory snapshot. /// /// The default version implements the `LiME` format. #[must_use] pub fn new(destination: &'a Path, memory_ranges: Vec<Range<u64>>) -> Self { Self { source: None, destination, memory_ranges, version: 1, max_disk_usage: None, max_disk_usage_percentage: None, } } /// Specify the maximum disk usage to stay under as a percentage /// /// This is an estimation, calculated at start time #[must_use] pub fn max_disk_usage_percentage(self, max_disk_usage_percentage: Option<f64>) -> Self { Self { max_disk_usage_percentage, ..self } } /// Specify the maximum disk space in MB to use /// /// This is an estimation, calculated at start time #[must_use] pub fn max_disk_usage(self, max_disk_usage: Option<NonZeroU64>) -> Self { Self { max_disk_usage, ..self } } /// Specify the source for creating the snapshot #[must_use] pub fn source(self, source: Option<&'b Source>) -> Self { Self { source,..self } } /// Specify the version of the snapshot format #[must_use] pub fn version(self, version: u32) -> Self { Self { version,..self } } fn create_source(&self, src: &Source) -> Result<()> { match src { Source::ProcKcore => self.kcore(), Source::DevCrash => self.phys(Path::new("/dev/crash")), Source::DevMem => self.phys(Path::new("/dev/mem")), Source::Raw(s) => self.phys(s), } .map_err(|e| Error::UnableToCreateSnapshotFromSource(Box::new(e), src.clone())) } /// Create a memory snapshot pub fn create(&self) -> Result<()> { if let Some(src) = self.source { self.create_source(src)?; } else if self.destination == Path::new("/dev/stdout") { // If we're writing to stdout, we can't start over if reading from a // source fails. As such, we need to do more work to pick a source // rather than just trying all available options. if is_kcore_ok() { self.create_source(&Source::ProcKcore)?; } else if can_open(Path::new("/dev/crash")) { self.create_source(&Source::DevCrash)?; } else if can_open(Path::new("/dev/mem")) { self.create_source(&Source::DevMem)?; } else { return Err(Error::UnableToCreateSnapshot( "no source available".to_string(), )); } } else { let crash_err = try_method!(self.create_source(&Source::DevCrash)); let kcore_err = try_method!(self.create_source(&Source::ProcKcore)); let devmem_err = try_method!(self.create_source(&Source::DevMem)); let reason = [String::new(), crash_err, kcore_err, devmem_err].join("\n"); return Err(Error::UnableToCreateSnapshot(crate::indent(reason, 4))); } Ok(()) } // given a set of ranges from iomem and a set of Blocks derived from the // pseudo-elf phys section headers, derive a set of ranges that can be used // to create a snapshot. 
fn find_kcore_blocks(ranges: &[Range<u64>], headers: &[Block]) -> Vec<Block> { let mut result = vec![]; 'outer: for range in ranges { let mut range = range.clone(); 'inner: for header in headers { match ( header.range.contains(&range.start), // TODO: ranges is currently inclusive, but not a // RangeInclusive. this should be adjusted. header.range.contains(&(range.end - 1)), ) { (true, true) => { let block = Block { offset: header.offset + range.start - header.range.start, range: range.clone(), }; result.push(block); continue 'outer; } (true, false) => { let block = Block { offset: header.offset + range.start - header.range.start, range: range.start..header.range.end, }; result.push(block); range.start = header.range.end; } _ => { continue 'inner; } }; } } result } /// Check disk usage of the destination /// /// NOTE: This requires `Image` because we want to ensure this is called /// after the file is created. #[cfg(target_family = "unix")] fn check_disk_usage(&self, _: &Image) -> Result<()> { disk_usage::check( self.destination, &self.memory_ranges, self.max_disk_usage, self.max_disk_usage_percentage, ) } /// Check disk usage of the destination /// /// On non-Unix platforms, this operation is a no-op. #[cfg(not(target_family = "unix"))] fn check_disk_usage(&self, _: &Image) -> Result<()> { if self.max_disk_usage.is_some() || self.max_disk_usage_percentage.is_some() { return Err(Error::Other( "unable to check disk usage on this platform", format!("os:{OS}"), )); } Ok(()) } fn kcore(&self) -> Result<()> { if!is_kcore_ok() { return Err(Error::LockedDownKcore); } let mut image = Image::new(self.version, Path::new("/proc/kcore"), self.destination)?; self.check_disk_usage(&image)?; let file = elf::ElfStream::<NativeEndian, _>::open_stream(&mut image.src).map_err(Error::Elf)?; let mut segments: Vec<&ProgramHeader> = file .segments() .iter() .filter(|x| x.p_type == PT_LOAD) .collect(); segments.sort_by(|a, b| a.p_vaddr.cmp(&b.p_vaddr)); let first_vaddr = segments .get(0) .ok_or_else(|| Error::UnableToCreateSnapshot("no initial addresses".to_string()))? .p_vaddr; let first_start = self .memory_ranges .get(0) .ok_or_else(|| Error::UnableToCreateSnapshot("no initial memory range".to_string()))? 
.start; let start = first_vaddr - first_start; let mut physical_ranges = vec![]; for phdr in segments { let entry_start = phdr.p_vaddr - start; let entry_end = entry_start + phdr.p_memsz; physical_ranges.push(Block { range: entry_start..entry_end, offset: phdr.p_offset, }); } let blocks = Self::find_kcore_blocks(&self.memory_ranges, &physical_ranges); image.write_blocks(&blocks)?; Ok(()) } fn phys(&self, mem: &Path) -> Result<()> { let is_crash = mem == Path::new("/dev/crash"); let blocks = self .memory_ranges .iter() .map(|x| Block { offset: x.start, range: if is_crash { x.start..((x.end >> 12) << 12) } else { x.start..x.end }, }) .collect::<Vec<_>>(); let mut image = Image::new(self.version, mem, self.destination)?; self.check_disk_usage(&image)?; image.write_blocks(&blocks)?; Ok(()) } } #[cfg(test)] mod tests { use super::*; #[test] fn translate_ranges() { let ranges = [10..20, 30..35, 45..55]; let core_ranges = [ Block { range: 10..20, offset: 0, }, Block { range: 25..35, offset: 10, }, Block { range: 40..50, offset: 20, }, Block { range: 50..55, offset: 35, }, ]; let expected = vec![ Block { offset: 0, range: 10..20, }, Block { offset: 10 + 5, range: 30..35, }, Block { offset: 25, range: 45..50, }, Block { offset: 35, range: 50..55, }, ]; let result = Snapshot::find_kcore_blocks(&ranges, &core_ranges); assert_eq!(result, expected); } }
{ metadata(Path::new("/proc/kcore")) .map(|x| x.len() > 0x2000) .unwrap_or(false) && can_open(Path::new("/proc/kcore")) }
identifier_body
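Editor's note: the `identifier_body` row above targets `is_kcore_ok`, whose `len() > 0x2000` test encodes the fact that /proc/kcore is roughly 4 KB of ELF headers plus physical memory, so a file at or below 8 KiB exposes essentially nothing. The sketch below isolates that heuristic; the constant, function, and test names are invented.

const KCORE_MIN_USEFUL_LEN: u64 = 0x2000; // 8 KiB: headers with little or no memory behind them

fn looks_locked_down(kcore_len: u64) -> bool {
    kcore_len <= KCORE_MIN_USEFUL_LEN
}

#[cfg(test)]
mod kcore_threshold_sketch {
    use super::*;

    #[test]
    fn header_only_kcore_is_rejected() {
        assert!(looks_locked_down(0x1000));  // headers only
        assert!(looks_locked_down(0x2000));  // still at the boundary
        assert!(!looks_locked_down(0x2001)); // anything larger passes the size check
    }
}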
snapshot.rs
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #[cfg(target_family = "unix")] use crate::disk_usage; use crate::{ format_error, image::{Block, Image}, }; use clap::ValueEnum; use elf::{abi::PT_LOAD, endian::NativeEndian, segment::ProgramHeader}; #[cfg(not(target_family = "unix"))] use std::env::consts::OS; use std::{ fs::{metadata, OpenOptions}, num::NonZeroU64, ops::Range, path::{Path, PathBuf}, }; #[derive(thiserror::Error)] pub enum Error { #[error("unable to parse elf structures: {0}")] Elf(elf::ParseError), #[error("locked down /proc/kcore")] LockedDownKcore, #[error( "estimated usage exceeds specified bounds: estimated size:{estimated} bytes. allowed:{allowed} bytes" )] DiskUsageEstimateExceeded { estimated: u64, allowed: u64 }, #[error("unable to create memory snapshot")] UnableToCreateMemorySnapshot(#[from] crate::image::Error), #[error("unable to create memory snapshot from source: {1}")] UnableToCreateSnapshotFromSource(#[source] Box<Error>, Source), #[error("unable to create memory snapshot: {0}")] UnableToCreateSnapshot(String), #[error("{0}: {1}")] Other(&'static str, String), #[error("disk error")] Disk(#[source] std::io::Error), } impl std::fmt::Debug for Error { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { format_error(self, f) } } pub(crate) type Result<T> = std::result::Result<T, Error>; #[derive(Debug, Clone, ValueEnum)] pub enum Source { /// Provides a read-only view of physical memory. Access to memory using /// this device must be paged aligned and read one page at a time. /// /// On RHEL based distributions, this device is frequently provided by /// default. A loadable kernel module version is available as part of /// the Linux utility `crash`: /// <https://github.com/crash-utility/crash/tree/master/memory_driver> #[value(name = "/dev/crash")] DevCrash, /// Provides a read-write view of physical memory, though AVML opens it in a /// read-only fashion. Access to to memory using this device can be /// disabled using the kernel configuration options `CONFIG_STRICT_DEVMEM` /// or `CONFIG_IO_STRICT_DEVMEM`. /// /// With `CONFIG_STRICT_DEVMEM`, only the first 1MB of memory can be /// accessed. #[value(name = "/dev/mem")] DevMem, /// Provides a virtual ELF coredump of kernel memory. This can be used to /// access physical memory. /// /// If LOCKDOWN_KCORE is set in the kernel, then /proc/kcore may exist but /// is either inaccessible or doesn't allow access to all of the kernel /// memory. #[value(name = "/proc/kcore")] ProcKcore, /// User-specified path to a raw memory file #[value(skip)] Raw(PathBuf), } impl std::fmt::Display for Source { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Self::DevCrash => write!(f, "/dev/crash"), Self::DevMem => write!(f, "/dev/mem"), Self::ProcKcore => write!(f, "/proc/kcore"), Self::Raw(path) => write!(f, "{}", path.display()), } } } #[must_use] fn can_open(src: &Path) -> bool { OpenOptions::new().read(true).open(src).is_ok() } // The file /proc/kcore is a pseudo-file in ELF core format that is 4KB+physical // memory in size. // // If LOCKDOWN_KCORE is set in the kernel, then /proc/kcore may exist but is // either inaccessible or doesn't allow access to all of the kernel memory. // // /dev/mem and /dev/crash, if available, are devices, rather than virtual // files. As such, we don't check those for size. 
#[must_use] fn is_kcore_ok() -> bool { metadata(Path::new("/proc/kcore")) .map(|x| x.len() > 0x2000) .unwrap_or(false) && can_open(Path::new("/proc/kcore")) } // try to perform an action, either returning on success, or having the result // of the error in an indented string. // // This special cases `DiskUsageEstimateExceeded` errors, as we want this to // fail fast and bail out of the `try_method` caller. macro_rules! try_method { ($func:expr) => {{ match $func { Ok(x) => return Ok(x), Err(err) => { if matches!( err, Error::UnableToCreateSnapshotFromSource(ref x, _) if matches!(x.as_ref(), Error::DiskUsageEstimateExceeded{..}), ) { return Err(err); } crate::indent(format!("{:?}", err), 4) } } }}; } pub struct Snapshot<'a, 'b> { source: Option<&'b Source>, destination: &'a Path, memory_ranges: Vec<Range<u64>>, version: u32, max_disk_usage: Option<NonZeroU64>, max_disk_usage_percentage: Option<f64>, } impl<'a, 'b> Snapshot<'a, 'b> { /// Create a new memory snapshot. /// /// The default version implements the `LiME` format. #[must_use] pub fn new(destination: &'a Path, memory_ranges: Vec<Range<u64>>) -> Self { Self { source: None, destination, memory_ranges, version: 1, max_disk_usage: None, max_disk_usage_percentage: None, } } /// Specify the maximum disk usage to stay under as a percentage /// /// This is an estimation, calculated at start time #[must_use] pub fn max_disk_usage_percentage(self, max_disk_usage_percentage: Option<f64>) -> Self { Self { max_disk_usage_percentage, ..self } } /// Specify the maximum disk space in MB to use /// /// This is an estimation, calculated at start time #[must_use] pub fn max_disk_usage(self, max_disk_usage: Option<NonZeroU64>) -> Self { Self { max_disk_usage, ..self } } /// Specify the source for creating the snapshot #[must_use] pub fn source(self, source: Option<&'b Source>) -> Self { Self { source,..self } } /// Specify the version of the snapshot format #[must_use] pub fn version(self, version: u32) -> Self { Self { version,..self } } fn create_source(&self, src: &Source) -> Result<()> { match src { Source::ProcKcore => self.kcore(), Source::DevCrash => self.phys(Path::new("/dev/crash")), Source::DevMem => self.phys(Path::new("/dev/mem")), Source::Raw(s) => self.phys(s), } .map_err(|e| Error::UnableToCreateSnapshotFromSource(Box::new(e), src.clone())) } /// Create a memory snapshot pub fn create(&self) -> Result<()> { if let Some(src) = self.source { self.create_source(src)?; } else if self.destination == Path::new("/dev/stdout") { // If we're writing to stdout, we can't start over if reading from a // source fails. As such, we need to do more work to pick a source // rather than just trying all available options. 
if is_kcore_ok() { self.create_source(&Source::ProcKcore)?; } else if can_open(Path::new("/dev/crash")) { self.create_source(&Source::DevCrash)?; } else if can_open(Path::new("/dev/mem")) { self.create_source(&Source::DevMem)?; } else { return Err(Error::UnableToCreateSnapshot( "no source available".to_string(), )); } } else { let crash_err = try_method!(self.create_source(&Source::DevCrash)); let kcore_err = try_method!(self.create_source(&Source::ProcKcore)); let devmem_err = try_method!(self.create_source(&Source::DevMem)); let reason = [String::new(), crash_err, kcore_err, devmem_err].join("\n"); return Err(Error::UnableToCreateSnapshot(crate::indent(reason, 4))); } Ok(()) } // given a set of ranges from iomem and a set of Blocks derived from the // pseudo-elf phys section headers, derive a set of ranges that can be used // to create a snapshot. fn find_kcore_blocks(ranges: &[Range<u64>], headers: &[Block]) -> Vec<Block> { let mut result = vec![]; 'outer: for range in ranges { let mut range = range.clone(); 'inner: for header in headers { match ( header.range.contains(&range.start), // TODO: ranges is currently inclusive, but not a // RangeInclusive. this should be adjusted. header.range.contains(&(range.end - 1)), ) { (true, true) => { let block = Block { offset: header.offset + range.start - header.range.start, range: range.clone(), }; result.push(block); continue 'outer; } (true, false) => { let block = Block { offset: header.offset + range.start - header.range.start, range: range.start..header.range.end, }; result.push(block); range.start = header.range.end; } _ => { continue 'inner; } }; } } result } /// Check disk usage of the destination /// /// NOTE: This requires `Image` because we want to ensure this is called /// after the file is created. #[cfg(target_family = "unix")] fn check_disk_usage(&self, _: &Image) -> Result<()> { disk_usage::check( self.destination, &self.memory_ranges, self.max_disk_usage, self.max_disk_usage_percentage, ) } /// Check disk usage of the destination /// /// On non-Unix platforms, this operation is a no-op. #[cfg(not(target_family = "unix"))] fn check_disk_usage(&self, _: &Image) -> Result<()> { if self.max_disk_usage.is_some() || self.max_disk_usage_percentage.is_some() { return Err(Error::Other( "unable to check disk usage on this platform", format!("os:{OS}"), )); } Ok(()) } fn kcore(&self) -> Result<()> { if!is_kcore_ok() { return Err(Error::LockedDownKcore); } let mut image = Image::new(self.version, Path::new("/proc/kcore"), self.destination)?; self.check_disk_usage(&image)?; let file = elf::ElfStream::<NativeEndian, _>::open_stream(&mut image.src).map_err(Error::Elf)?; let mut segments: Vec<&ProgramHeader> = file .segments() .iter() .filter(|x| x.p_type == PT_LOAD) .collect(); segments.sort_by(|a, b| a.p_vaddr.cmp(&b.p_vaddr)); let first_vaddr = segments .get(0) .ok_or_else(|| Error::UnableToCreateSnapshot("no initial addresses".to_string()))? .p_vaddr; let first_start = self .memory_ranges .get(0) .ok_or_else(|| Error::UnableToCreateSnapshot("no initial memory range".to_string()))? 
.start; let start = first_vaddr - first_start; let mut physical_ranges = vec![]; for phdr in segments { let entry_start = phdr.p_vaddr - start; let entry_end = entry_start + phdr.p_memsz; physical_ranges.push(Block { range: entry_start..entry_end, offset: phdr.p_offset, }); } let blocks = Self::find_kcore_blocks(&self.memory_ranges, &physical_ranges); image.write_blocks(&blocks)?; Ok(()) } fn phys(&self, mem: &Path) -> Result<()> { let is_crash = mem == Path::new("/dev/crash"); let blocks = self .memory_ranges .iter() .map(|x| Block { offset: x.start, range: if is_crash { x.start..((x.end >> 12) << 12) } else { x.start..x.end }, }) .collect::<Vec<_>>(); let mut image = Image::new(self.version, mem, self.destination)?; self.check_disk_usage(&image)?; image.write_blocks(&blocks)?; Ok(()) } } #[cfg(test)] mod tests { use super::*; #[test] fn
() { let ranges = [10..20, 30..35, 45..55]; let core_ranges = [ Block { range: 10..20, offset: 0, }, Block { range: 25..35, offset: 10, }, Block { range: 40..50, offset: 20, }, Block { range: 50..55, offset: 35, }, ]; let expected = vec![ Block { offset: 0, range: 10..20, }, Block { offset: 10 + 5, range: 30..35, }, Block { offset: 25, range: 45..50, }, Block { offset: 35, range: 50..55, }, ]; let result = Snapshot::find_kcore_blocks(&ranges, &core_ranges); assert_eq!(result, expected); } }
translate_ranges
identifier_name
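Editor's note: the snapshot.rs rows above also contain `Snapshot::phys`, which truncates each range end with `(x.end >> 12) << 12` when the source is /dev/crash, i.e. rounds it down to a 4 KiB page boundary. The sketch below just demonstrates that rounding on made-up addresses; the helper and module names are invented.

fn round_down_to_page(addr: u64) -> u64 {
    (addr >> 12) << 12 // 4 KiB pages, same shift as the /dev/crash path in phys()
}

#[cfg(test)]
mod crash_alignment_sketch {
    use super::*;

    #[test]
    fn end_addresses_are_truncated_to_page_boundaries() {
        assert_eq!(round_down_to_page(0x1000), 0x1000); // already aligned
        assert_eq!(round_down_to_page(0x1fff), 0x1000); // rounded down
        assert_eq!(round_down_to_page(0x2001), 0x2000);
    }
}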
snapshot.rs
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #[cfg(target_family = "unix")] use crate::disk_usage; use crate::{ format_error, image::{Block, Image}, }; use clap::ValueEnum; use elf::{abi::PT_LOAD, endian::NativeEndian, segment::ProgramHeader}; #[cfg(not(target_family = "unix"))] use std::env::consts::OS; use std::{ fs::{metadata, OpenOptions}, num::NonZeroU64, ops::Range, path::{Path, PathBuf}, }; #[derive(thiserror::Error)] pub enum Error { #[error("unable to parse elf structures: {0}")] Elf(elf::ParseError), #[error("locked down /proc/kcore")] LockedDownKcore, #[error( "estimated usage exceeds specified bounds: estimated size:{estimated} bytes. allowed:{allowed} bytes" )] DiskUsageEstimateExceeded { estimated: u64, allowed: u64 }, #[error("unable to create memory snapshot")] UnableToCreateMemorySnapshot(#[from] crate::image::Error), #[error("unable to create memory snapshot from source: {1}")] UnableToCreateSnapshotFromSource(#[source] Box<Error>, Source), #[error("unable to create memory snapshot: {0}")] UnableToCreateSnapshot(String), #[error("{0}: {1}")] Other(&'static str, String), #[error("disk error")] Disk(#[source] std::io::Error), } impl std::fmt::Debug for Error { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { format_error(self, f) } } pub(crate) type Result<T> = std::result::Result<T, Error>; #[derive(Debug, Clone, ValueEnum)] pub enum Source { /// Provides a read-only view of physical memory. Access to memory using /// this device must be paged aligned and read one page at a time. /// /// On RHEL based distributions, this device is frequently provided by /// default. A loadable kernel module version is available as part of /// the Linux utility `crash`: /// <https://github.com/crash-utility/crash/tree/master/memory_driver> #[value(name = "/dev/crash")] DevCrash, /// Provides a read-write view of physical memory, though AVML opens it in a /// read-only fashion. Access to to memory using this device can be /// disabled using the kernel configuration options `CONFIG_STRICT_DEVMEM` /// or `CONFIG_IO_STRICT_DEVMEM`. /// /// With `CONFIG_STRICT_DEVMEM`, only the first 1MB of memory can be /// accessed. #[value(name = "/dev/mem")] DevMem, /// Provides a virtual ELF coredump of kernel memory. This can be used to /// access physical memory. /// /// If LOCKDOWN_KCORE is set in the kernel, then /proc/kcore may exist but /// is either inaccessible or doesn't allow access to all of the kernel /// memory. #[value(name = "/proc/kcore")] ProcKcore, /// User-specified path to a raw memory file #[value(skip)] Raw(PathBuf), } impl std::fmt::Display for Source { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Self::DevCrash => write!(f, "/dev/crash"), Self::DevMem => write!(f, "/dev/mem"), Self::ProcKcore => write!(f, "/proc/kcore"), Self::Raw(path) => write!(f, "{}", path.display()), } } } #[must_use] fn can_open(src: &Path) -> bool { OpenOptions::new().read(true).open(src).is_ok() } // The file /proc/kcore is a pseudo-file in ELF core format that is 4KB+physical // memory in size. // // If LOCKDOWN_KCORE is set in the kernel, then /proc/kcore may exist but is // either inaccessible or doesn't allow access to all of the kernel memory. // // /dev/mem and /dev/crash, if available, are devices, rather than virtual // files. As such, we don't check those for size. 
#[must_use] fn is_kcore_ok() -> bool { metadata(Path::new("/proc/kcore")) .map(|x| x.len() > 0x2000) .unwrap_or(false) && can_open(Path::new("/proc/kcore")) } // try to perform an action, either returning on success, or having the result // of the error in an indented string. // // This special cases `DiskUsageEstimateExceeded` errors, as we want this to // fail fast and bail out of the `try_method` caller. macro_rules! try_method { ($func:expr) => {{ match $func { Ok(x) => return Ok(x), Err(err) => { if matches!( err, Error::UnableToCreateSnapshotFromSource(ref x, _) if matches!(x.as_ref(), Error::DiskUsageEstimateExceeded{..}), ) { return Err(err); } crate::indent(format!("{:?}", err), 4) } } }}; } pub struct Snapshot<'a, 'b> { source: Option<&'b Source>, destination: &'a Path, memory_ranges: Vec<Range<u64>>, version: u32, max_disk_usage: Option<NonZeroU64>, max_disk_usage_percentage: Option<f64>, } impl<'a, 'b> Snapshot<'a, 'b> { /// Create a new memory snapshot. /// /// The default version implements the `LiME` format. #[must_use] pub fn new(destination: &'a Path, memory_ranges: Vec<Range<u64>>) -> Self { Self { source: None, destination, memory_ranges, version: 1, max_disk_usage: None, max_disk_usage_percentage: None, } } /// Specify the maximum disk usage to stay under as a percentage /// /// This is an estimation, calculated at start time #[must_use] pub fn max_disk_usage_percentage(self, max_disk_usage_percentage: Option<f64>) -> Self { Self { max_disk_usage_percentage, ..self } } /// Specify the maximum disk space in MB to use /// /// This is an estimation, calculated at start time #[must_use] pub fn max_disk_usage(self, max_disk_usage: Option<NonZeroU64>) -> Self { Self { max_disk_usage, ..self } } /// Specify the source for creating the snapshot #[must_use] pub fn source(self, source: Option<&'b Source>) -> Self { Self { source,..self } } /// Specify the version of the snapshot format #[must_use] pub fn version(self, version: u32) -> Self { Self { version,..self } } fn create_source(&self, src: &Source) -> Result<()> { match src { Source::ProcKcore => self.kcore(), Source::DevCrash => self.phys(Path::new("/dev/crash")), Source::DevMem => self.phys(Path::new("/dev/mem")), Source::Raw(s) => self.phys(s), } .map_err(|e| Error::UnableToCreateSnapshotFromSource(Box::new(e), src.clone())) } /// Create a memory snapshot pub fn create(&self) -> Result<()> { if let Some(src) = self.source { self.create_source(src)?; } else if self.destination == Path::new("/dev/stdout") { // If we're writing to stdout, we can't start over if reading from a // source fails. As such, we need to do more work to pick a source // rather than just trying all available options. if is_kcore_ok()
else if can_open(Path::new("/dev/crash")) { self.create_source(&Source::DevCrash)?; } else if can_open(Path::new("/dev/mem")) { self.create_source(&Source::DevMem)?; } else { return Err(Error::UnableToCreateSnapshot( "no source available".to_string(), )); } } else { let crash_err = try_method!(self.create_source(&Source::DevCrash)); let kcore_err = try_method!(self.create_source(&Source::ProcKcore)); let devmem_err = try_method!(self.create_source(&Source::DevMem)); let reason = [String::new(), crash_err, kcore_err, devmem_err].join("\n"); return Err(Error::UnableToCreateSnapshot(crate::indent(reason, 4))); } Ok(()) } // given a set of ranges from iomem and a set of Blocks derived from the // pseudo-elf phys section headers, derive a set of ranges that can be used // to create a snapshot. fn find_kcore_blocks(ranges: &[Range<u64>], headers: &[Block]) -> Vec<Block> { let mut result = vec![]; 'outer: for range in ranges { let mut range = range.clone(); 'inner: for header in headers { match ( header.range.contains(&range.start), // TODO: ranges is currently inclusive, but not a // RangeInclusive. this should be adjusted. header.range.contains(&(range.end - 1)), ) { (true, true) => { let block = Block { offset: header.offset + range.start - header.range.start, range: range.clone(), }; result.push(block); continue 'outer; } (true, false) => { let block = Block { offset: header.offset + range.start - header.range.start, range: range.start..header.range.end, }; result.push(block); range.start = header.range.end; } _ => { continue 'inner; } }; } } result } /// Check disk usage of the destination /// /// NOTE: This requires `Image` because we want to ensure this is called /// after the file is created. #[cfg(target_family = "unix")] fn check_disk_usage(&self, _: &Image) -> Result<()> { disk_usage::check( self.destination, &self.memory_ranges, self.max_disk_usage, self.max_disk_usage_percentage, ) } /// Check disk usage of the destination /// /// On non-Unix platforms, this operation is a no-op. #[cfg(not(target_family = "unix"))] fn check_disk_usage(&self, _: &Image) -> Result<()> { if self.max_disk_usage.is_some() || self.max_disk_usage_percentage.is_some() { return Err(Error::Other( "unable to check disk usage on this platform", format!("os:{OS}"), )); } Ok(()) } fn kcore(&self) -> Result<()> { if!is_kcore_ok() { return Err(Error::LockedDownKcore); } let mut image = Image::new(self.version, Path::new("/proc/kcore"), self.destination)?; self.check_disk_usage(&image)?; let file = elf::ElfStream::<NativeEndian, _>::open_stream(&mut image.src).map_err(Error::Elf)?; let mut segments: Vec<&ProgramHeader> = file .segments() .iter() .filter(|x| x.p_type == PT_LOAD) .collect(); segments.sort_by(|a, b| a.p_vaddr.cmp(&b.p_vaddr)); let first_vaddr = segments .get(0) .ok_or_else(|| Error::UnableToCreateSnapshot("no initial addresses".to_string()))? .p_vaddr; let first_start = self .memory_ranges .get(0) .ok_or_else(|| Error::UnableToCreateSnapshot("no initial memory range".to_string()))? 
.start; let start = first_vaddr - first_start; let mut physical_ranges = vec![]; for phdr in segments { let entry_start = phdr.p_vaddr - start; let entry_end = entry_start + phdr.p_memsz; physical_ranges.push(Block { range: entry_start..entry_end, offset: phdr.p_offset, }); } let blocks = Self::find_kcore_blocks(&self.memory_ranges, &physical_ranges); image.write_blocks(&blocks)?; Ok(()) } fn phys(&self, mem: &Path) -> Result<()> { let is_crash = mem == Path::new("/dev/crash"); let blocks = self .memory_ranges .iter() .map(|x| Block { offset: x.start, range: if is_crash { x.start..((x.end >> 12) << 12) } else { x.start..x.end }, }) .collect::<Vec<_>>(); let mut image = Image::new(self.version, mem, self.destination)?; self.check_disk_usage(&image)?; image.write_blocks(&blocks)?; Ok(()) } } #[cfg(test)] mod tests { use super::*; #[test] fn translate_ranges() { let ranges = [10..20, 30..35, 45..55]; let core_ranges = [ Block { range: 10..20, offset: 0, }, Block { range: 25..35, offset: 10, }, Block { range: 40..50, offset: 20, }, Block { range: 50..55, offset: 35, }, ]; let expected = vec![ Block { offset: 0, range: 10..20, }, Block { offset: 10 + 5, range: 30..35, }, Block { offset: 25, range: 45..50, }, Block { offset: 35, range: 50..55, }, ]; let result = Snapshot::find_kcore_blocks(&ranges, &core_ranges); assert_eq!(result, expected); } }
{ self.create_source(&Source::ProcKcore)?; }
conditional_block
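Editor's note: the `conditional_block` row above is the /proc/kcore branch of the stdout source-selection chain in `Snapshot::create`. The sketch below restates that ordering as a plain function, assuming it sits next to the `Source` enum from the same file; the availability flags are passed in rather than probed, purely for illustration.

fn pick_stdout_source(kcore_ok: bool, crash_ok: bool, devmem_ok: bool) -> Option<Source> {
    // A stream to stdout cannot be restarted, so prefer the most reliable source first.
    if kcore_ok {
        Some(Source::ProcKcore)
    } else if crash_ok {
        Some(Source::DevCrash)
    } else if devmem_ok {
        Some(Source::DevMem)
    } else {
        None // mirrors the "no source available" error path
    }
}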
row.rs
notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #![allow(dead_code)] use std::{alloc::{alloc, dealloc, Layout}, env, fmt, mem, slice, io, io::Read, ops::{Deref, DerefMut}}; use once_cell::sync::Lazy; use byteorder::{LittleEndian, ReadBytesExt}; use anyhow::{bail, Context, Result}; use super::*; #[repr(C, packed)] pub union RowAux { pub remote_scn: [u8; 6], pub run_crc: u32, } extern { type RowData; } #[repr(C, packed)] pub struct Row { pub header_crc32c: u32, pub lsn: i64, pub scn: i64, pub tag: u16, shard_id: u16, aux: RowAux, tm: f64, pub len: u32, pub data_crc32c: u32, _data: RowData, } const ROW_LAYOUT : Layout = unsafe { Layout::from_size_align_unchecked(46, 16) }; pub struct BoxRow { ptr: *mut Row } impl Deref for BoxRow { type Target = Row; fn deref(&self) -> &Self::Target { unsafe { &*self.ptr } } } impl DerefMut for BoxRow { fn deref_mut(&mut self) -> &mut Self::Target { unsafe { &mut *self.ptr } } } impl Drop for BoxRow { fn drop(&mut self) { unsafe { dealloc(self.ptr as *mut _, Row::layout(self.len)); } } } impl Row { fn layout(len: u32) -> Layout { assert!(len < 2<<10); let data = Layout::from_size_align(len as usize, 1).unwrap(); ROW_LAYOUT.extend_packed(data).unwrap() } fn alloc(len: u32) -> *mut Row { unsafe { let ptr = alloc(Self::layout(len)) as *mut Row; (*ptr).len = len; ptr } } fn data_ptr(&self) -> *const u8 { let ptr = self as *const _ as *const u8; unsafe { ptr.add(ROW_LAYOUT.size()) } } pub fn data(&self) -> &[u8] { unsafe { slice::from_raw_parts(self.data_ptr(), self.len as usize) } } pub fn data_mut(&mut self) -> &mut [u8] { unsafe { slice::from_raw_parts_mut(self.data_ptr() as *mut _, self.len as usize) } } pub fn read(io: &mut dyn Read) -> Result<BoxRow> { let mut header = [0; ROW_LAYOUT.size()]; io.read_exact(&mut header).context("reading header")?; let header_crc32c = (&header[0..4]).read_u32::<LittleEndian>().unwrap(); let header_crc32c_calculated = crc32c(&header[4..]); if header_crc32c_calculated!= header_crc32c { bail!("header crc32c mismatch: expected 0x{:08x}, calculated 0x{:08x}",
header_crc32c, header_crc32c_calculated); } let len = (&header[ROW_LAYOUT.size() - 8..]).read_u32::<LittleEndian>().unwrap(); let mut row = BoxRow { ptr: Self::alloc(len) }; row.as_bytes_mut().copy_from_slice(&header); debug_assert!(row.len == len); io.read_exact(row.data_mut()).context("reading body")?; if crc32c(row.data())!= row.data_crc32c { bail!("data crc32c mismatch: expected 0x{:08x}, calculated 0x{:08x}", {row.data_crc32c}, crc32c(row.data())); } log::debug!("read row LSN:{}", {row.lsn}); Ok(row) } pub fn write(&self, io: &mut dyn io::Write) -> io::Result<usize> { io.write_all(self.as_bytes())?; // FIXME: nasty and unportable io.write_all(self.data())?; Ok(ROW_LAYOUT.size() + self.data().len()) } fn as_bytes(&self) -> &[u8] { unsafe { slice::from_raw_parts(self as *const _ as *const u8, ROW_LAYOUT.size()) } } fn as_bytes_mut(&mut self) -> &mut [u8] { unsafe { slice::from_raw_parts_mut(self as *mut _ as *mut u8, ROW_LAYOUT.size()) } } pub fn update_crc(&mut self) { self.data_crc32c = crc32c(self.data()); self.header_crc32c = crc32c(&self.as_bytes()[4..]) } fn tag(&self) -> Tag { Tag::new(self.tag & TAG_MASK) } fn tag_type(&self) -> TagType { TagType::new(self.tag &!TAG_MASK) } } #[derive(Debug)] #[derive(PartialEq, Eq)] pub enum Tag { SnapInitial, SnapData, WalData, SnapFinal, WalFinal, RunCrc, Nop, RaftAppend, RaftCommit, RaftVote, ShardCreate, ShardAlter, ShardFinal, Tlv, SysTag(u8), UserTag(u8), } impl Tag { fn new(repr: u16) -> Self { match repr & TAG_MASK { 1 => Tag::SnapInitial, 2 => Tag::SnapData, 3 => Tag::WalData, 4 => Tag::SnapFinal, 5 => Tag::WalFinal, 6 => Tag::RunCrc, 7 => Tag::Nop, 8 => Tag::RaftAppend, 9 => Tag::RaftCommit, 10 => Tag::RaftVote, 11 => Tag::ShardCreate, 12 => Tag::ShardAlter, 13 => Tag::ShardFinal, 14 => Tag::Tlv, t if t < 32 => Tag::SysTag(t as u8), t => Tag::UserTag((t >> 5) as u8), } } fn as_u16(&self) -> u16 { match self { Tag::SnapInitial => 1, Tag::SnapData => 2, Tag::WalData => 3, Tag::SnapFinal => 4, Tag::WalFinal => 5, Tag::RunCrc => 6, Tag::Nop => 7, Tag::RaftAppend => 8, Tag::RaftCommit => 9, Tag::RaftVote => 10, Tag::ShardCreate => 11, Tag::ShardAlter => 12, Tag::ShardFinal => 13, Tag::Tlv => 14, Tag::SysTag(t) => *t as u16, Tag::UserTag(t) => *t as u16, } } } impl fmt::Display for Tag { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { Tag::SnapInitial => write!(f, "snap_initial"), Tag::SnapData => write!(f, "snap_data"), Tag::SnapFinal => write!(f, "snap_final"), Tag::WalData => write!(f, "wal_data"), Tag::WalFinal => write!(f, "wal_final"), Tag::ShardCreate => write!(f, "shard_create"), Tag::ShardAlter => write!(f, "shard_alter"), Tag::ShardFinal => write!(f, "shard_final"), Tag::RunCrc => write!(f, "run_crc"), Tag::Nop => write!(f, "nop"), Tag::RaftAppend => write!(f, "raft_append"), Tag::RaftCommit => write!(f, "raft_commit"), Tag::RaftVote => write!(f, "raft_vote"), Tag::Tlv => write!(f, "tlv"), Tag::SysTag(n) => write!(f, "sys{}", n), Tag::UserTag(n) => write!(f, "usr{}", n) } } } /* two highest bit in tag encode tag type: 00 - invalid 01 - snap 10 - wal 11 - system wal */ pub const TAG_MASK: u16 = 0x3fff; const TAG_SIZE: usize = 14; enum TagType { SNAP = 0x4000, WAL = 0x8000, SYS = 0xc000, INVALID = 0, } impl TagType { fn new(repr: u16) -> TagType { match repr &!TAG_MASK { 0x4000 => TagType::SNAP, 0x8000 => TagType::WAL, 0xc000 => TagType::SYS, _ => TagType::INVALID, } } } impl fmt::Display for TagType { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { TagType::SNAP => write!(f, "snap"), 
TagType::WAL => write!(f, "wal"), TagType::SYS => write!(f, "sys"), TagType::INVALID => write!(f, "invalid"), } } } #[derive(PartialEq, Eq)] enum ShardType { POR, RAFT, PART } impl ShardType { fn new(repr: u8) -> Result<Self> { match repr { 0 => Ok(ShardType::POR), 1 => Ok(ShardType::RAFT), 2 => Ok(ShardType::PART), _ => bail!("invalid shard type {}", repr) } } } // TODO: switch to byteordered? struct LittleEndianReader<'a> (std::io::Cursor<&'a[u8]>); impl<'a> LittleEndianReader<'a> { fn new(buf: &'a[u8]) -> Self { Self(std::io::Cursor::new(buf)) } fn read_u8(&mut self) -> u8 { self.0.read_u8().unwrap() } fn read_u16(&mut self) -> u16 { self.0.read_u16::<LittleEndian>().unwrap() } fn read_u32(&mut self) -> u32 { self.0.read_u32::<LittleEndian>().unwrap() } fn read_i64(&mut self) -> i64 { self.0.read_i64::<LittleEndian>().unwrap() } fn read_u64(&mut self) -> u64 { self.0.read_u64::<LittleEndian>().unwrap() } fn read_str(&mut self, len: usize) -> &str { let pos = self.0.position() as usize; let raw = &self.0.get_ref()[pos..pos+len]; let (raw, _) = raw.split_at(raw.iter().position(|&x| x == 0).unwrap_or(len)); let str = std::str::from_utf8(raw).unwrap(); self.0.set_position((pos+len) as u64); str } fn into_cursor(self) -> std::io::Cursor<&'a[u8]> { self.0 } fn unparsed(&self) -> &[u8] { &self.0.get_ref()[self.0.position() as usize..] } } pub fn print_row<W: fmt::Write + fmt::Debug>(buf: &mut W, row: &Row, handler: &dyn Fn(&mut W, u16, &[u8])) -> Result<()> { fn int_flag(name: &str, default: bool) -> bool { let flag = try { let val = env::var(name).ok()?; val.parse::<usize>().ok()? }; Some(1) == flag || default } static PRINT_HEADER : Lazy<bool> = Lazy::new(|| { int_flag("OCTOPUS_CAT_ROW_HEADER", true) }); static PRINT_RUN_CRC : Lazy<bool> = Lazy::new(|| { int_flag("OCTOPUS_CAT_RUN_CRC", false) }); let tag = row.tag(); if *PRINT_HEADER || true { write!(buf, "lsn:{}", {row.lsn})?; if row.scn!= -1 || tag == Tag::RaftVote || tag == Tag::SnapData { write!(buf, " shard:{}", {row.shard_id})?; if *PRINT_RUN_CRC { write!(buf, " run_crc:0x{:08x}", unsafe {row.aux.run_crc})?; } } write!(buf, " scn:{} tm:{:.3} t:{}/{} ", {row.scn}, {row.tm}, row.tag_type(), row.tag())?; } use mem::size_of; let mut reader = LittleEndianReader::new(row.data()); match row.tag() { Tag::SnapInitial => { if row.data().len() == size_of::<u32>() * 3 { let count = reader.read_u32(); let crc_log = reader.read_u32(); let crc_mod = reader.read_u32(); write!(buf, "count:{} run_crc_log:0x{:08x} run_crc_mod:0x{:08x}", count, crc_log, crc_mod)?; } else if row.scn == -1 { let ver = reader.read_u8(); let count = reader.read_u32(); let flags = reader.read_u32(); write!(buf, "ver:{} count:{} flags:0x{:08x}", ver, count, flags)?; } else { write!(buf, "unknow format")?; } }, Tag::RunCrc => { let mut scn = -1; if row.data().len() == size_of::<i64>() + 2 * size_of::<u32>() { scn = reader.read_i64(); } let crc_log = reader.read_u32(); let _ = reader.read_u32(); /* ignore run_crc_mod */ write!(buf, "SCN:{} log:0x{:08x}", scn, crc_log)?; }, Tag::SnapData | Tag::WalData | Tag::UserTag(_) | Tag::Tlv => { handler(buf, row.tag, row.data()); return Ok(()) } Tag::SnapFinal => { let mut end = [0u8; 4]; for i in 0..3 { end[i] = reader.read_u8() } if end!= ['E' as u8, 'N' as u8, 'D' as u8, 0] { write!(buf, " {:x?}", &end)?; } }, Tag::WalFinal => (), Tag::Nop => { if reader.unparsed().len() > 0 { let dummy = reader.read_u16(); if dummy!= 0 { write!(buf, " {:02x}", dummy)?; } } }, Tag::SysTag(_) => (), Tag::RaftAppend | Tag::RaftCommit => { let flags = 
reader.read_u16(); let term = reader.read_u64(); let inner_tag_raw = reader.read_u16(); let inner_tag = Tag::new(inner_tag_raw); write!(buf, "term:{} flags:0x{:02x} it:{} ", flags, term, inner_tag)?; match inner_tag { Tag::RunCrc => { let scn = reader.read_u64(); let log = reader.read_u32(); let _ = reader.read_u32(); /* ignore run_crc_mod */ write!(buf, "SCN:{} log:0x{:08x}", scn, log)?; }, Tag::Nop => { let dummy = reader.read_u16(); if dummy!= 0 { write!(buf, " {:02x}", dummy)?; } }, _ => { handler(buf, inner_tag_raw, reader.into_cursor().into_inner()); return Ok(()) } } }, Tag::RaftVote => { let flags = reader.read_u16(); let term = reader.read_u64(); let peer_id = reader.read_u8(); write!(buf, "term:{} flags:0x{:02x} peer:{}", term, flags, peer_id)?; }, Tag::ShardCreate | Tag::ShardAlter => { let ver = reader.read_u8(); if ver!= 1 { bail!("unknow version: {}", ver); } let shard_type = ShardType::new(reader.read_u8())?; let estimated_row_count = reader.read_u32(); match row.tag() { Tag::ShardCreate => write!(buf,"SHARD_CREATE")?, Tag::ShardAlter => write!(buf, "SHARD_ALTER")?, _ => unreachable!(), } write!(buf, " shard_id:{}", {row.shard_id})?; match shard_type { ShardType::RAFT => write!(buf, " RAFT")?, ShardType::POR => write!(buf, " POR")?, ShardType::PART => write!(buf, " PART")?, } let mod_name = reader.read_str(16); write!(buf, " {}", mod_name)?; write!(buf, " count:{} run_crc:0x{:08x}", estimated_row_count, unsafe { row.aux.run_crc })?; write!(buf, " master:{}", reader.read_str(16))?; for _ in 0..4 { let peer_name = reader.read_str(16); if peer_name.len() > 0 { write!(buf, " repl:{}", peer_name)?; } } let aux_len = reader.read_u16(); if aux_len > 0 { write!(buf, " aux:")?; for _ in 0..aux_len { let b = reader.read_u8(); write!(buf, "{:02x} ", b)?; } } }, Tag::ShardFinal => { let dummy = reader.read_u16(); if dummy!= 0 { write!(buf, " {:02x}", dummy)?; } }, } if reader.unparsed().len() > 0 { write!(buf, " unparsed: {:x?} ", reader.unparsed())?; } Ok(()) } #[test] fn test_print_row() { use std::{path::Path, fmt::Write}; println!("current dir {:?}", env::current_dir().unwrap()); let mut xlog = XLog::name(Path::new("testdata/00000000000000000002.xlog")).unwrap(); let mut buf = String::new(); env::set_var("OCTOPUS_CAT_ROW_HEADER", "1"); fn hexdump(buf: &mut String, _tag: u16, data: &[u8]) { write!(buf, " {:?x}", data).unwrap(); } if let IO::Read(reader) = &mut xlog.io { loop { match reader.read_row() { Ok(Some(row)) => { print_row(&mut buf, &row, &hexdump).unwrap(); println!("row {}", buf); buf.clear(); }, Ok(None) => break, Err(err) => { println!("fail {:?}", err); break; } } } } } mod ffi { use super::*; use crate::tbuf::TBuf; #[no_mangle] unsafe extern "C" fn print_row(out: *mut TBuf, row: *const Row, handler: Option<extern fn(out: *mut TBuf, tag: u16, data: *const TBuf) -> ()>) { let ret = super::print_row(&mut *out, &*row, &|buf: &mut TBuf, tag: u16,
random_line_split
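Editor's note: the row.rs rows above describe the tag layout in a comment: the low 14 bits (`TAG_MASK = 0x3fff`) carry the tag value and the two high bits carry the tag type (0x4000 snap, 0x8000 wal, 0xc000 system wal). The sketch below works through that split on one value; the helper and test names are invented.

fn split_tag(raw: u16) -> (u16, u16) {
    const TAG_MASK: u16 = 0x3fff;
    (raw & TAG_MASK, raw & !TAG_MASK)
}

#[cfg(test)]
mod tag_layout_sketch {
    use super::*;

    #[test]
    fn wal_data_tag_splits_cleanly() {
        // wal_data is tag 3; the 0x8000 bit marks it as a WAL row.
        let raw = 0x8000u16 | 3;
        assert_eq!(split_tag(raw), (3, 0x8000));
    }
}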
row.rs
notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #![allow(dead_code)] use std::{alloc::{alloc, dealloc, Layout}, env, fmt, mem, slice, io, io::Read, ops::{Deref, DerefMut}}; use once_cell::sync::Lazy; use byteorder::{LittleEndian, ReadBytesExt}; use anyhow::{bail, Context, Result}; use super::*; #[repr(C, packed)] pub union RowAux { pub remote_scn: [u8; 6], pub run_crc: u32, } extern { type RowData; } #[repr(C, packed)] pub struct Row { pub header_crc32c: u32, pub lsn: i64, pub scn: i64, pub tag: u16, shard_id: u16, aux: RowAux, tm: f64, pub len: u32, pub data_crc32c: u32, _data: RowData, } const ROW_LAYOUT : Layout = unsafe { Layout::from_size_align_unchecked(46, 16) }; pub struct BoxRow { ptr: *mut Row } impl Deref for BoxRow { type Target = Row; fn deref(&self) -> &Self::Target { unsafe { &*self.ptr } } } impl DerefMut for BoxRow { fn deref_mut(&mut self) -> &mut Self::Target { unsafe { &mut *self.ptr } } } impl Drop for BoxRow { fn drop(&mut self) { unsafe { dealloc(self.ptr as *mut _, Row::layout(self.len)); } } } impl Row { fn layout(len: u32) -> Layout { assert!(len < 2<<10); let data = Layout::from_size_align(len as usize, 1).unwrap(); ROW_LAYOUT.extend_packed(data).unwrap() } fn alloc(len: u32) -> *mut Row { unsafe { let ptr = alloc(Self::layout(len)) as *mut Row; (*ptr).len = len; ptr } } fn data_ptr(&self) -> *const u8 { let ptr = self as *const _ as *const u8; unsafe { ptr.add(ROW_LAYOUT.size()) } } pub fn data(&self) -> &[u8] { unsafe { slice::from_raw_parts(self.data_ptr(), self.len as usize) } } pub fn data_mut(&mut self) -> &mut [u8] { unsafe { slice::from_raw_parts_mut(self.data_ptr() as *mut _, self.len as usize) } } pub fn read(io: &mut dyn Read) -> Result<BoxRow> { let mut header = [0; ROW_LAYOUT.size()]; io.read_exact(&mut header).context("reading header")?; let header_crc32c = (&header[0..4]).read_u32::<LittleEndian>().unwrap(); let header_crc32c_calculated = crc32c(&header[4..]); if header_crc32c_calculated!= header_crc32c { bail!("header crc32c mismatch: expected 0x{:08x}, calculated 0x{:08x}", header_crc32c, header_crc32c_calculated); } let len = (&header[ROW_LAYOUT.size() - 8..]).read_u32::<LittleEndian>().unwrap(); let mut row = BoxRow { ptr: Self::alloc(len) }; row.as_bytes_mut().copy_from_slice(&header); debug_assert!(row.len == len); io.read_exact(row.data_mut()).context("reading body")?; if crc32c(row.data())!= row.data_crc32c { bail!("data crc32c mismatch: expected 0x{:08x}, calculated 0x{:08x}", {row.data_crc32c}, crc32c(row.data())); } log::debug!("read row LSN:{}", {row.lsn}); Ok(row) } pub fn write(&self, io: &mut dyn io::Write) -> io::Result<usize> { io.write_all(self.as_bytes())?; // FIXME: nasty and unportable 
io.write_all(self.data())?; Ok(ROW_LAYOUT.size() + self.data().len()) } fn as_bytes(&self) -> &[u8] { unsafe { slice::from_raw_parts(self as *const _ as *const u8, ROW_LAYOUT.size()) } } fn as_bytes_mut(&mut self) -> &mut [u8] { unsafe { slice::from_raw_parts_mut(self as *mut _ as *mut u8, ROW_LAYOUT.size()) } } pub fn update_crc(&mut self) { self.data_crc32c = crc32c(self.data()); self.header_crc32c = crc32c(&self.as_bytes()[4..]) } fn tag(&self) -> Tag { Tag::new(self.tag & TAG_MASK) } fn tag_type(&self) -> TagType { TagType::new(self.tag &!TAG_MASK) } } #[derive(Debug)] #[derive(PartialEq, Eq)] pub enum Tag { SnapInitial, SnapData, WalData, SnapFinal, WalFinal, RunCrc, Nop, RaftAppend, RaftCommit, RaftVote, ShardCreate, ShardAlter, ShardFinal, Tlv, SysTag(u8), UserTag(u8), } impl Tag { fn new(repr: u16) -> Self { match repr & TAG_MASK { 1 => Tag::SnapInitial, 2 => Tag::SnapData, 3 => Tag::WalData, 4 => Tag::SnapFinal, 5 => Tag::WalFinal, 6 => Tag::RunCrc, 7 => Tag::Nop, 8 => Tag::RaftAppend, 9 => Tag::RaftCommit, 10 => Tag::RaftVote, 11 => Tag::ShardCreate, 12 => Tag::ShardAlter, 13 => Tag::ShardFinal, 14 => Tag::Tlv, t if t < 32 => Tag::SysTag(t as u8), t => Tag::UserTag((t >> 5) as u8), } } fn as_u16(&self) -> u16 { match self { Tag::SnapInitial => 1, Tag::SnapData => 2, Tag::WalData => 3, Tag::SnapFinal => 4, Tag::WalFinal => 5, Tag::RunCrc => 6, Tag::Nop => 7, Tag::RaftAppend => 8, Tag::RaftCommit => 9, Tag::RaftVote => 10, Tag::ShardCreate => 11, Tag::ShardAlter => 12, Tag::ShardFinal => 13, Tag::Tlv => 14, Tag::SysTag(t) => *t as u16, Tag::UserTag(t) => *t as u16, } } } impl fmt::Display for Tag { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { Tag::SnapInitial => write!(f, "snap_initial"), Tag::SnapData => write!(f, "snap_data"), Tag::SnapFinal => write!(f, "snap_final"), Tag::WalData => write!(f, "wal_data"), Tag::WalFinal => write!(f, "wal_final"), Tag::ShardCreate => write!(f, "shard_create"), Tag::ShardAlter => write!(f, "shard_alter"), Tag::ShardFinal => write!(f, "shard_final"), Tag::RunCrc => write!(f, "run_crc"), Tag::Nop => write!(f, "nop"), Tag::RaftAppend => write!(f, "raft_append"), Tag::RaftCommit => write!(f, "raft_commit"), Tag::RaftVote => write!(f, "raft_vote"), Tag::Tlv => write!(f, "tlv"), Tag::SysTag(n) => write!(f, "sys{}", n), Tag::UserTag(n) => write!(f, "usr{}", n) } } } /* two highest bit in tag encode tag type: 00 - invalid 01 - snap 10 - wal 11 - system wal */ pub const TAG_MASK: u16 = 0x3fff; const TAG_SIZE: usize = 14; enum TagType { SNAP = 0x4000, WAL = 0x8000, SYS = 0xc000, INVALID = 0, } impl TagType { fn new(repr: u16) -> TagType { match repr &!TAG_MASK { 0x4000 => TagType::SNAP, 0x8000 => TagType::WAL, 0xc000 => TagType::SYS, _ => TagType::INVALID, } } } impl fmt::Display for TagType { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { TagType::SNAP => write!(f, "snap"), TagType::WAL => write!(f, "wal"), TagType::SYS => write!(f, "sys"), TagType::INVALID => write!(f, "invalid"), } } } #[derive(PartialEq, Eq)] enum ShardType { POR, RAFT, PART } impl ShardType { fn new(repr: u8) -> Result<Self> { match repr { 0 => Ok(ShardType::POR), 1 => Ok(ShardType::RAFT), 2 => Ok(ShardType::PART), _ => bail!("invalid shard type {}", repr) } } } // TODO: switch to byteordered? 
struct LittleEndianReader<'a> (std::io::Cursor<&'a[u8]>); impl<'a> LittleEndianReader<'a> { fn new(buf: &'a[u8]) -> Self { Self(std::io::Cursor::new(buf)) } fn read_u8(&mut self) -> u8 { self.0.read_u8().unwrap() } fn read_u16(&mut self) -> u16 { self.0.read_u16::<LittleEndian>().unwrap() } fn read_u32(&mut self) -> u32 { self.0.read_u32::<LittleEndian>().unwrap() } fn read_i64(&mut self) -> i64 { self.0.read_i64::<LittleEndian>().unwrap() } fn read_u64(&mut self) -> u64 { self.0.read_u64::<LittleEndian>().unwrap() } fn read_str(&mut self, len: usize) -> &str { let pos = self.0.position() as usize; let raw = &self.0.get_ref()[pos..pos+len]; let (raw, _) = raw.split_at(raw.iter().position(|&x| x == 0).unwrap_or(len)); let str = std::str::from_utf8(raw).unwrap(); self.0.set_position((pos+len) as u64); str } fn into_cursor(self) -> std::io::Cursor<&'a[u8]> { self.0 } fn
(&self) -> &[u8] { &self.0.get_ref()[self.0.position() as usize..] } } pub fn print_row<W: fmt::Write + fmt::Debug>(buf: &mut W, row: &Row, handler: &dyn Fn(&mut W, u16, &[u8])) -> Result<()> { fn int_flag(name: &str, default: bool) -> bool { let flag = try { let val = env::var(name).ok()?; val.parse::<usize>().ok()? }; Some(1) == flag || default } static PRINT_HEADER : Lazy<bool> = Lazy::new(|| { int_flag("OCTOPUS_CAT_ROW_HEADER", true) }); static PRINT_RUN_CRC : Lazy<bool> = Lazy::new(|| { int_flag("OCTOPUS_CAT_RUN_CRC", false) }); let tag = row.tag(); if *PRINT_HEADER || true { write!(buf, "lsn:{}", {row.lsn})?; if row.scn!= -1 || tag == Tag::RaftVote || tag == Tag::SnapData { write!(buf, " shard:{}", {row.shard_id})?; if *PRINT_RUN_CRC { write!(buf, " run_crc:0x{:08x}", unsafe {row.aux.run_crc})?; } } write!(buf, " scn:{} tm:{:.3} t:{}/{} ", {row.scn}, {row.tm}, row.tag_type(), row.tag())?; } use mem::size_of; let mut reader = LittleEndianReader::new(row.data()); match row.tag() { Tag::SnapInitial => { if row.data().len() == size_of::<u32>() * 3 { let count = reader.read_u32(); let crc_log = reader.read_u32(); let crc_mod = reader.read_u32(); write!(buf, "count:{} run_crc_log:0x{:08x} run_crc_mod:0x{:08x}", count, crc_log, crc_mod)?; } else if row.scn == -1 { let ver = reader.read_u8(); let count = reader.read_u32(); let flags = reader.read_u32(); write!(buf, "ver:{} count:{} flags:0x{:08x}", ver, count, flags)?; } else { write!(buf, "unknow format")?; } }, Tag::RunCrc => { let mut scn = -1; if row.data().len() == size_of::<i64>() + 2 * size_of::<u32>() { scn = reader.read_i64(); } let crc_log = reader.read_u32(); let _ = reader.read_u32(); /* ignore run_crc_mod */ write!(buf, "SCN:{} log:0x{:08x}", scn, crc_log)?; }, Tag::SnapData | Tag::WalData | Tag::UserTag(_) | Tag::Tlv => { handler(buf, row.tag, row.data()); return Ok(()) } Tag::SnapFinal => { let mut end = [0u8; 4]; for i in 0..3 { end[i] = reader.read_u8() } if end!= ['E' as u8, 'N' as u8, 'D' as u8, 0] { write!(buf, " {:x?}", &end)?; } }, Tag::WalFinal => (), Tag::Nop => { if reader.unparsed().len() > 0 { let dummy = reader.read_u16(); if dummy!= 0 { write!(buf, " {:02x}", dummy)?; } } }, Tag::SysTag(_) => (), Tag::RaftAppend | Tag::RaftCommit => { let flags = reader.read_u16(); let term = reader.read_u64(); let inner_tag_raw = reader.read_u16(); let inner_tag = Tag::new(inner_tag_raw); write!(buf, "term:{} flags:0x{:02x} it:{} ", flags, term, inner_tag)?; match inner_tag { Tag::RunCrc => { let scn = reader.read_u64(); let log = reader.read_u32(); let _ = reader.read_u32(); /* ignore run_crc_mod */ write!(buf, "SCN:{} log:0x{:08x}", scn, log)?; }, Tag::Nop => { let dummy = reader.read_u16(); if dummy!= 0 { write!(buf, " {:02x}", dummy)?; } }, _ => { handler(buf, inner_tag_raw, reader.into_cursor().into_inner()); return Ok(()) } } }, Tag::RaftVote => { let flags = reader.read_u16(); let term = reader.read_u64(); let peer_id = reader.read_u8(); write!(buf, "term:{} flags:0x{:02x} peer:{}", term, flags, peer_id)?; }, Tag::ShardCreate | Tag::ShardAlter => { let ver = reader.read_u8(); if ver!= 1 { bail!("unknow version: {}", ver); } let shard_type = ShardType::new(reader.read_u8())?; let estimated_row_count = reader.read_u32(); match row.tag() { Tag::ShardCreate => write!(buf,"SHARD_CREATE")?, Tag::ShardAlter => write!(buf, "SHARD_ALTER")?, _ => unreachable!(), } write!(buf, " shard_id:{}", {row.shard_id})?; match shard_type { ShardType::RAFT => write!(buf, " RAFT")?, ShardType::POR => write!(buf, " POR")?, ShardType::PART => 
write!(buf, " PART")?, } let mod_name = reader.read_str(16); write!(buf, " {}", mod_name)?; write!(buf, " count:{} run_crc:0x{:08x}", estimated_row_count, unsafe { row.aux.run_crc })?; write!(buf, " master:{}", reader.read_str(16))?; for _ in 0..4 { let peer_name = reader.read_str(16); if peer_name.len() > 0 { write!(buf, " repl:{}", peer_name)?; } } let aux_len = reader.read_u16(); if aux_len > 0 { write!(buf, " aux:")?; for _ in 0..aux_len { let b = reader.read_u8(); write!(buf, "{:02x} ", b)?; } } }, Tag::ShardFinal => { let dummy = reader.read_u16(); if dummy!= 0 { write!(buf, " {:02x}", dummy)?; } }, } if reader.unparsed().len() > 0 { write!(buf, " unparsed: {:x?} ", reader.unparsed())?; } Ok(()) } #[test] fn test_print_row() { use std::{path::Path, fmt::Write}; println!("current dir {:?}", env::current_dir().unwrap()); let mut xlog = XLog::name(Path::new("testdata/00000000000000000002.xlog")).unwrap(); let mut buf = String::new(); env::set_var("OCTOPUS_CAT_ROW_HEADER", "1"); fn hexdump(buf: &mut String, _tag: u16, data: &[u8]) { write!(buf, " {:?x}", data).unwrap(); } if let IO::Read(reader) = &mut xlog.io { loop { match reader.read_row() { Ok(Some(row)) => { print_row(&mut buf, &row, &hexdump).unwrap(); println!("row {}", buf); buf.clear(); }, Ok(None) => break, Err(err) => { println!("fail {:?}", err); break; } } } } } mod ffi { use super::*; use crate::tbuf::TBuf; #[no_mangle] unsafe extern "C" fn print_row(out: *mut TBuf, row: *const Row, handler: Option<extern fn(out: *mut TBuf, tag: u16, data: *const TBuf) -> ()>) { let ret = super::print_row(&mut *out, &*row, &|buf: &mut TBuf, tag: u16
unparsed
identifier_name
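The row above documents Octopus's on-disk row tag layout: the two highest bits of the 16-bit tag select the tag type (snap / wal / sys), and the low 14 bits, selected by TAG_MASK = 0x3fff, carry the tag value itself. The following is a minimal, standalone sketch of that split; the split_tag helper and the main driver are illustrative only and are not part of the module above.

const TAG_MASK: u16 = 0x3fff;

/// Splits a raw on-disk tag into (tag value, tag-type bits).
fn split_tag(raw: u16) -> (u16, u16) {
    (raw & TAG_MASK, raw & !TAG_MASK)
}

fn main() {
    // 0x8007: WAL type bits (0x8000) carrying tag value 7 (Nop in the table above).
    let (value, ty) = split_tag(0x8007);
    assert_eq!(value, 7);
    assert_eq!(ty, 0x8000);
    println!("tag value = {}, type bits = 0x{:04x}", value, ty);
}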
lib.rs
// This file is part of Substrate. // Copyright (C) Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! # Utility Pallet //! A stateless pallet with helpers for dispatch management which does no re-authentication. //! //! - [`Config`] //! - [`Call`] //! //! ## Overview //! //! This pallet contains two basic pieces of functionality: //! - Batch dispatch: A stateless operation, allowing any origin to execute multiple calls in a //! single dispatch. This can be useful to amalgamate proposals, combining `set_code` with //! corresponding `set_storage`s, for efficient multiple payouts with just a single signature //! verify, or in combination with one of the other two dispatch functionality. //! - Pseudonymal dispatch: A stateless operation, allowing a signed origin to execute a call from //! an alternative signed origin. Each account has 2 * 2**16 possible "pseudonyms" (alternative //! account IDs) and these can be stacked. This can be useful as a key management tool, where you //! need multiple distinct accounts (e.g. as controllers for many staking accounts), but where //! it's perfectly fine to have each of them controlled by the same underlying keypair. Derivative //! accounts are, for the purposes of proxy filtering considered exactly the same as the origin //! and are thus hampered with the origin's filters. //! //! Since proxy filters are respected in all dispatches of this pallet, it should never need to be //! filtered by any proxy. //! //! ## Interface //! //! ### Dispatchable Functions //! //! #### For batch dispatch //! * `batch` - Dispatch multiple calls from the sender's origin. //! //! #### For pseudonymal dispatch //! * `as_derivative` - Dispatch a call from a derivative signed origin. // Ensure we're `no_std` when compiling for Wasm. #![cfg_attr(not(feature = "std"), no_std)] mod benchmarking; mod tests; pub mod weights; use codec::{Decode, Encode}; use frame_support::{ dispatch::{extract_actual_weight, GetDispatchInfo, PostDispatchInfo}, traits::{IsSubType, OriginTrait, UnfilteredDispatchable}, }; use sp_core::TypeId; use sp_io::hashing::blake2_256; use sp_runtime::traits::{BadOrigin, Dispatchable, TrailingZeroInput}; use sp_std::prelude::*; pub use weights::WeightInfo; pub use pallet::*; #[frame_support::pallet] pub mod pallet { use super::*; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; #[pallet::pallet] pub struct Pallet<T>(_); /// Configuration trait. #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. type RuntimeEvent: From<Event> + IsType<<Self as frame_system::Config>::RuntimeEvent>; /// The overarching call type. 
type RuntimeCall: Parameter + Dispatchable<RuntimeOrigin = Self::RuntimeOrigin, PostInfo = PostDispatchInfo> + GetDispatchInfo + From<frame_system::Call<Self>> + UnfilteredDispatchable<RuntimeOrigin = Self::RuntimeOrigin> + IsSubType<Call<Self>> + IsType<<Self as frame_system::Config>::RuntimeCall>; /// The caller origin, overarching type of all pallets origins. type PalletsOrigin: Parameter + Into<<Self as frame_system::Config>::RuntimeOrigin> + IsType<<<Self as frame_system::Config>::RuntimeOrigin as frame_support::traits::OriginTrait>::PalletsOrigin>; /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; } #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { /// Batch of dispatches did not complete fully. Index of first failing dispatch given, as /// well as the error. BatchInterrupted { index: u32, error: DispatchError }, /// Batch of dispatches completed fully with no error. BatchCompleted, /// Batch of dispatches completed but has errors. BatchCompletedWithErrors, /// A single item within a Batch of dispatches has completed with no error. ItemCompleted, /// A single item within a Batch of dispatches has completed with error. ItemFailed { error: DispatchError }, /// A call was dispatched. DispatchedAs { result: DispatchResult }, } // Align the call size to 1KB. As we are currently compiling the runtime for native/wasm // the `size_of` of the `Call` can be different. To ensure that this don't leads to // mismatches between native/wasm or to different metadata for the same runtime, we // algin the call size. The value is chosen big enough to hopefully never reach it. const CALL_ALIGN: u32 = 1024; #[pallet::extra_constants] impl<T: Config> Pallet<T> { /// The limit on the number of batched calls. fn batched_calls_limit() -> u32
} #[pallet::hooks] impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> { fn integrity_test() { // If you hit this error, you need to try to `Box` big dispatchable parameters. assert!( sp_std::mem::size_of::<<T as Config>::RuntimeCall>() as u32 <= CALL_ALIGN, "Call enum size should be smaller than {} bytes.", CALL_ALIGN, ); } } #[pallet::error] pub enum Error<T> { /// Too many calls batched. TooManyCalls, } #[pallet::call] impl<T: Config> Pallet<T> { /// Send a batch of dispatch calls. /// /// May be called from any origin except `None`. /// /// - `calls`: The calls to be dispatched from the same origin. The number of call must not /// exceed the constant: `batched_calls_limit` (available in constant metadata). /// /// If origin is root then the calls are dispatched without checking origin filter. (This /// includes bypassing `frame_system::Config::BaseCallFilter`). /// /// ## Complexity /// - O(C) where C is the number of calls to be batched. /// /// This will return `Ok` in all circumstances. To determine the success of the batch, an /// event is deposited. If a call failed and the batch was interrupted, then the /// `BatchInterrupted` event is deposited, along with the number of successful calls made /// and the error of the failed call. If all were successful, then the `BatchCompleted` /// event is deposited. #[pallet::call_index(0)] #[pallet::weight({ let dispatch_infos = calls.iter().map(|call| call.get_dispatch_info()).collect::<Vec<_>>(); let dispatch_weight = dispatch_infos.iter() .map(|di| di.weight) .fold(Weight::zero(), |total: Weight, weight: Weight| total.saturating_add(weight)) .saturating_add(T::WeightInfo::batch(calls.len() as u32)); let dispatch_class = { let all_operational = dispatch_infos.iter() .map(|di| di.class) .all(|class| class == DispatchClass::Operational); if all_operational { DispatchClass::Operational } else { DispatchClass::Normal } }; (dispatch_weight, dispatch_class) })] pub fn batch( origin: OriginFor<T>, calls: Vec<<T as Config>::RuntimeCall>, ) -> DispatchResultWithPostInfo { // Do not allow the `None` origin. if ensure_none(origin.clone()).is_ok() { return Err(BadOrigin.into()) } let is_root = ensure_root(origin.clone()).is_ok(); let calls_len = calls.len(); ensure!(calls_len <= Self::batched_calls_limit() as usize, Error::<T>::TooManyCalls); // Track the actual weight of each of the batch calls. let mut weight = Weight::zero(); for (index, call) in calls.into_iter().enumerate() { let info = call.get_dispatch_info(); // If origin is root, don't apply any dispatch filters; root can call anything. let result = if is_root { call.dispatch_bypass_filter(origin.clone()) } else { call.dispatch(origin.clone()) }; // Add the weight of this call. weight = weight.saturating_add(extract_actual_weight(&result, &info)); if let Err(e) = result { Self::deposit_event(Event::BatchInterrupted { index: index as u32, error: e.error, }); // Take the weight of this function itself into account. let base_weight = T::WeightInfo::batch(index.saturating_add(1) as u32); // Return the actual used weight + base_weight of this call. return Ok(Some(base_weight + weight).into()) } Self::deposit_event(Event::ItemCompleted); } Self::deposit_event(Event::BatchCompleted); let base_weight = T::WeightInfo::batch(calls_len as u32); Ok(Some(base_weight + weight).into()) } /// Send a call through an indexed pseudonym of the sender. /// /// Filter from origin are passed along. The call will be dispatched with an origin which /// use the same filter as the origin of this call. 
/// /// NOTE: If you need to ensure that any account-based filtering is not honored (i.e. /// because you expect `proxy` to have been used prior in the call stack and you do not want /// the call restrictions to apply to any sub-accounts), then use `as_multi_threshold_1` /// in the Multisig pallet instead. /// /// NOTE: Prior to version *12, this was called `as_limited_sub`. /// /// The dispatch origin for this call must be _Signed_. #[pallet::call_index(1)] #[pallet::weight({ let dispatch_info = call.get_dispatch_info(); ( T::WeightInfo::as_derivative() // AccountData for inner call origin accountdata. .saturating_add(T::DbWeight::get().reads_writes(1, 1)) .saturating_add(dispatch_info.weight), dispatch_info.class, ) })] pub fn as_derivative( origin: OriginFor<T>, index: u16, call: Box<<T as Config>::RuntimeCall>, ) -> DispatchResultWithPostInfo { let mut origin = origin; let who = ensure_signed(origin.clone())?; let pseudonym = Self::derivative_account_id(who, index); origin.set_caller_from(frame_system::RawOrigin::Signed(pseudonym)); let info = call.get_dispatch_info(); let result = call.dispatch(origin); // Always take into account the base weight of this call. let mut weight = T::WeightInfo::as_derivative() .saturating_add(T::DbWeight::get().reads_writes(1, 1)); // Add the real weight of the dispatch. weight = weight.saturating_add(extract_actual_weight(&result, &info)); result .map_err(|mut err| { err.post_info = Some(weight).into(); err }) .map(|_| Some(weight).into()) } /// Send a batch of dispatch calls and atomically execute them. /// The whole transaction will rollback and fail if any of the calls failed. /// /// May be called from any origin except `None`. /// /// - `calls`: The calls to be dispatched from the same origin. The number of call must not /// exceed the constant: `batched_calls_limit` (available in constant metadata). /// /// If origin is root then the calls are dispatched without checking origin filter. (This /// includes bypassing `frame_system::Config::BaseCallFilter`). /// /// ## Complexity /// - O(C) where C is the number of calls to be batched. #[pallet::call_index(2)] #[pallet::weight({ let dispatch_infos = calls.iter().map(|call| call.get_dispatch_info()).collect::<Vec<_>>(); let dispatch_weight = dispatch_infos.iter() .map(|di| di.weight) .fold(Weight::zero(), |total: Weight, weight: Weight| total.saturating_add(weight)) .saturating_add(T::WeightInfo::batch_all(calls.len() as u32)); let dispatch_class = { let all_operational = dispatch_infos.iter() .map(|di| di.class) .all(|class| class == DispatchClass::Operational); if all_operational { DispatchClass::Operational } else { DispatchClass::Normal } }; (dispatch_weight, dispatch_class) })] pub fn batch_all( origin: OriginFor<T>, calls: Vec<<T as Config>::RuntimeCall>, ) -> DispatchResultWithPostInfo { // Do not allow the `None` origin. if ensure_none(origin.clone()).is_ok() { return Err(BadOrigin.into()) } let is_root = ensure_root(origin.clone()).is_ok(); let calls_len = calls.len(); ensure!(calls_len <= Self::batched_calls_limit() as usize, Error::<T>::TooManyCalls); // Track the actual weight of each of the batch calls. let mut weight = Weight::zero(); for (index, call) in calls.into_iter().enumerate() { let info = call.get_dispatch_info(); // If origin is root, bypass any dispatch filter; root can call anything. let result = if is_root { call.dispatch_bypass_filter(origin.clone()) } else { let mut filtered_origin = origin.clone(); // Don't allow users to nest `batch_all` calls. 
filtered_origin.add_filter( move |c: &<T as frame_system::Config>::RuntimeCall| { let c = <T as Config>::RuntimeCall::from_ref(c); !matches!(c.is_sub_type(), Some(Call::batch_all {.. })) }, ); call.dispatch(filtered_origin) }; // Add the weight of this call. weight = weight.saturating_add(extract_actual_weight(&result, &info)); result.map_err(|mut err| { // Take the weight of this function itself into account. let base_weight = T::WeightInfo::batch_all(index.saturating_add(1) as u32); // Return the actual used weight + base_weight of this call. err.post_info = Some(base_weight + weight).into(); err })?; Self::deposit_event(Event::ItemCompleted); } Self::deposit_event(Event::BatchCompleted); let base_weight = T::WeightInfo::batch_all(calls_len as u32); Ok(Some(base_weight.saturating_add(weight)).into()) } /// Dispatches a function call with a provided origin. /// /// The dispatch origin for this call must be _Root_. /// /// ## Complexity /// - O(1). #[pallet::call_index(3)] #[pallet::weight({ let dispatch_info = call.get_dispatch_info(); ( T::WeightInfo::dispatch_as() .saturating_add(dispatch_info.weight), dispatch_info.class, ) })] pub fn dispatch_as( origin: OriginFor<T>, as_origin: Box<T::PalletsOrigin>, call: Box<<T as Config>::RuntimeCall>, ) -> DispatchResult { ensure_root(origin)?; let res = call.dispatch_bypass_filter((*as_origin).into()); Self::deposit_event(Event::DispatchedAs { result: res.map(|_| ()).map_err(|e| e.error), }); Ok(()) } /// Send a batch of dispatch calls. /// Unlike `batch`, it allows errors and won't interrupt. /// /// May be called from any origin except `None`. /// /// - `calls`: The calls to be dispatched from the same origin. The number of call must not /// exceed the constant: `batched_calls_limit` (available in constant metadata). /// /// If origin is root then the calls are dispatch without checking origin filter. (This /// includes bypassing `frame_system::Config::BaseCallFilter`). /// /// ## Complexity /// - O(C) where C is the number of calls to be batched. #[pallet::call_index(4)] #[pallet::weight({ let dispatch_infos = calls.iter().map(|call| call.get_dispatch_info()).collect::<Vec<_>>(); let dispatch_weight = dispatch_infos.iter() .map(|di| di.weight) .fold(Weight::zero(), |total: Weight, weight: Weight| total.saturating_add(weight)) .saturating_add(T::WeightInfo::force_batch(calls.len() as u32)); let dispatch_class = { let all_operational = dispatch_infos.iter() .map(|di| di.class) .all(|class| class == DispatchClass::Operational); if all_operational { DispatchClass::Operational } else { DispatchClass::Normal } }; (dispatch_weight, dispatch_class) })] pub fn force_batch( origin: OriginFor<T>, calls: Vec<<T as Config>::RuntimeCall>, ) -> DispatchResultWithPostInfo { // Do not allow the `None` origin. if ensure_none(origin.clone()).is_ok() { return Err(BadOrigin.into()) } let is_root = ensure_root(origin.clone()).is_ok(); let calls_len = calls.len(); ensure!(calls_len <= Self::batched_calls_limit() as usize, Error::<T>::TooManyCalls); // Track the actual weight of each of the batch calls. let mut weight = Weight::zero(); // Track failed dispatch occur. let mut has_error: bool = false; for call in calls.into_iter() { let info = call.get_dispatch_info(); // If origin is root, don't apply any dispatch filters; root can call anything. let result = if is_root { call.dispatch_bypass_filter(origin.clone()) } else { call.dispatch(origin.clone()) }; // Add the weight of this call. 
weight = weight.saturating_add(extract_actual_weight(&result, &info)); if let Err(e) = result { has_error = true; Self::deposit_event(Event::ItemFailed { error: e.error }); } else { Self::deposit_event(Event::ItemCompleted); } } if has_error { Self::deposit_event(Event::BatchCompletedWithErrors); } else { Self::deposit_event(Event::BatchCompleted); } let base_weight = T::WeightInfo::batch(calls_len as u32); Ok(Some(base_weight.saturating_add(weight)).into()) } /// Dispatch a function call with a specified weight. /// /// This function does not check the weight of the call, and instead allows the /// Root origin to specify the weight of the call. /// /// The dispatch origin for this call must be _Root_. #[pallet::call_index(5)] #[pallet::weight((*_weight, call.get_dispatch_info().class))] pub fn with_weight( origin: OriginFor<T>, call: Box<<T as Config>::RuntimeCall>, _weight: Weight, ) -> DispatchResult { ensure_root(origin)?; let res = call.dispatch_bypass_filter(frame_system::RawOrigin::Root.into()); res.map(|_| ()).map_err(|e| e.error) } } } /// A pallet identifier. These are per pallet and should be stored in a registry somewhere. #[derive(Clone, Copy, Eq, PartialEq, Encode, Decode)] struct IndexedUtilityPalletId(u16); impl TypeId for IndexedUtilityPalletId { const TYPE_ID: [u8; 4] = *b"suba"; } impl<T: Config> Pallet<T> { /// Derive a derivative account ID from the owner account and the sub-account index. pub fn derivative_account_id(who: T::AccountId, index: u16) -> T::AccountId { let entropy = (b"modlpy/utilisuba", who, index).using_encoded(blake2_256); Decode::decode(&mut TrailingZeroInput::new(entropy.as_ref())) .expect("infinite length input; no invalid inputs for type; qed") } }
{ let allocator_limit = sp_core::MAX_POSSIBLE_ALLOCATION; let call_size = ((sp_std::mem::size_of::<<T as Config>::RuntimeCall>() as u32 + CALL_ALIGN - 1) / CALL_ALIGN) * CALL_ALIGN; // The margin to take into account vec doubling capacity. let margin_factor = 3; allocator_limit / margin_factor / call_size }
identifier_body
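The filled-in middle of the row above is the body of batched_calls_limit: the encoded call size is rounded up to the next CALL_ALIGN boundary, and the allocator limit is divided by a safety margin and by that rounded size. Below is a standalone sketch of the same arithmetic with assumed placeholder inputs; the real values come from sp_core::MAX_POSSIBLE_ALLOCATION and the runtime's RuntimeCall size, which are not available here.

const CALL_ALIGN: u32 = 1024;

fn batched_calls_limit(allocator_limit: u32, call_size_bytes: u32) -> u32 {
    // Round the encoded call size up to the next CALL_ALIGN boundary, as the
    // pallet does, so native and wasm builds agree on the limit.
    let call_size = ((call_size_bytes + CALL_ALIGN - 1) / CALL_ALIGN) * CALL_ALIGN;
    // Margin accounting for Vec doubling its capacity while decoding.
    let margin_factor = 3;
    allocator_limit / margin_factor / call_size
}

fn main() {
    // With an assumed 32 MiB allocator limit and a 208-byte Call enum, the
    // call size rounds up to 1024 bytes and the limit works out to 10922 calls.
    assert_eq!(batched_calls_limit(32 * 1024 * 1024, 208), 10922);
}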
lib.rs
// This file is part of Substrate. // Copyright (C) Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! # Utility Pallet //! A stateless pallet with helpers for dispatch management which does no re-authentication. //! //! - [`Config`] //! - [`Call`] //! //! ## Overview //! //! This pallet contains two basic pieces of functionality: //! - Batch dispatch: A stateless operation, allowing any origin to execute multiple calls in a //! single dispatch. This can be useful to amalgamate proposals, combining `set_code` with //! corresponding `set_storage`s, for efficient multiple payouts with just a single signature //! verify, or in combination with one of the other two dispatch functionality. //! - Pseudonymal dispatch: A stateless operation, allowing a signed origin to execute a call from //! an alternative signed origin. Each account has 2 * 2**16 possible "pseudonyms" (alternative //! account IDs) and these can be stacked. This can be useful as a key management tool, where you //! need multiple distinct accounts (e.g. as controllers for many staking accounts), but where //! it's perfectly fine to have each of them controlled by the same underlying keypair. Derivative //! accounts are, for the purposes of proxy filtering considered exactly the same as the origin //! and are thus hampered with the origin's filters. //! //! Since proxy filters are respected in all dispatches of this pallet, it should never need to be //! filtered by any proxy. //! //! ## Interface //! //! ### Dispatchable Functions //! //! #### For batch dispatch //! * `batch` - Dispatch multiple calls from the sender's origin. //! //! #### For pseudonymal dispatch //! * `as_derivative` - Dispatch a call from a derivative signed origin. // Ensure we're `no_std` when compiling for Wasm. #![cfg_attr(not(feature = "std"), no_std)] mod benchmarking; mod tests; pub mod weights; use codec::{Decode, Encode}; use frame_support::{ dispatch::{extract_actual_weight, GetDispatchInfo, PostDispatchInfo}, traits::{IsSubType, OriginTrait, UnfilteredDispatchable}, }; use sp_core::TypeId; use sp_io::hashing::blake2_256; use sp_runtime::traits::{BadOrigin, Dispatchable, TrailingZeroInput}; use sp_std::prelude::*; pub use weights::WeightInfo; pub use pallet::*; #[frame_support::pallet] pub mod pallet { use super::*; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; #[pallet::pallet] pub struct Pallet<T>(_); /// Configuration trait. #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. type RuntimeEvent: From<Event> + IsType<<Self as frame_system::Config>::RuntimeEvent>; /// The overarching call type. 
type RuntimeCall: Parameter + Dispatchable<RuntimeOrigin = Self::RuntimeOrigin, PostInfo = PostDispatchInfo> + GetDispatchInfo + From<frame_system::Call<Self>> + UnfilteredDispatchable<RuntimeOrigin = Self::RuntimeOrigin> + IsSubType<Call<Self>> + IsType<<Self as frame_system::Config>::RuntimeCall>; /// The caller origin, overarching type of all pallets origins. type PalletsOrigin: Parameter + Into<<Self as frame_system::Config>::RuntimeOrigin> + IsType<<<Self as frame_system::Config>::RuntimeOrigin as frame_support::traits::OriginTrait>::PalletsOrigin>; /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; } #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { /// Batch of dispatches did not complete fully. Index of first failing dispatch given, as /// well as the error. BatchInterrupted { index: u32, error: DispatchError }, /// Batch of dispatches completed fully with no error. BatchCompleted, /// Batch of dispatches completed but has errors. BatchCompletedWithErrors, /// A single item within a Batch of dispatches has completed with no error. ItemCompleted, /// A single item within a Batch of dispatches has completed with error. ItemFailed { error: DispatchError }, /// A call was dispatched. DispatchedAs { result: DispatchResult }, } // Align the call size to 1KB. As we are currently compiling the runtime for native/wasm // the `size_of` of the `Call` can be different. To ensure that this don't leads to // mismatches between native/wasm or to different metadata for the same runtime, we // algin the call size. The value is chosen big enough to hopefully never reach it. const CALL_ALIGN: u32 = 1024; #[pallet::extra_constants] impl<T: Config> Pallet<T> { /// The limit on the number of batched calls. fn batched_calls_limit() -> u32 { let allocator_limit = sp_core::MAX_POSSIBLE_ALLOCATION; let call_size = ((sp_std::mem::size_of::<<T as Config>::RuntimeCall>() as u32 + CALL_ALIGN - 1) / CALL_ALIGN) * CALL_ALIGN; // The margin to take into account vec doubling capacity. let margin_factor = 3; allocator_limit / margin_factor / call_size } } #[pallet::hooks] impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> { fn
() { // If you hit this error, you need to try to `Box` big dispatchable parameters. assert!( sp_std::mem::size_of::<<T as Config>::RuntimeCall>() as u32 <= CALL_ALIGN, "Call enum size should be smaller than {} bytes.", CALL_ALIGN, ); } } #[pallet::error] pub enum Error<T> { /// Too many calls batched. TooManyCalls, } #[pallet::call] impl<T: Config> Pallet<T> { /// Send a batch of dispatch calls. /// /// May be called from any origin except `None`. /// /// - `calls`: The calls to be dispatched from the same origin. The number of call must not /// exceed the constant: `batched_calls_limit` (available in constant metadata). /// /// If origin is root then the calls are dispatched without checking origin filter. (This /// includes bypassing `frame_system::Config::BaseCallFilter`). /// /// ## Complexity /// - O(C) where C is the number of calls to be batched. /// /// This will return `Ok` in all circumstances. To determine the success of the batch, an /// event is deposited. If a call failed and the batch was interrupted, then the /// `BatchInterrupted` event is deposited, along with the number of successful calls made /// and the error of the failed call. If all were successful, then the `BatchCompleted` /// event is deposited. #[pallet::call_index(0)] #[pallet::weight({ let dispatch_infos = calls.iter().map(|call| call.get_dispatch_info()).collect::<Vec<_>>(); let dispatch_weight = dispatch_infos.iter() .map(|di| di.weight) .fold(Weight::zero(), |total: Weight, weight: Weight| total.saturating_add(weight)) .saturating_add(T::WeightInfo::batch(calls.len() as u32)); let dispatch_class = { let all_operational = dispatch_infos.iter() .map(|di| di.class) .all(|class| class == DispatchClass::Operational); if all_operational { DispatchClass::Operational } else { DispatchClass::Normal } }; (dispatch_weight, dispatch_class) })] pub fn batch( origin: OriginFor<T>, calls: Vec<<T as Config>::RuntimeCall>, ) -> DispatchResultWithPostInfo { // Do not allow the `None` origin. if ensure_none(origin.clone()).is_ok() { return Err(BadOrigin.into()) } let is_root = ensure_root(origin.clone()).is_ok(); let calls_len = calls.len(); ensure!(calls_len <= Self::batched_calls_limit() as usize, Error::<T>::TooManyCalls); // Track the actual weight of each of the batch calls. let mut weight = Weight::zero(); for (index, call) in calls.into_iter().enumerate() { let info = call.get_dispatch_info(); // If origin is root, don't apply any dispatch filters; root can call anything. let result = if is_root { call.dispatch_bypass_filter(origin.clone()) } else { call.dispatch(origin.clone()) }; // Add the weight of this call. weight = weight.saturating_add(extract_actual_weight(&result, &info)); if let Err(e) = result { Self::deposit_event(Event::BatchInterrupted { index: index as u32, error: e.error, }); // Take the weight of this function itself into account. let base_weight = T::WeightInfo::batch(index.saturating_add(1) as u32); // Return the actual used weight + base_weight of this call. return Ok(Some(base_weight + weight).into()) } Self::deposit_event(Event::ItemCompleted); } Self::deposit_event(Event::BatchCompleted); let base_weight = T::WeightInfo::batch(calls_len as u32); Ok(Some(base_weight + weight).into()) } /// Send a call through an indexed pseudonym of the sender. /// /// Filter from origin are passed along. The call will be dispatched with an origin which /// use the same filter as the origin of this call. /// /// NOTE: If you need to ensure that any account-based filtering is not honored (i.e. 
/// because you expect `proxy` to have been used prior in the call stack and you do not want /// the call restrictions to apply to any sub-accounts), then use `as_multi_threshold_1` /// in the Multisig pallet instead. /// /// NOTE: Prior to version *12, this was called `as_limited_sub`. /// /// The dispatch origin for this call must be _Signed_. #[pallet::call_index(1)] #[pallet::weight({ let dispatch_info = call.get_dispatch_info(); ( T::WeightInfo::as_derivative() // AccountData for inner call origin accountdata. .saturating_add(T::DbWeight::get().reads_writes(1, 1)) .saturating_add(dispatch_info.weight), dispatch_info.class, ) })] pub fn as_derivative( origin: OriginFor<T>, index: u16, call: Box<<T as Config>::RuntimeCall>, ) -> DispatchResultWithPostInfo { let mut origin = origin; let who = ensure_signed(origin.clone())?; let pseudonym = Self::derivative_account_id(who, index); origin.set_caller_from(frame_system::RawOrigin::Signed(pseudonym)); let info = call.get_dispatch_info(); let result = call.dispatch(origin); // Always take into account the base weight of this call. let mut weight = T::WeightInfo::as_derivative() .saturating_add(T::DbWeight::get().reads_writes(1, 1)); // Add the real weight of the dispatch. weight = weight.saturating_add(extract_actual_weight(&result, &info)); result .map_err(|mut err| { err.post_info = Some(weight).into(); err }) .map(|_| Some(weight).into()) } /// Send a batch of dispatch calls and atomically execute them. /// The whole transaction will rollback and fail if any of the calls failed. /// /// May be called from any origin except `None`. /// /// - `calls`: The calls to be dispatched from the same origin. The number of call must not /// exceed the constant: `batched_calls_limit` (available in constant metadata). /// /// If origin is root then the calls are dispatched without checking origin filter. (This /// includes bypassing `frame_system::Config::BaseCallFilter`). /// /// ## Complexity /// - O(C) where C is the number of calls to be batched. #[pallet::call_index(2)] #[pallet::weight({ let dispatch_infos = calls.iter().map(|call| call.get_dispatch_info()).collect::<Vec<_>>(); let dispatch_weight = dispatch_infos.iter() .map(|di| di.weight) .fold(Weight::zero(), |total: Weight, weight: Weight| total.saturating_add(weight)) .saturating_add(T::WeightInfo::batch_all(calls.len() as u32)); let dispatch_class = { let all_operational = dispatch_infos.iter() .map(|di| di.class) .all(|class| class == DispatchClass::Operational); if all_operational { DispatchClass::Operational } else { DispatchClass::Normal } }; (dispatch_weight, dispatch_class) })] pub fn batch_all( origin: OriginFor<T>, calls: Vec<<T as Config>::RuntimeCall>, ) -> DispatchResultWithPostInfo { // Do not allow the `None` origin. if ensure_none(origin.clone()).is_ok() { return Err(BadOrigin.into()) } let is_root = ensure_root(origin.clone()).is_ok(); let calls_len = calls.len(); ensure!(calls_len <= Self::batched_calls_limit() as usize, Error::<T>::TooManyCalls); // Track the actual weight of each of the batch calls. let mut weight = Weight::zero(); for (index, call) in calls.into_iter().enumerate() { let info = call.get_dispatch_info(); // If origin is root, bypass any dispatch filter; root can call anything. let result = if is_root { call.dispatch_bypass_filter(origin.clone()) } else { let mut filtered_origin = origin.clone(); // Don't allow users to nest `batch_all` calls. 
filtered_origin.add_filter( move |c: &<T as frame_system::Config>::RuntimeCall| { let c = <T as Config>::RuntimeCall::from_ref(c); !matches!(c.is_sub_type(), Some(Call::batch_all {.. })) }, ); call.dispatch(filtered_origin) }; // Add the weight of this call. weight = weight.saturating_add(extract_actual_weight(&result, &info)); result.map_err(|mut err| { // Take the weight of this function itself into account. let base_weight = T::WeightInfo::batch_all(index.saturating_add(1) as u32); // Return the actual used weight + base_weight of this call. err.post_info = Some(base_weight + weight).into(); err })?; Self::deposit_event(Event::ItemCompleted); } Self::deposit_event(Event::BatchCompleted); let base_weight = T::WeightInfo::batch_all(calls_len as u32); Ok(Some(base_weight.saturating_add(weight)).into()) } /// Dispatches a function call with a provided origin. /// /// The dispatch origin for this call must be _Root_. /// /// ## Complexity /// - O(1). #[pallet::call_index(3)] #[pallet::weight({ let dispatch_info = call.get_dispatch_info(); ( T::WeightInfo::dispatch_as() .saturating_add(dispatch_info.weight), dispatch_info.class, ) })] pub fn dispatch_as( origin: OriginFor<T>, as_origin: Box<T::PalletsOrigin>, call: Box<<T as Config>::RuntimeCall>, ) -> DispatchResult { ensure_root(origin)?; let res = call.dispatch_bypass_filter((*as_origin).into()); Self::deposit_event(Event::DispatchedAs { result: res.map(|_| ()).map_err(|e| e.error), }); Ok(()) } /// Send a batch of dispatch calls. /// Unlike `batch`, it allows errors and won't interrupt. /// /// May be called from any origin except `None`. /// /// - `calls`: The calls to be dispatched from the same origin. The number of call must not /// exceed the constant: `batched_calls_limit` (available in constant metadata). /// /// If origin is root then the calls are dispatch without checking origin filter. (This /// includes bypassing `frame_system::Config::BaseCallFilter`). /// /// ## Complexity /// - O(C) where C is the number of calls to be batched. #[pallet::call_index(4)] #[pallet::weight({ let dispatch_infos = calls.iter().map(|call| call.get_dispatch_info()).collect::<Vec<_>>(); let dispatch_weight = dispatch_infos.iter() .map(|di| di.weight) .fold(Weight::zero(), |total: Weight, weight: Weight| total.saturating_add(weight)) .saturating_add(T::WeightInfo::force_batch(calls.len() as u32)); let dispatch_class = { let all_operational = dispatch_infos.iter() .map(|di| di.class) .all(|class| class == DispatchClass::Operational); if all_operational { DispatchClass::Operational } else { DispatchClass::Normal } }; (dispatch_weight, dispatch_class) })] pub fn force_batch( origin: OriginFor<T>, calls: Vec<<T as Config>::RuntimeCall>, ) -> DispatchResultWithPostInfo { // Do not allow the `None` origin. if ensure_none(origin.clone()).is_ok() { return Err(BadOrigin.into()) } let is_root = ensure_root(origin.clone()).is_ok(); let calls_len = calls.len(); ensure!(calls_len <= Self::batched_calls_limit() as usize, Error::<T>::TooManyCalls); // Track the actual weight of each of the batch calls. let mut weight = Weight::zero(); // Track failed dispatch occur. let mut has_error: bool = false; for call in calls.into_iter() { let info = call.get_dispatch_info(); // If origin is root, don't apply any dispatch filters; root can call anything. let result = if is_root { call.dispatch_bypass_filter(origin.clone()) } else { call.dispatch(origin.clone()) }; // Add the weight of this call. 
weight = weight.saturating_add(extract_actual_weight(&result, &info)); if let Err(e) = result { has_error = true; Self::deposit_event(Event::ItemFailed { error: e.error }); } else { Self::deposit_event(Event::ItemCompleted); } } if has_error { Self::deposit_event(Event::BatchCompletedWithErrors); } else { Self::deposit_event(Event::BatchCompleted); } let base_weight = T::WeightInfo::batch(calls_len as u32); Ok(Some(base_weight.saturating_add(weight)).into()) } /// Dispatch a function call with a specified weight. /// /// This function does not check the weight of the call, and instead allows the /// Root origin to specify the weight of the call. /// /// The dispatch origin for this call must be _Root_. #[pallet::call_index(5)] #[pallet::weight((*_weight, call.get_dispatch_info().class))] pub fn with_weight( origin: OriginFor<T>, call: Box<<T as Config>::RuntimeCall>, _weight: Weight, ) -> DispatchResult { ensure_root(origin)?; let res = call.dispatch_bypass_filter(frame_system::RawOrigin::Root.into()); res.map(|_| ()).map_err(|e| e.error) } } } /// A pallet identifier. These are per pallet and should be stored in a registry somewhere. #[derive(Clone, Copy, Eq, PartialEq, Encode, Decode)] struct IndexedUtilityPalletId(u16); impl TypeId for IndexedUtilityPalletId { const TYPE_ID: [u8; 4] = *b"suba"; } impl<T: Config> Pallet<T> { /// Derive a derivative account ID from the owner account and the sub-account index. pub fn derivative_account_id(who: T::AccountId, index: u16) -> T::AccountId { let entropy = (b"modlpy/utilisuba", who, index).using_encoded(blake2_256); Decode::decode(&mut TrailingZeroInput::new(entropy.as_ref())) .expect("infinite length input; no invalid inputs for type; qed") } }
integrity_test
identifier_name
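The #[pallet::weight] closures repeated in the rows above all follow one pattern: the batch weight is the saturating sum of the per-call weights plus the batch overhead, and the dispatch class is Operational only when every inner call is Operational. The sketch below is a simplified, std-only illustration of that aggregation; u64 stands in for Weight and the names are assumptions, not pallet API.

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum DispatchClass { Normal, Operational }

fn batch_weight_and_class(
    weights: &[u64],
    classes: &[DispatchClass],
    batch_overhead: u64,
) -> (u64, DispatchClass) {
    // Saturating sum of the per-call weights plus the batch overhead.
    let weight = weights
        .iter()
        .fold(0u64, |total, w| total.saturating_add(*w))
        .saturating_add(batch_overhead);
    // Operational only if every inner call is Operational, otherwise Normal.
    let class = if classes.iter().all(|c| *c == DispatchClass::Operational) {
        DispatchClass::Operational
    } else {
        DispatchClass::Normal
    };
    (weight, class)
}

fn main() {
    let (w, c) = batch_weight_and_class(
        &[10, 20],
        &[DispatchClass::Operational, DispatchClass::Normal],
        5,
    );
    assert_eq!(w, 35);
    assert_eq!(c, DispatchClass::Normal);
}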
lib.rs
// This file is part of Substrate. // Copyright (C) Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! # Utility Pallet //! A stateless pallet with helpers for dispatch management which does no re-authentication. //! //! - [`Config`] //! - [`Call`] //! //! ## Overview //! //! This pallet contains two basic pieces of functionality: //! - Batch dispatch: A stateless operation, allowing any origin to execute multiple calls in a //! single dispatch. This can be useful to amalgamate proposals, combining `set_code` with //! corresponding `set_storage`s, for efficient multiple payouts with just a single signature //! verify, or in combination with one of the other two dispatch functionality. //! - Pseudonymal dispatch: A stateless operation, allowing a signed origin to execute a call from //! an alternative signed origin. Each account has 2 * 2**16 possible "pseudonyms" (alternative //! account IDs) and these can be stacked. This can be useful as a key management tool, where you //! need multiple distinct accounts (e.g. as controllers for many staking accounts), but where //! it's perfectly fine to have each of them controlled by the same underlying keypair. Derivative //! accounts are, for the purposes of proxy filtering considered exactly the same as the origin //! and are thus hampered with the origin's filters. //! //! Since proxy filters are respected in all dispatches of this pallet, it should never need to be //! filtered by any proxy. //! //! ## Interface //! //! ### Dispatchable Functions //! //! #### For batch dispatch //! * `batch` - Dispatch multiple calls from the sender's origin. //! //! #### For pseudonymal dispatch //! * `as_derivative` - Dispatch a call from a derivative signed origin. // Ensure we're `no_std` when compiling for Wasm. #![cfg_attr(not(feature = "std"), no_std)] mod benchmarking; mod tests; pub mod weights; use codec::{Decode, Encode}; use frame_support::{ dispatch::{extract_actual_weight, GetDispatchInfo, PostDispatchInfo}, traits::{IsSubType, OriginTrait, UnfilteredDispatchable}, }; use sp_core::TypeId; use sp_io::hashing::blake2_256; use sp_runtime::traits::{BadOrigin, Dispatchable, TrailingZeroInput}; use sp_std::prelude::*; pub use weights::WeightInfo; pub use pallet::*; #[frame_support::pallet] pub mod pallet { use super::*; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; #[pallet::pallet] pub struct Pallet<T>(_); /// Configuration trait. #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. type RuntimeEvent: From<Event> + IsType<<Self as frame_system::Config>::RuntimeEvent>; /// The overarching call type. 
type RuntimeCall: Parameter + Dispatchable<RuntimeOrigin = Self::RuntimeOrigin, PostInfo = PostDispatchInfo> + GetDispatchInfo + From<frame_system::Call<Self>> + UnfilteredDispatchable<RuntimeOrigin = Self::RuntimeOrigin> + IsSubType<Call<Self>> + IsType<<Self as frame_system::Config>::RuntimeCall>; /// The caller origin, overarching type of all pallets origins. type PalletsOrigin: Parameter + Into<<Self as frame_system::Config>::RuntimeOrigin> + IsType<<<Self as frame_system::Config>::RuntimeOrigin as frame_support::traits::OriginTrait>::PalletsOrigin>; /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; } #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { /// Batch of dispatches did not complete fully. Index of first failing dispatch given, as /// well as the error. BatchInterrupted { index: u32, error: DispatchError }, /// Batch of dispatches completed fully with no error. BatchCompleted, /// Batch of dispatches completed but has errors. BatchCompletedWithErrors, /// A single item within a Batch of dispatches has completed with no error. ItemCompleted, /// A single item within a Batch of dispatches has completed with error. ItemFailed { error: DispatchError }, /// A call was dispatched. DispatchedAs { result: DispatchResult }, } // Align the call size to 1KB. As we are currently compiling the runtime for native/wasm // the `size_of` of the `Call` can be different. To ensure that this don't leads to // mismatches between native/wasm or to different metadata for the same runtime, we // algin the call size. The value is chosen big enough to hopefully never reach it. const CALL_ALIGN: u32 = 1024; #[pallet::extra_constants] impl<T: Config> Pallet<T> { /// The limit on the number of batched calls. fn batched_calls_limit() -> u32 { let allocator_limit = sp_core::MAX_POSSIBLE_ALLOCATION; let call_size = ((sp_std::mem::size_of::<<T as Config>::RuntimeCall>() as u32 + CALL_ALIGN - 1) / CALL_ALIGN) * CALL_ALIGN; // The margin to take into account vec doubling capacity. let margin_factor = 3; allocator_limit / margin_factor / call_size } } #[pallet::hooks] impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> { fn integrity_test() { // If you hit this error, you need to try to `Box` big dispatchable parameters. assert!( sp_std::mem::size_of::<<T as Config>::RuntimeCall>() as u32 <= CALL_ALIGN, "Call enum size should be smaller than {} bytes.", CALL_ALIGN, ); } } #[pallet::error] pub enum Error<T> { /// Too many calls batched. TooManyCalls, } #[pallet::call] impl<T: Config> Pallet<T> { /// Send a batch of dispatch calls. /// /// May be called from any origin except `None`. /// /// - `calls`: The calls to be dispatched from the same origin. The number of call must not /// exceed the constant: `batched_calls_limit` (available in constant metadata). /// /// If origin is root then the calls are dispatched without checking origin filter. (This /// includes bypassing `frame_system::Config::BaseCallFilter`). /// /// ## Complexity /// - O(C) where C is the number of calls to be batched. /// /// This will return `Ok` in all circumstances. To determine the success of the batch, an /// event is deposited. If a call failed and the batch was interrupted, then the /// `BatchInterrupted` event is deposited, along with the number of successful calls made /// and the error of the failed call. If all were successful, then the `BatchCompleted` /// event is deposited. 
#[pallet::call_index(0)] #[pallet::weight({ let dispatch_infos = calls.iter().map(|call| call.get_dispatch_info()).collect::<Vec<_>>(); let dispatch_weight = dispatch_infos.iter() .map(|di| di.weight) .fold(Weight::zero(), |total: Weight, weight: Weight| total.saturating_add(weight)) .saturating_add(T::WeightInfo::batch(calls.len() as u32)); let dispatch_class = { let all_operational = dispatch_infos.iter() .map(|di| di.class) .all(|class| class == DispatchClass::Operational); if all_operational { DispatchClass::Operational } else { DispatchClass::Normal } }; (dispatch_weight, dispatch_class) })] pub fn batch( origin: OriginFor<T>, calls: Vec<<T as Config>::RuntimeCall>, ) -> DispatchResultWithPostInfo { // Do not allow the `None` origin. if ensure_none(origin.clone()).is_ok() { return Err(BadOrigin.into()) } let is_root = ensure_root(origin.clone()).is_ok(); let calls_len = calls.len(); ensure!(calls_len <= Self::batched_calls_limit() as usize, Error::<T>::TooManyCalls); // Track the actual weight of each of the batch calls. let mut weight = Weight::zero(); for (index, call) in calls.into_iter().enumerate() { let info = call.get_dispatch_info(); // If origin is root, don't apply any dispatch filters; root can call anything. let result = if is_root { call.dispatch_bypass_filter(origin.clone()) } else { call.dispatch(origin.clone()) }; // Add the weight of this call. weight = weight.saturating_add(extract_actual_weight(&result, &info)); if let Err(e) = result { Self::deposit_event(Event::BatchInterrupted { index: index as u32, error: e.error, }); // Take the weight of this function itself into account. let base_weight = T::WeightInfo::batch(index.saturating_add(1) as u32); // Return the actual used weight + base_weight of this call. return Ok(Some(base_weight + weight).into()) } Self::deposit_event(Event::ItemCompleted); } Self::deposit_event(Event::BatchCompleted); let base_weight = T::WeightInfo::batch(calls_len as u32); Ok(Some(base_weight + weight).into()) } /// Send a call through an indexed pseudonym of the sender. /// /// Filter from origin are passed along. The call will be dispatched with an origin which /// use the same filter as the origin of this call. /// /// NOTE: If you need to ensure that any account-based filtering is not honored (i.e. /// because you expect `proxy` to have been used prior in the call stack and you do not want /// the call restrictions to apply to any sub-accounts), then use `as_multi_threshold_1` /// in the Multisig pallet instead. /// /// NOTE: Prior to version *12, this was called `as_limited_sub`. /// /// The dispatch origin for this call must be _Signed_. #[pallet::call_index(1)] #[pallet::weight({ let dispatch_info = call.get_dispatch_info(); ( T::WeightInfo::as_derivative() // AccountData for inner call origin accountdata. .saturating_add(T::DbWeight::get().reads_writes(1, 1)) .saturating_add(dispatch_info.weight), dispatch_info.class, ) })] pub fn as_derivative( origin: OriginFor<T>, index: u16, call: Box<<T as Config>::RuntimeCall>, ) -> DispatchResultWithPostInfo { let mut origin = origin; let who = ensure_signed(origin.clone())?; let pseudonym = Self::derivative_account_id(who, index); origin.set_caller_from(frame_system::RawOrigin::Signed(pseudonym)); let info = call.get_dispatch_info(); let result = call.dispatch(origin); // Always take into account the base weight of this call. let mut weight = T::WeightInfo::as_derivative() .saturating_add(T::DbWeight::get().reads_writes(1, 1)); // Add the real weight of the dispatch. 
weight = weight.saturating_add(extract_actual_weight(&result, &info)); result .map_err(|mut err| { err.post_info = Some(weight).into(); err }) .map(|_| Some(weight).into()) } /// Send a batch of dispatch calls and atomically execute them. /// The whole transaction will rollback and fail if any of the calls failed. /// /// May be called from any origin except `None`. /// /// - `calls`: The calls to be dispatched from the same origin. The number of call must not /// exceed the constant: `batched_calls_limit` (available in constant metadata). /// /// If origin is root then the calls are dispatched without checking origin filter. (This /// includes bypassing `frame_system::Config::BaseCallFilter`). /// /// ## Complexity /// - O(C) where C is the number of calls to be batched. #[pallet::call_index(2)] #[pallet::weight({ let dispatch_infos = calls.iter().map(|call| call.get_dispatch_info()).collect::<Vec<_>>(); let dispatch_weight = dispatch_infos.iter() .map(|di| di.weight) .fold(Weight::zero(), |total: Weight, weight: Weight| total.saturating_add(weight)) .saturating_add(T::WeightInfo::batch_all(calls.len() as u32)); let dispatch_class = { let all_operational = dispatch_infos.iter() .map(|di| di.class) .all(|class| class == DispatchClass::Operational); if all_operational { DispatchClass::Operational } else { DispatchClass::Normal } }; (dispatch_weight, dispatch_class) })] pub fn batch_all( origin: OriginFor<T>, calls: Vec<<T as Config>::RuntimeCall>, ) -> DispatchResultWithPostInfo { // Do not allow the `None` origin. if ensure_none(origin.clone()).is_ok()
let is_root = ensure_root(origin.clone()).is_ok(); let calls_len = calls.len(); ensure!(calls_len <= Self::batched_calls_limit() as usize, Error::<T>::TooManyCalls); // Track the actual weight of each of the batch calls. let mut weight = Weight::zero(); for (index, call) in calls.into_iter().enumerate() { let info = call.get_dispatch_info(); // If origin is root, bypass any dispatch filter; root can call anything. let result = if is_root { call.dispatch_bypass_filter(origin.clone()) } else { let mut filtered_origin = origin.clone(); // Don't allow users to nest `batch_all` calls. filtered_origin.add_filter( move |c: &<T as frame_system::Config>::RuntimeCall| { let c = <T as Config>::RuntimeCall::from_ref(c); !matches!(c.is_sub_type(), Some(Call::batch_all {.. })) }, ); call.dispatch(filtered_origin) }; // Add the weight of this call. weight = weight.saturating_add(extract_actual_weight(&result, &info)); result.map_err(|mut err| { // Take the weight of this function itself into account. let base_weight = T::WeightInfo::batch_all(index.saturating_add(1) as u32); // Return the actual used weight + base_weight of this call. err.post_info = Some(base_weight + weight).into(); err })?; Self::deposit_event(Event::ItemCompleted); } Self::deposit_event(Event::BatchCompleted); let base_weight = T::WeightInfo::batch_all(calls_len as u32); Ok(Some(base_weight.saturating_add(weight)).into()) } /// Dispatches a function call with a provided origin. /// /// The dispatch origin for this call must be _Root_. /// /// ## Complexity /// - O(1). #[pallet::call_index(3)] #[pallet::weight({ let dispatch_info = call.get_dispatch_info(); ( T::WeightInfo::dispatch_as() .saturating_add(dispatch_info.weight), dispatch_info.class, ) })] pub fn dispatch_as( origin: OriginFor<T>, as_origin: Box<T::PalletsOrigin>, call: Box<<T as Config>::RuntimeCall>, ) -> DispatchResult { ensure_root(origin)?; let res = call.dispatch_bypass_filter((*as_origin).into()); Self::deposit_event(Event::DispatchedAs { result: res.map(|_| ()).map_err(|e| e.error), }); Ok(()) } /// Send a batch of dispatch calls. /// Unlike `batch`, it allows errors and won't interrupt. /// /// May be called from any origin except `None`. /// /// - `calls`: The calls to be dispatched from the same origin. The number of call must not /// exceed the constant: `batched_calls_limit` (available in constant metadata). /// /// If origin is root then the calls are dispatch without checking origin filter. (This /// includes bypassing `frame_system::Config::BaseCallFilter`). /// /// ## Complexity /// - O(C) where C is the number of calls to be batched. #[pallet::call_index(4)] #[pallet::weight({ let dispatch_infos = calls.iter().map(|call| call.get_dispatch_info()).collect::<Vec<_>>(); let dispatch_weight = dispatch_infos.iter() .map(|di| di.weight) .fold(Weight::zero(), |total: Weight, weight: Weight| total.saturating_add(weight)) .saturating_add(T::WeightInfo::force_batch(calls.len() as u32)); let dispatch_class = { let all_operational = dispatch_infos.iter() .map(|di| di.class) .all(|class| class == DispatchClass::Operational); if all_operational { DispatchClass::Operational } else { DispatchClass::Normal } }; (dispatch_weight, dispatch_class) })] pub fn force_batch( origin: OriginFor<T>, calls: Vec<<T as Config>::RuntimeCall>, ) -> DispatchResultWithPostInfo { // Do not allow the `None` origin. 
if ensure_none(origin.clone()).is_ok() { return Err(BadOrigin.into()) } let is_root = ensure_root(origin.clone()).is_ok(); let calls_len = calls.len(); ensure!(calls_len <= Self::batched_calls_limit() as usize, Error::<T>::TooManyCalls); // Track the actual weight of each of the batch calls. let mut weight = Weight::zero(); // Track failed dispatch occur. let mut has_error: bool = false; for call in calls.into_iter() { let info = call.get_dispatch_info(); // If origin is root, don't apply any dispatch filters; root can call anything. let result = if is_root { call.dispatch_bypass_filter(origin.clone()) } else { call.dispatch(origin.clone()) }; // Add the weight of this call. weight = weight.saturating_add(extract_actual_weight(&result, &info)); if let Err(e) = result { has_error = true; Self::deposit_event(Event::ItemFailed { error: e.error }); } else { Self::deposit_event(Event::ItemCompleted); } } if has_error { Self::deposit_event(Event::BatchCompletedWithErrors); } else { Self::deposit_event(Event::BatchCompleted); } let base_weight = T::WeightInfo::batch(calls_len as u32); Ok(Some(base_weight.saturating_add(weight)).into()) } /// Dispatch a function call with a specified weight. /// /// This function does not check the weight of the call, and instead allows the /// Root origin to specify the weight of the call. /// /// The dispatch origin for this call must be _Root_. #[pallet::call_index(5)] #[pallet::weight((*_weight, call.get_dispatch_info().class))] pub fn with_weight( origin: OriginFor<T>, call: Box<<T as Config>::RuntimeCall>, _weight: Weight, ) -> DispatchResult { ensure_root(origin)?; let res = call.dispatch_bypass_filter(frame_system::RawOrigin::Root.into()); res.map(|_| ()).map_err(|e| e.error) } } } /// A pallet identifier. These are per pallet and should be stored in a registry somewhere. #[derive(Clone, Copy, Eq, PartialEq, Encode, Decode)] struct IndexedUtilityPalletId(u16); impl TypeId for IndexedUtilityPalletId { const TYPE_ID: [u8; 4] = *b"suba"; } impl<T: Config> Pallet<T> { /// Derive a derivative account ID from the owner account and the sub-account index. pub fn derivative_account_id(who: T::AccountId, index: u16) -> T::AccountId { let entropy = (b"modlpy/utilisuba", who, index).using_encoded(blake2_256); Decode::decode(&mut TrailingZeroInput::new(entropy.as_ref())) .expect("infinite length input; no invalid inputs for type; qed") } }
{ return Err(BadOrigin.into()) }
conditional_block
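The batch weight annotations above fold the inner calls' weights onto a per-length base weight with saturating addition, and only keep the `Operational` dispatch class if every inner call is operational. A minimal std-only sketch of that pre-dispatch computation, with a plain `u64` standing in for `Weight` and hypothetical `CallInfo`/`DispatchClass` stand-ins (not the pallet's real types):

// Sketch of the weight/class pre-computation used by `batch`, `batch_all` and `force_batch`.
#[derive(Clone, Copy, PartialEq, Debug)]
enum DispatchClass {
    Normal,
    Operational,
}

struct CallInfo {
    weight: u64, // stand-in for `Weight`
    class: DispatchClass,
}

fn batch_weight_and_class(infos: &[CallInfo], base_weight_for_len: u64) -> (u64, DispatchClass) {
    // Saturating fold of the inner call weights plus the per-length base weight.
    let weight = infos
        .iter()
        .map(|i| i.weight)
        .fold(0u64, |total, w| total.saturating_add(w))
        .saturating_add(base_weight_for_len);
    // The whole batch is `Operational` only if every inner call is.
    let class = if infos.iter().all(|i| i.class == DispatchClass::Operational) {
        DispatchClass::Operational
    } else {
        DispatchClass::Normal
    };
    (weight, class)
}

fn main() {
    let infos = vec![
        CallInfo { weight: 10_000, class: DispatchClass::Operational },
        CallInfo { weight: 5_000, class: DispatchClass::Normal },
    ];
    // Mixed classes degrade the whole batch to `Normal`.
    assert_eq!(batch_weight_and_class(&infos, 1_000), (16_000, DispatchClass::Normal));
}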
lib.rs
// This file is part of Substrate. // Copyright (C) Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! # Utility Pallet //! A stateless pallet with helpers for dispatch management which does no re-authentication. //! //! - [`Config`] //! - [`Call`] //! //! ## Overview //! //! This pallet contains two basic pieces of functionality: //! - Batch dispatch: A stateless operation, allowing any origin to execute multiple calls in a //! single dispatch. This can be useful to amalgamate proposals, combining `set_code` with //! corresponding `set_storage`s, for efficient multiple payouts with just a single signature //! verify, or in combination with one of the other two dispatch functionality. //! - Pseudonymal dispatch: A stateless operation, allowing a signed origin to execute a call from //! an alternative signed origin. Each account has 2 * 2**16 possible "pseudonyms" (alternative //! account IDs) and these can be stacked. This can be useful as a key management tool, where you //! need multiple distinct accounts (e.g. as controllers for many staking accounts), but where //! it's perfectly fine to have each of them controlled by the same underlying keypair. Derivative //! accounts are, for the purposes of proxy filtering considered exactly the same as the origin //! and are thus hampered with the origin's filters. //! //! Since proxy filters are respected in all dispatches of this pallet, it should never need to be //! filtered by any proxy. //! //! ## Interface //! //! ### Dispatchable Functions //! //! #### For batch dispatch //! * `batch` - Dispatch multiple calls from the sender's origin. //! //! #### For pseudonymal dispatch //! * `as_derivative` - Dispatch a call from a derivative signed origin. // Ensure we're `no_std` when compiling for Wasm. #![cfg_attr(not(feature = "std"), no_std)] mod benchmarking; mod tests; pub mod weights; use codec::{Decode, Encode}; use frame_support::{ dispatch::{extract_actual_weight, GetDispatchInfo, PostDispatchInfo}, traits::{IsSubType, OriginTrait, UnfilteredDispatchable}, }; use sp_core::TypeId; use sp_io::hashing::blake2_256; use sp_runtime::traits::{BadOrigin, Dispatchable, TrailingZeroInput}; use sp_std::prelude::*; pub use weights::WeightInfo; pub use pallet::*; #[frame_support::pallet] pub mod pallet { use super::*; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; #[pallet::pallet] pub struct Pallet<T>(_); /// Configuration trait. #[pallet::config] pub trait Config: frame_system::Config { /// The overarching event type. type RuntimeEvent: From<Event> + IsType<<Self as frame_system::Config>::RuntimeEvent>; /// The overarching call type. 
type RuntimeCall: Parameter + Dispatchable<RuntimeOrigin = Self::RuntimeOrigin, PostInfo = PostDispatchInfo> + GetDispatchInfo + From<frame_system::Call<Self>> + UnfilteredDispatchable<RuntimeOrigin = Self::RuntimeOrigin> + IsSubType<Call<Self>> + IsType<<Self as frame_system::Config>::RuntimeCall>; /// The caller origin, overarching type of all pallets origins. type PalletsOrigin: Parameter + Into<<Self as frame_system::Config>::RuntimeOrigin> + IsType<<<Self as frame_system::Config>::RuntimeOrigin as frame_support::traits::OriginTrait>::PalletsOrigin>; /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; } #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { /// Batch of dispatches did not complete fully. Index of first failing dispatch given, as /// well as the error. BatchInterrupted { index: u32, error: DispatchError }, /// Batch of dispatches completed fully with no error. BatchCompleted, /// Batch of dispatches completed but has errors. BatchCompletedWithErrors, /// A single item within a Batch of dispatches has completed with no error. ItemCompleted, /// A single item within a Batch of dispatches has completed with error. ItemFailed { error: DispatchError }, /// A call was dispatched. DispatchedAs { result: DispatchResult }, } // Align the call size to 1KB. As we are currently compiling the runtime for native/wasm // the `size_of` of the `Call` can be different. To ensure that this don't leads to // mismatches between native/wasm or to different metadata for the same runtime, we // algin the call size. The value is chosen big enough to hopefully never reach it. const CALL_ALIGN: u32 = 1024; #[pallet::extra_constants] impl<T: Config> Pallet<T> { /// The limit on the number of batched calls. fn batched_calls_limit() -> u32 { let allocator_limit = sp_core::MAX_POSSIBLE_ALLOCATION; let call_size = ((sp_std::mem::size_of::<<T as Config>::RuntimeCall>() as u32 + CALL_ALIGN - 1) / CALL_ALIGN) * CALL_ALIGN; // The margin to take into account vec doubling capacity. let margin_factor = 3; allocator_limit / margin_factor / call_size } } #[pallet::hooks] impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> { fn integrity_test() { // If you hit this error, you need to try to `Box` big dispatchable parameters. assert!( sp_std::mem::size_of::<<T as Config>::RuntimeCall>() as u32 <= CALL_ALIGN, "Call enum size should be smaller than {} bytes.", CALL_ALIGN, ); } } #[pallet::error] pub enum Error<T> { /// Too many calls batched. TooManyCalls, } #[pallet::call] impl<T: Config> Pallet<T> { /// Send a batch of dispatch calls. /// /// May be called from any origin except `None`. /// /// - `calls`: The calls to be dispatched from the same origin. The number of call must not /// exceed the constant: `batched_calls_limit` (available in constant metadata). /// /// If origin is root then the calls are dispatched without checking origin filter. (This /// includes bypassing `frame_system::Config::BaseCallFilter`). /// /// ## Complexity
/// event is deposited. If a call failed and the batch was interrupted, then the /// `BatchInterrupted` event is deposited, along with the number of successful calls made /// and the error of the failed call. If all were successful, then the `BatchCompleted` /// event is deposited. #[pallet::call_index(0)] #[pallet::weight({ let dispatch_infos = calls.iter().map(|call| call.get_dispatch_info()).collect::<Vec<_>>(); let dispatch_weight = dispatch_infos.iter() .map(|di| di.weight) .fold(Weight::zero(), |total: Weight, weight: Weight| total.saturating_add(weight)) .saturating_add(T::WeightInfo::batch(calls.len() as u32)); let dispatch_class = { let all_operational = dispatch_infos.iter() .map(|di| di.class) .all(|class| class == DispatchClass::Operational); if all_operational { DispatchClass::Operational } else { DispatchClass::Normal } }; (dispatch_weight, dispatch_class) })] pub fn batch( origin: OriginFor<T>, calls: Vec<<T as Config>::RuntimeCall>, ) -> DispatchResultWithPostInfo { // Do not allow the `None` origin. if ensure_none(origin.clone()).is_ok() { return Err(BadOrigin.into()) } let is_root = ensure_root(origin.clone()).is_ok(); let calls_len = calls.len(); ensure!(calls_len <= Self::batched_calls_limit() as usize, Error::<T>::TooManyCalls); // Track the actual weight of each of the batch calls. let mut weight = Weight::zero(); for (index, call) in calls.into_iter().enumerate() { let info = call.get_dispatch_info(); // If origin is root, don't apply any dispatch filters; root can call anything. let result = if is_root { call.dispatch_bypass_filter(origin.clone()) } else { call.dispatch(origin.clone()) }; // Add the weight of this call. weight = weight.saturating_add(extract_actual_weight(&result, &info)); if let Err(e) = result { Self::deposit_event(Event::BatchInterrupted { index: index as u32, error: e.error, }); // Take the weight of this function itself into account. let base_weight = T::WeightInfo::batch(index.saturating_add(1) as u32); // Return the actual used weight + base_weight of this call. return Ok(Some(base_weight + weight).into()) } Self::deposit_event(Event::ItemCompleted); } Self::deposit_event(Event::BatchCompleted); let base_weight = T::WeightInfo::batch(calls_len as u32); Ok(Some(base_weight + weight).into()) } /// Send a call through an indexed pseudonym of the sender. /// /// Filter from origin are passed along. The call will be dispatched with an origin which /// use the same filter as the origin of this call. /// /// NOTE: If you need to ensure that any account-based filtering is not honored (i.e. /// because you expect `proxy` to have been used prior in the call stack and you do not want /// the call restrictions to apply to any sub-accounts), then use `as_multi_threshold_1` /// in the Multisig pallet instead. /// /// NOTE: Prior to version *12, this was called `as_limited_sub`. /// /// The dispatch origin for this call must be _Signed_. #[pallet::call_index(1)] #[pallet::weight({ let dispatch_info = call.get_dispatch_info(); ( T::WeightInfo::as_derivative() // AccountData for inner call origin accountdata. 
.saturating_add(T::DbWeight::get().reads_writes(1, 1)) .saturating_add(dispatch_info.weight), dispatch_info.class, ) })] pub fn as_derivative( origin: OriginFor<T>, index: u16, call: Box<<T as Config>::RuntimeCall>, ) -> DispatchResultWithPostInfo { let mut origin = origin; let who = ensure_signed(origin.clone())?; let pseudonym = Self::derivative_account_id(who, index); origin.set_caller_from(frame_system::RawOrigin::Signed(pseudonym)); let info = call.get_dispatch_info(); let result = call.dispatch(origin); // Always take into account the base weight of this call. let mut weight = T::WeightInfo::as_derivative() .saturating_add(T::DbWeight::get().reads_writes(1, 1)); // Add the real weight of the dispatch. weight = weight.saturating_add(extract_actual_weight(&result, &info)); result .map_err(|mut err| { err.post_info = Some(weight).into(); err }) .map(|_| Some(weight).into()) } /// Send a batch of dispatch calls and atomically execute them. /// The whole transaction will rollback and fail if any of the calls failed. /// /// May be called from any origin except `None`. /// /// - `calls`: The calls to be dispatched from the same origin. The number of call must not /// exceed the constant: `batched_calls_limit` (available in constant metadata). /// /// If origin is root then the calls are dispatched without checking origin filter. (This /// includes bypassing `frame_system::Config::BaseCallFilter`). /// /// ## Complexity /// - O(C) where C is the number of calls to be batched. #[pallet::call_index(2)] #[pallet::weight({ let dispatch_infos = calls.iter().map(|call| call.get_dispatch_info()).collect::<Vec<_>>(); let dispatch_weight = dispatch_infos.iter() .map(|di| di.weight) .fold(Weight::zero(), |total: Weight, weight: Weight| total.saturating_add(weight)) .saturating_add(T::WeightInfo::batch_all(calls.len() as u32)); let dispatch_class = { let all_operational = dispatch_infos.iter() .map(|di| di.class) .all(|class| class == DispatchClass::Operational); if all_operational { DispatchClass::Operational } else { DispatchClass::Normal } }; (dispatch_weight, dispatch_class) })] pub fn batch_all( origin: OriginFor<T>, calls: Vec<<T as Config>::RuntimeCall>, ) -> DispatchResultWithPostInfo { // Do not allow the `None` origin. if ensure_none(origin.clone()).is_ok() { return Err(BadOrigin.into()) } let is_root = ensure_root(origin.clone()).is_ok(); let calls_len = calls.len(); ensure!(calls_len <= Self::batched_calls_limit() as usize, Error::<T>::TooManyCalls); // Track the actual weight of each of the batch calls. let mut weight = Weight::zero(); for (index, call) in calls.into_iter().enumerate() { let info = call.get_dispatch_info(); // If origin is root, bypass any dispatch filter; root can call anything. let result = if is_root { call.dispatch_bypass_filter(origin.clone()) } else { let mut filtered_origin = origin.clone(); // Don't allow users to nest `batch_all` calls. filtered_origin.add_filter( move |c: &<T as frame_system::Config>::RuntimeCall| { let c = <T as Config>::RuntimeCall::from_ref(c); !matches!(c.is_sub_type(), Some(Call::batch_all {.. })) }, ); call.dispatch(filtered_origin) }; // Add the weight of this call. weight = weight.saturating_add(extract_actual_weight(&result, &info)); result.map_err(|mut err| { // Take the weight of this function itself into account. let base_weight = T::WeightInfo::batch_all(index.saturating_add(1) as u32); // Return the actual used weight + base_weight of this call. 
err.post_info = Some(base_weight + weight).into(); err })?; Self::deposit_event(Event::ItemCompleted); } Self::deposit_event(Event::BatchCompleted); let base_weight = T::WeightInfo::batch_all(calls_len as u32); Ok(Some(base_weight.saturating_add(weight)).into()) } /// Dispatches a function call with a provided origin. /// /// The dispatch origin for this call must be _Root_. /// /// ## Complexity /// - O(1). #[pallet::call_index(3)] #[pallet::weight({ let dispatch_info = call.get_dispatch_info(); ( T::WeightInfo::dispatch_as() .saturating_add(dispatch_info.weight), dispatch_info.class, ) })] pub fn dispatch_as( origin: OriginFor<T>, as_origin: Box<T::PalletsOrigin>, call: Box<<T as Config>::RuntimeCall>, ) -> DispatchResult { ensure_root(origin)?; let res = call.dispatch_bypass_filter((*as_origin).into()); Self::deposit_event(Event::DispatchedAs { result: res.map(|_| ()).map_err(|e| e.error), }); Ok(()) } /// Send a batch of dispatch calls. /// Unlike `batch`, it allows errors and won't interrupt. /// /// May be called from any origin except `None`. /// /// - `calls`: The calls to be dispatched from the same origin. The number of call must not /// exceed the constant: `batched_calls_limit` (available in constant metadata). /// /// If origin is root then the calls are dispatch without checking origin filter. (This /// includes bypassing `frame_system::Config::BaseCallFilter`). /// /// ## Complexity /// - O(C) where C is the number of calls to be batched. #[pallet::call_index(4)] #[pallet::weight({ let dispatch_infos = calls.iter().map(|call| call.get_dispatch_info()).collect::<Vec<_>>(); let dispatch_weight = dispatch_infos.iter() .map(|di| di.weight) .fold(Weight::zero(), |total: Weight, weight: Weight| total.saturating_add(weight)) .saturating_add(T::WeightInfo::force_batch(calls.len() as u32)); let dispatch_class = { let all_operational = dispatch_infos.iter() .map(|di| di.class) .all(|class| class == DispatchClass::Operational); if all_operational { DispatchClass::Operational } else { DispatchClass::Normal } }; (dispatch_weight, dispatch_class) })] pub fn force_batch( origin: OriginFor<T>, calls: Vec<<T as Config>::RuntimeCall>, ) -> DispatchResultWithPostInfo { // Do not allow the `None` origin. if ensure_none(origin.clone()).is_ok() { return Err(BadOrigin.into()) } let is_root = ensure_root(origin.clone()).is_ok(); let calls_len = calls.len(); ensure!(calls_len <= Self::batched_calls_limit() as usize, Error::<T>::TooManyCalls); // Track the actual weight of each of the batch calls. let mut weight = Weight::zero(); // Track failed dispatch occur. let mut has_error: bool = false; for call in calls.into_iter() { let info = call.get_dispatch_info(); // If origin is root, don't apply any dispatch filters; root can call anything. let result = if is_root { call.dispatch_bypass_filter(origin.clone()) } else { call.dispatch(origin.clone()) }; // Add the weight of this call. weight = weight.saturating_add(extract_actual_weight(&result, &info)); if let Err(e) = result { has_error = true; Self::deposit_event(Event::ItemFailed { error: e.error }); } else { Self::deposit_event(Event::ItemCompleted); } } if has_error { Self::deposit_event(Event::BatchCompletedWithErrors); } else { Self::deposit_event(Event::BatchCompleted); } let base_weight = T::WeightInfo::batch(calls_len as u32); Ok(Some(base_weight.saturating_add(weight)).into()) } /// Dispatch a function call with a specified weight. 
/// /// This function does not check the weight of the call, and instead allows the /// Root origin to specify the weight of the call. /// /// The dispatch origin for this call must be _Root_. #[pallet::call_index(5)] #[pallet::weight((*_weight, call.get_dispatch_info().class))] pub fn with_weight( origin: OriginFor<T>, call: Box<<T as Config>::RuntimeCall>, _weight: Weight, ) -> DispatchResult { ensure_root(origin)?; let res = call.dispatch_bypass_filter(frame_system::RawOrigin::Root.into()); res.map(|_| ()).map_err(|e| e.error) } } } /// A pallet identifier. These are per pallet and should be stored in a registry somewhere. #[derive(Clone, Copy, Eq, PartialEq, Encode, Decode)] struct IndexedUtilityPalletId(u16); impl TypeId for IndexedUtilityPalletId { const TYPE_ID: [u8; 4] = *b"suba"; } impl<T: Config> Pallet<T> { /// Derive a derivative account ID from the owner account and the sub-account index. pub fn derivative_account_id(who: T::AccountId, index: u16) -> T::AccountId { let entropy = (b"modlpy/utilisuba", who, index).using_encoded(blake2_256); Decode::decode(&mut TrailingZeroInput::new(entropy.as_ref())) .expect("infinite length input; no invalid inputs for type; qed") } }
/// - O(C) where C is the number of calls to be batched. /// /// This will return `Ok` in all circumstances. To determine the success of the batch, an
random_line_split
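The `batched_calls_limit` extra constant above divides the allocator limit by a margin factor (to leave room for `Vec` capacity doubling) and by the call enum size rounded up to `CALL_ALIGN`. A small sketch of that arithmetic; the 32 MiB value written out below is an assumption standing in for `sp_core::MAX_POSSIBLE_ALLOCATION`:

// Sketch of the `batched_calls_limit` arithmetic.
const CALL_ALIGN: u32 = 1024;
const MAX_POSSIBLE_ALLOCATION: u32 = 32 * 1024 * 1024; // assumption: mirrors sp_core's 32 MiB limit

fn batched_calls_limit(call_size_bytes: u32) -> u32 {
    // Round the call size up to the next multiple of CALL_ALIGN.
    let aligned = ((call_size_bytes + CALL_ALIGN - 1) / CALL_ALIGN) * CALL_ALIGN;
    // Margin to account for vec doubling capacity.
    let margin_factor = 3;
    MAX_POSSIBLE_ALLOCATION / margin_factor / aligned
}

fn main() {
    // A 200-byte call enum rounds up to one 1 KiB slot.
    assert_eq!(batched_calls_limit(200), MAX_POSSIBLE_ALLOCATION / 3 / 1024);
}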
mod.rs
mod console_tests; mod iterator; use std::borrow::Cow; use ansi_term::Style; use itertools::Itertools; use unicode_segmentation::UnicodeSegmentation; use unicode_width::UnicodeWidthStr; use iterator::{AnsiElementIterator, Element}; pub const ANSI_CSI_CLEAR_TO_EOL: &str = "\x1b[0K"; pub const ANSI_CSI_CLEAR_TO_BOL: &str = "\x1b[1K"; pub const ANSI_SGR_RESET: &str = "\x1b[0m"; pub const ANSI_SGR_REVERSE: &str = "\x1b[7m"; pub fn strip_ansi_codes(s: &str) -> String { strip_ansi_codes_from_strings_iterator(ansi_strings_iterator(s)) } pub fn measure_text_width(s: &str) -> usize { ansi_strings_iterator(s).fold(0, |acc, (element, is_ansi)| { acc + if is_ansi { 0 } else { element.width() } }) } /// Truncate string such that `tail` is present as a suffix, preceded by as much of `s` as can be /// displayed in the requested width. // Return string constructed as follows: // 1. `display_width` characters are available. If the string fits, return it. // // 2. Contribute graphemes and ANSI escape sequences from `tail` until either (1) `tail` is // exhausted, or (2) the display width of the result would exceed `display_width`. // // 3. If tail was exhausted, then contribute graphemes and ANSI escape sequences from `s` until the // display_width of the result would exceed `display_width`. pub fn truncate_str<'a>(s: &'a str, display_width: usize, tail: &str) -> Cow<'a, str> { let items = ansi_strings_iterator(s).collect::<Vec<(&str, bool)>>(); let width = strip_ansi_codes_from_strings_iterator(items.iter().copied()).width(); if width <= display_width { return Cow::from(s); } let result_tail = if!tail.is_empty() { truncate_str(tail, display_width, "").to_string() } else { String::new() }; let mut used = measure_text_width(&result_tail); let mut result = String::new(); for (t, is_ansi) in items { if!is_ansi { for g in t.graphemes(true) { let w = g.width(); if used + w > display_width { result.push_str(&" ".repeat(display_width.saturating_sub(used))); break; } result.push_str(g); used += w; } } else { result.push_str(t); } } Cow::from(format!("{result}{result_tail}")) } pub fn parse_style_sections(s: &str) -> Vec<(ansi_term::Style, &str)> { let mut sections = Vec::new(); let mut curr_style = Style::default(); for element in AnsiElementIterator::new(s) { match element { Element::Text(start, end) => sections.push((curr_style, &s[start..end])), Element::Sgr(style, _, _) => curr_style = style, _ => {} } } sections } // Return the first CSI element, if any, as an `ansi_term::Style`. pub fn parse_first_style(s: &str) -> Option<ansi_term::Style> { AnsiElementIterator::new(s).find_map(|el| match el { Element::Sgr(style, _, _) => Some(style), _ => None, }) } pub fn string_starts_with_ansi_style_sequence(s: &str) -> bool { AnsiElementIterator::new(s) .next() .map(|el| matches!(el, Element::Sgr(_, _, _))) .unwrap_or(false) } /// Return string formed from a byte slice starting at byte position `start`, where the index /// counts bytes in non-ANSI-escape-sequence content only. All ANSI escape sequences in the /// original string are preserved. pub fn ansi_preserving_slice(s: &str, start: usize) -> String { AnsiElementIterator::new(s) .scan(0, |index, element| { // `index` is the index in non-ANSI-escape-sequence content. Some(match element { Element::Sgr(_, a, b) => &s[a..b], Element::Csi(a, b) => &s[a..b], Element::Esc(a, b) => &s[a..b], Element::Osc(a, b) => &s[a..b], Element::Text(a, b) => { let i = *index; *index += b - a; if *index <= start { // This text segment ends before start, so contributes no bytes. 
"" } else if i > start { // This section starts after `start`, so contributes all its bytes. &s[a..b] } else { // This section contributes those bytes that are >= start &s[(a + start - i)..b] } } }) }) .join("") } /// Return the byte index in `s` of the i-th text byte in `s`. I.e. `i` counts /// bytes in non-ANSI-escape-sequence content only. pub fn ansi_preserving_index(s: &str, i: usize) -> Option<usize> { let mut index = 0; for element in AnsiElementIterator::new(s) { if let Element::Text(a, b) = element { index += b - a; if index > i { return Some(b - (index - i)); } } } None } fn ansi_strings_iterator(s: &str) -> impl Iterator<Item = (&str, bool)> { AnsiElementIterator::new(s).map(move |el| match el { Element::Sgr(_, i, j) => (&s[i..j], true), Element::Csi(i, j) => (&s[i..j], true), Element::Esc(i, j) => (&s[i..j], true), Element::Osc(i, j) => (&s[i..j], true), Element::Text(i, j) => (&s[i..j], false), }) } fn strip_ansi_codes_from_strings_iterator<'a>( strings: impl Iterator<Item = (&'a str, bool)>, ) -> String { strings .filter_map(|(el, is_ansi)| if!is_ansi { Some(el) } else { None }) .join("") } pub fn explain_ansi(line: &str, colorful: bool) -> String { use crate::style::Style; parse_style_sections(line) .into_iter() .map(|(ansi_term_style, s)| { let style = Style { ansi_term_style, ..Style::default() }; if colorful { format!("({}){}", style.to_painted_string(), style.paint(s)) } else { format!("({style}){s}") } }) .collect() } #[cfg(test)] mod tests { use crate::ansi::ansi_preserving_index; // Note that src/ansi/console_tests.rs contains additional test coverage for this module. use super::{ ansi_preserving_slice, measure_text_width, parse_first_style, string_starts_with_ansi_style_sequence, strip_ansi_codes, truncate_str, }; #[test] fn test_strip_ansi_codes() { for s in &["src/ansi/mod.rs", "バー", "src/ansi/modバー.rs"] { assert_eq!(strip_ansi_codes(s), *s); } assert_eq!(strip_ansi_codes("\x1b[31mバー\x1b[0m"), "バー"); } #[test] fn test_measure_text_width() { assert_eq!(measure_text_width("src/ansi/mod.rs"), 15); assert_eq!(measure_text_width("バー"), 4); assert_eq!(measure_text_width("src/ansi/modバー.rs"), 19); assert_eq!(measure_text_width("\x1b[31mバー\x1b[0m"), 4); assert_eq!(measure_text_width("a\nb\n"), 2); } #[test] fn test_strip_ansi_codes_osc_hyperlink() { assert_eq!(strip_ansi_codes("\x1b[38;5;4m\x1b]8;;file:///Users/dan/src/delta/src/ansi/mod.rs\x1b\\src/ansi/mod.rs\x1b]8;;\x1b\\\x1b[0m\n"), "src/ansi/mod.rs\n"); } #[test] fn test_measure_text_width_osc_hyperlink() { assert_eq!(measure
asure_text_width_osc_hyperlink_non_ascii() { assert_eq!(measure_text_width("\x1b[38;5;4m\x1b]8;;file:///Users/dan/src/delta/src/ansi/mod.rs\x1b\\src/ansi/modバー.rs\x1b]8;;\x1b\\\x1b[0m"), measure_text_width("src/ansi/modバー.rs")); } #[test] fn test_parse_first_style() { let minus_line_from_unconfigured_git = "\x1b[31m-____\x1b[m\n"; let style = parse_first_style(minus_line_from_unconfigured_git); let expected_style = ansi_term::Style { foreground: Some(ansi_term::Color::Red), ..ansi_term::Style::default() }; assert_eq!(Some(expected_style), style); } #[test] fn test_string_starts_with_ansi_escape_sequence() { assert!(!string_starts_with_ansi_style_sequence("")); assert!(!string_starts_with_ansi_style_sequence("-")); assert!(string_starts_with_ansi_style_sequence( "\x1b[31m-XXX\x1b[m\n" )); assert!(string_starts_with_ansi_style_sequence("\x1b[32m+XXX")); } #[test] fn test_ansi_preserving_slice_and_index() { assert_eq!(ansi_preserving_slice("", 0), ""); assert_eq!(ansi_preserving_index("", 0), None); assert_eq!(ansi_preserving_slice("0", 0), "0"); assert_eq!(ansi_preserving_index("0", 0), Some(0)); assert_eq!(ansi_preserving_slice("0", 1), ""); assert_eq!(ansi_preserving_index("0", 1), None); let raw_string = "\x1b[1;35m0123456789\x1b[0m"; assert_eq!( ansi_preserving_slice(raw_string, 1), "\x1b[1;35m123456789\x1b[0m" ); assert_eq!(ansi_preserving_slice(raw_string, 7), "\x1b[1;35m789\x1b[0m"); assert_eq!(ansi_preserving_index(raw_string, 0), Some(7)); assert_eq!(ansi_preserving_index(raw_string, 1), Some(8)); assert_eq!(ansi_preserving_index(raw_string, 7), Some(14)); let raw_string = "\x1b[1;36m0\x1b[m\x1b[1;36m123456789\x1b[m\n"; assert_eq!( ansi_preserving_slice(raw_string, 1), "\x1b[1;36m\x1b[m\x1b[1;36m123456789\x1b[m\n" ); assert_eq!(ansi_preserving_index(raw_string, 0), Some(7)); assert_eq!(ansi_preserving_index(raw_string, 1), Some(18)); assert_eq!(ansi_preserving_index(raw_string, 7), Some(24)); let raw_string = "\x1b[1;36m012345\x1b[m\x1b[1;36m6789\x1b[m\n"; assert_eq!( ansi_preserving_slice(raw_string, 3), "\x1b[1;36m345\x1b[m\x1b[1;36m6789\x1b[m\n" ); assert_eq!(ansi_preserving_index(raw_string, 0), Some(7)); assert_eq!(ansi_preserving_index(raw_string, 1), Some(8)); assert_eq!(ansi_preserving_index(raw_string, 7), Some(24)); } #[test] fn test_truncate_str() { assert_eq!(truncate_str("1", 1, ""), "1"); assert_eq!(truncate_str("12", 1, ""), "1"); assert_eq!(truncate_str("123", 2, "s"), "1s"); assert_eq!(truncate_str("123", 2, "→"), "1→"); assert_eq!(truncate_str("12ݶ", 1, "ݶ"), "ݶ"); } }
_text_width("\x1b[38;5;4m\x1b]8;;file:///Users/dan/src/delta/src/ansi/mod.rs\x1b\\src/ansi/mod.rs\x1b]8;;\x1b\\\x1b[0m"), measure_text_width("src/ansi/mod.rs")); } #[test] fn test_me
identifier_body
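`measure_text_width` above sums display widths over the non-ANSI segments only, so escape sequences contribute zero columns. A minimal sketch of that fold, with the `(text, is_ansi)` pairs supplied directly instead of coming from `ansi_strings_iterator`, using the same `unicode-width` crate the module relies on; `width_of_segments` is a hypothetical helper name:

use unicode_width::UnicodeWidthStr;

// Accumulate display width, skipping segments flagged as ANSI escape sequences.
fn width_of_segments(segments: &[(&str, bool)]) -> usize {
    segments
        .iter()
        .fold(0, |acc, (text, is_ansi)| acc + if *is_ansi { 0 } else { text.width() })
}

fn main() {
    let segments = [("\x1b[31m", true), ("バー", false), ("\x1b[0m", true)];
    // The two wide characters occupy four columns; the SGR codes occupy none.
    assert_eq!(width_of_segments(&segments), 4);
}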
mod.rs
mod console_tests; mod iterator; use std::borrow::Cow; use ansi_term::Style; use itertools::Itertools; use unicode_segmentation::UnicodeSegmentation; use unicode_width::UnicodeWidthStr; use iterator::{AnsiElementIterator, Element}; pub const ANSI_CSI_CLEAR_TO_EOL: &str = "\x1b[0K"; pub const ANSI_CSI_CLEAR_TO_BOL: &str = "\x1b[1K"; pub const ANSI_SGR_RESET: &str = "\x1b[0m"; pub const ANSI_SGR_REVERSE: &str = "\x1b[7m"; pub fn strip_ansi_codes(s: &str) -> String { strip_ansi_codes_from_strings_iterator(ansi_strings_iterator(s)) } pub fn measure_text_width(s: &str) -> usize { ansi_strings_iterator(s).fold(0, |acc, (element, is_ansi)| { acc + if is_ansi { 0 } else { element.width() } }) } /// Truncate string such that `tail` is present as a suffix, preceded by as much of `s` as can be /// displayed in the requested width. // Return string constructed as follows: // 1. `display_width` characters are available. If the string fits, return it. // // 2. Contribute graphemes and ANSI escape sequences from `tail` until either (1) `tail` is // exhausted, or (2) the display width of the result would exceed `display_width`. // // 3. If tail was exhausted, then contribute graphemes and ANSI escape sequences from `s` until the // display_width of the result would exceed `display_width`. pub fn truncate_str<'a>(s: &'a str, display_width: usize, tail: &str) -> Cow<'a, str> { let items = ansi_strings_iterator(s).collect::<Vec<(&str, bool)>>(); let width = strip_ansi_codes_from_strings_iterator(items.iter().copied()).width(); if width <= display_width { return Cow::from(s); } let result_tail = if!tail.is_empty() { truncate_str(tail, display_width, "").to_string() } else { String::new() }; let mut used = measure_text_width(&result_tail); let mut result = String::new(); for (t, is_ansi) in items { if!is_ansi { for g in t.graphemes(true) { let w = g.width(); if used + w > display_width { result.push_str(&" ".repeat(display_width.saturating_sub(used))); break; } result.push_str(g); used += w; } } else { result.push_str(t); } } Cow::from(format!("{result}{result_tail}")) } pub fn parse_style_sections(s: &str) -> Vec<(ansi_term::Style, &str)> { let mut sections = Vec::new(); let mut curr_style = Style::default(); for element in AnsiElementIterator::new(s) { match element { Element::Text(start, end) => sections.push((curr_style, &s[start..end])), Element::Sgr(style, _, _) => curr_style = style, _ => {} } } sections } // Return the first CSI element, if any, as an `ansi_term::Style`. pub fn parse_first_style(s: &str) -> Option<ansi_term::Style> { AnsiElementIterator::new(s).find_map(|el| match el { Element::Sgr(style, _, _) => Some(style), _ => None, }) } pub fn string_starts_with_ansi_style_sequence(s: &str) -> bool { AnsiElementIterator::new(s) .next() .map(|el| matches!(el, Element::Sgr(_, _, _))) .unwrap_or(false) } /// Return string formed from a byte slice starting at byte position `start`, where the index /// counts bytes in non-ANSI-escape-sequence content only. All ANSI escape sequences in the /// original string are preserved. pub fn ansi_preserving_slice(s: &str, start: usize) -> String { AnsiElementIterator::new(s) .scan(0, |index, element| { // `index` is the index in non-ANSI-escape-sequence content. Some(match element { Element::Sgr(_, a, b) => &s[a..b], Element::Csi(a, b) => &s[a..b], Element::Esc(a, b) => &s[a..b], Element::Osc(a, b) => &s[a..b], Element::Text(a, b) => { let i = *index; *index += b - a; if *index <= start { // This text segment ends before start, so contributes no bytes. 
"" } else if i > start { // This section starts after `start`, so contributes all its bytes. &s[a..b] } else { // This section contributes those bytes that are >= start &s[(a + start - i)..b] } } }) }) .join("") } /// Return the byte index in `s` of the i-th text byte in `s`. I.e. `i` counts /// bytes in non-ANSI-escape-sequence content only. pub fn ansi_preserving_index(s: &str, i: usize) -> Option<usize> { let mut index = 0; for element in AnsiElementIterator::new(s) { if let Element::Text(a, b) = element { index += b - a; if index > i { return Some(b - (index - i)); } } } None } fn ansi_strings_iterator(s: &str) -> impl Iterator<Item = (&str, bool)> { AnsiElementIterator::new(s).map(move |el| match el { Element::Sgr(_, i, j) => (&s[i..j], true), Element::Csi(i, j) => (&s[i..j], true), Element::Esc(i, j) => (&s[i..j], true), Element::Osc(i, j) => (&s[i..j], true), Element::Text(i, j) => (&s[i..j], false), }) } fn strip_ansi_codes_from_strings_iterator<'a>( strings: impl Iterator<Item = (&'a str, bool)>, ) -> String { strings .filter_map(|(el, is_ansi)| if!is_ansi { Some(el) } else { None }) .join("") } pub fn explain_ansi(line: &str, colorful: bool) -> String { use crate::style::Style; parse_style_sections(line) .into_iter() .map(|(ansi_term_style, s)| { let style = Style { ansi_term_style, ..Style::default() }; if colorful { format!("({}){}", style.to_painted_string(), style.paint(s)) } else { format!("({style}){s}") } }) .collect() } #[cfg(test)] mod tests { use crate::ansi::ansi_preserving_index; // Note that src/ansi/console_tests.rs contains additional test coverage for this module. use super::{ ansi_preserving_slice, measure_text_width, parse_first_style, string_starts_with_ansi_style_sequence, strip_ansi_codes, truncate_str, }; #[test] fn test_strip_ansi_codes() { for s in &["src/ansi/mod.rs", "バー", "src/ansi/modバー.rs"] { assert_eq!(strip_ansi_codes(s), *s); } assert_eq!(strip_ansi_codes("\x1b[31mバー\x1b[0m"), "バー"); } #[test] fn test_measure_text_width() { assert_eq!(measure_text_width("src/ansi/mod.rs"), 15); assert_eq!(measure_text_width("バー"), 4); assert_eq!(measure_text_width("src/ansi/modバー.rs"), 19); assert_eq!(measure_text_width("\x1b[31mバー\x1b[0m"), 4); assert_eq!(measure_text_width("a\nb\n"), 2); } #[test] fn test_strip_ansi_codes_osc_hyperlink() { assert_eq!(strip_ansi_codes("\x1b[38;5;4m\x1b]8;;file:///Users/dan/src/delta/src/ansi/mod.rs\x1b\\src/ansi/mod.rs\x1b]8;;\x1b\\\x1b[0m\n"), "src/ansi/mod.rs\n"); } #[test] fn test_measure_text_width_osc_hyperlink() { assert_eq!(measure_text_width("\x1b[38;5;4m\x1b]8;;file:///Users/dan/src/delta/src/ansi/mod.rs\x1b\\src/ansi/mod.rs\x1b]8;;\x1b\\\x1b[0m"), measure_text_width("src/ansi/mod.rs")); } #[test] fn test_measure_text_width_osc_
ure_text_width("\x1b[38;5;4m\x1b]8;;file:///Users/dan/src/delta/src/ansi/mod.rs\x1b\\src/ansi/modバー.rs\x1b]8;;\x1b\\\x1b[0m"), measure_text_width("src/ansi/modバー.rs")); } #[test] fn test_parse_first_style() { let minus_line_from_unconfigured_git = "\x1b[31m-____\x1b[m\n"; let style = parse_first_style(minus_line_from_unconfigured_git); let expected_style = ansi_term::Style { foreground: Some(ansi_term::Color::Red), ..ansi_term::Style::default() }; assert_eq!(Some(expected_style), style); } #[test] fn test_string_starts_with_ansi_escape_sequence() { assert!(!string_starts_with_ansi_style_sequence("")); assert!(!string_starts_with_ansi_style_sequence("-")); assert!(string_starts_with_ansi_style_sequence( "\x1b[31m-XXX\x1b[m\n" )); assert!(string_starts_with_ansi_style_sequence("\x1b[32m+XXX")); } #[test] fn test_ansi_preserving_slice_and_index() { assert_eq!(ansi_preserving_slice("", 0), ""); assert_eq!(ansi_preserving_index("", 0), None); assert_eq!(ansi_preserving_slice("0", 0), "0"); assert_eq!(ansi_preserving_index("0", 0), Some(0)); assert_eq!(ansi_preserving_slice("0", 1), ""); assert_eq!(ansi_preserving_index("0", 1), None); let raw_string = "\x1b[1;35m0123456789\x1b[0m"; assert_eq!( ansi_preserving_slice(raw_string, 1), "\x1b[1;35m123456789\x1b[0m" ); assert_eq!(ansi_preserving_slice(raw_string, 7), "\x1b[1;35m789\x1b[0m"); assert_eq!(ansi_preserving_index(raw_string, 0), Some(7)); assert_eq!(ansi_preserving_index(raw_string, 1), Some(8)); assert_eq!(ansi_preserving_index(raw_string, 7), Some(14)); let raw_string = "\x1b[1;36m0\x1b[m\x1b[1;36m123456789\x1b[m\n"; assert_eq!( ansi_preserving_slice(raw_string, 1), "\x1b[1;36m\x1b[m\x1b[1;36m123456789\x1b[m\n" ); assert_eq!(ansi_preserving_index(raw_string, 0), Some(7)); assert_eq!(ansi_preserving_index(raw_string, 1), Some(18)); assert_eq!(ansi_preserving_index(raw_string, 7), Some(24)); let raw_string = "\x1b[1;36m012345\x1b[m\x1b[1;36m6789\x1b[m\n"; assert_eq!( ansi_preserving_slice(raw_string, 3), "\x1b[1;36m345\x1b[m\x1b[1;36m6789\x1b[m\n" ); assert_eq!(ansi_preserving_index(raw_string, 0), Some(7)); assert_eq!(ansi_preserving_index(raw_string, 1), Some(8)); assert_eq!(ansi_preserving_index(raw_string, 7), Some(24)); } #[test] fn test_truncate_str() { assert_eq!(truncate_str("1", 1, ""), "1"); assert_eq!(truncate_str("12", 1, ""), "1"); assert_eq!(truncate_str("123", 2, "s"), "1s"); assert_eq!(truncate_str("123", 2, "→"), "1→"); assert_eq!(truncate_str("12ݶ", 1, "ݶ"), "ݶ"); } }
hyperlink_non_ascii() { assert_eq!(meas
identifier_name
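`truncate_str` above appends graphemes until the width budget left after reserving room for the tail would be exceeded. A simplified sketch of that rule for plain text with no ANSI sequences, assuming the tail itself already fits within the requested width (the real function truncates the tail recursively); `truncate_plain` is a hypothetical name:

use unicode_segmentation::UnicodeSegmentation;
use unicode_width::UnicodeWidthStr;

fn truncate_plain(s: &str, display_width: usize, tail: &str) -> String {
    // If the whole string fits, return it unchanged.
    if s.width() <= display_width {
        return s.to_string();
    }
    // Reserve width for the tail, then take graphemes from `s` while they fit.
    let mut used = tail.width();
    let mut out = String::new();
    for g in s.graphemes(true) {
        let w = g.width();
        if used + w > display_width {
            break;
        }
        out.push_str(g);
        used += w;
    }
    out.push_str(tail);
    out
}

fn main() {
    assert_eq!(truncate_plain("123", 2, "→"), "1→");
    assert_eq!(truncate_plain("12", 1, ""), "1");
}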
mod.rs
mod console_tests; mod iterator; use std::borrow::Cow; use ansi_term::Style; use itertools::Itertools; use unicode_segmentation::UnicodeSegmentation; use unicode_width::UnicodeWidthStr; use iterator::{AnsiElementIterator, Element}; pub const ANSI_CSI_CLEAR_TO_EOL: &str = "\x1b[0K"; pub const ANSI_CSI_CLEAR_TO_BOL: &str = "\x1b[1K"; pub const ANSI_SGR_RESET: &str = "\x1b[0m"; pub const ANSI_SGR_REVERSE: &str = "\x1b[7m"; pub fn strip_ansi_codes(s: &str) -> String { strip_ansi_codes_from_strings_iterator(ansi_strings_iterator(s)) } pub fn measure_text_width(s: &str) -> usize { ansi_strings_iterator(s).fold(0, |acc, (element, is_ansi)| { acc + if is_ansi { 0 } else { element.width() } }) } /// Truncate string such that `tail` is present as a suffix, preceded by as much of `s` as can be /// displayed in the requested width. // Return string constructed as follows: // 1. `display_width` characters are available. If the string fits, return it. // // 2. Contribute graphemes and ANSI escape sequences from `tail` until either (1) `tail` is // exhausted, or (2) the display width of the result would exceed `display_width`. // // 3. If tail was exhausted, then contribute graphemes and ANSI escape sequences from `s` until the // display_width of the result would exceed `display_width`. pub fn truncate_str<'a>(s: &'a str, display_width: usize, tail: &str) -> Cow<'a, str> { let items = ansi_strings_iterator(s).collect::<Vec<(&str, bool)>>(); let width = strip_ansi_codes_from_strings_iterator(items.iter().copied()).width(); if width <= display_width { return Cow::from(s); } let result_tail = if!tail.is_empty() { truncate_str(tail, display_width, "").to_string() } else { String::new() }; let mut used = measure_text_width(&result_tail); let mut result = String::new(); for (t, is_ansi) in items { if!is_ansi { for g in t.graphemes(true) { let w = g.width(); if used + w > display_width { result.push_str(&" ".repeat(display_width.saturating_sub(used))); break; } result.push_str(g); used += w; } } else
} Cow::from(format!("{result}{result_tail}")) } pub fn parse_style_sections(s: &str) -> Vec<(ansi_term::Style, &str)> { let mut sections = Vec::new(); let mut curr_style = Style::default(); for element in AnsiElementIterator::new(s) { match element { Element::Text(start, end) => sections.push((curr_style, &s[start..end])), Element::Sgr(style, _, _) => curr_style = style, _ => {} } } sections } // Return the first CSI element, if any, as an `ansi_term::Style`. pub fn parse_first_style(s: &str) -> Option<ansi_term::Style> { AnsiElementIterator::new(s).find_map(|el| match el { Element::Sgr(style, _, _) => Some(style), _ => None, }) } pub fn string_starts_with_ansi_style_sequence(s: &str) -> bool { AnsiElementIterator::new(s) .next() .map(|el| matches!(el, Element::Sgr(_, _, _))) .unwrap_or(false) } /// Return string formed from a byte slice starting at byte position `start`, where the index /// counts bytes in non-ANSI-escape-sequence content only. All ANSI escape sequences in the /// original string are preserved. pub fn ansi_preserving_slice(s: &str, start: usize) -> String { AnsiElementIterator::new(s) .scan(0, |index, element| { // `index` is the index in non-ANSI-escape-sequence content. Some(match element { Element::Sgr(_, a, b) => &s[a..b], Element::Csi(a, b) => &s[a..b], Element::Esc(a, b) => &s[a..b], Element::Osc(a, b) => &s[a..b], Element::Text(a, b) => { let i = *index; *index += b - a; if *index <= start { // This text segment ends before start, so contributes no bytes. "" } else if i > start { // This section starts after `start`, so contributes all its bytes. &s[a..b] } else { // This section contributes those bytes that are >= start &s[(a + start - i)..b] } } }) }) .join("") } /// Return the byte index in `s` of the i-th text byte in `s`. I.e. `i` counts /// bytes in non-ANSI-escape-sequence content only. pub fn ansi_preserving_index(s: &str, i: usize) -> Option<usize> { let mut index = 0; for element in AnsiElementIterator::new(s) { if let Element::Text(a, b) = element { index += b - a; if index > i { return Some(b - (index - i)); } } } None } fn ansi_strings_iterator(s: &str) -> impl Iterator<Item = (&str, bool)> { AnsiElementIterator::new(s).map(move |el| match el { Element::Sgr(_, i, j) => (&s[i..j], true), Element::Csi(i, j) => (&s[i..j], true), Element::Esc(i, j) => (&s[i..j], true), Element::Osc(i, j) => (&s[i..j], true), Element::Text(i, j) => (&s[i..j], false), }) } fn strip_ansi_codes_from_strings_iterator<'a>( strings: impl Iterator<Item = (&'a str, bool)>, ) -> String { strings .filter_map(|(el, is_ansi)| if!is_ansi { Some(el) } else { None }) .join("") } pub fn explain_ansi(line: &str, colorful: bool) -> String { use crate::style::Style; parse_style_sections(line) .into_iter() .map(|(ansi_term_style, s)| { let style = Style { ansi_term_style, ..Style::default() }; if colorful { format!("({}){}", style.to_painted_string(), style.paint(s)) } else { format!("({style}){s}") } }) .collect() } #[cfg(test)] mod tests { use crate::ansi::ansi_preserving_index; // Note that src/ansi/console_tests.rs contains additional test coverage for this module. 
use super::{ ansi_preserving_slice, measure_text_width, parse_first_style, string_starts_with_ansi_style_sequence, strip_ansi_codes, truncate_str, }; #[test] fn test_strip_ansi_codes() { for s in &["src/ansi/mod.rs", "バー", "src/ansi/modバー.rs"] { assert_eq!(strip_ansi_codes(s), *s); } assert_eq!(strip_ansi_codes("\x1b[31mバー\x1b[0m"), "バー"); } #[test] fn test_measure_text_width() { assert_eq!(measure_text_width("src/ansi/mod.rs"), 15); assert_eq!(measure_text_width("バー"), 4); assert_eq!(measure_text_width("src/ansi/modバー.rs"), 19); assert_eq!(measure_text_width("\x1b[31mバー\x1b[0m"), 4); assert_eq!(measure_text_width("a\nb\n"), 2); } #[test] fn test_strip_ansi_codes_osc_hyperlink() { assert_eq!(strip_ansi_codes("\x1b[38;5;4m\x1b]8;;file:///Users/dan/src/delta/src/ansi/mod.rs\x1b\\src/ansi/mod.rs\x1b]8;;\x1b\\\x1b[0m\n"), "src/ansi/mod.rs\n"); } #[test] fn test_measure_text_width_osc_hyperlink() { assert_eq!(measure_text_width("\x1b[38;5;4m\x1b]8;;file:///Users/dan/src/delta/src/ansi/mod.rs\x1b\\src/ansi/mod.rs\x1b]8;;\x1b\\\x1b[0m"), measure_text_width("src/ansi/mod.rs")); } #[test] fn test_measure_text_width_osc_hyperlink_non_ascii() { assert_eq!(measure_text_width("\x1b[38;5;4m\x1b]8;;file:///Users/dan/src/delta/src/ansi/mod.rs\x1b\\src/ansi/modバー.rs\x1b]8;;\x1b\\\x1b[0m"), measure_text_width("src/ansi/modバー.rs")); } #[test] fn test_parse_first_style() { let minus_line_from_unconfigured_git = "\x1b[31m-____\x1b[m\n"; let style = parse_first_style(minus_line_from_unconfigured_git); let expected_style = ansi_term::Style { foreground: Some(ansi_term::Color::Red), ..ansi_term::Style::default() }; assert_eq!(Some(expected_style), style); } #[test] fn test_string_starts_with_ansi_escape_sequence() { assert!(!string_starts_with_ansi_style_sequence("")); assert!(!string_starts_with_ansi_style_sequence("-")); assert!(string_starts_with_ansi_style_sequence( "\x1b[31m-XXX\x1b[m\n" )); assert!(string_starts_with_ansi_style_sequence("\x1b[32m+XXX")); } #[test] fn test_ansi_preserving_slice_and_index() { assert_eq!(ansi_preserving_slice("", 0), ""); assert_eq!(ansi_preserving_index("", 0), None); assert_eq!(ansi_preserving_slice("0", 0), "0"); assert_eq!(ansi_preserving_index("0", 0), Some(0)); assert_eq!(ansi_preserving_slice("0", 1), ""); assert_eq!(ansi_preserving_index("0", 1), None); let raw_string = "\x1b[1;35m0123456789\x1b[0m"; assert_eq!( ansi_preserving_slice(raw_string, 1), "\x1b[1;35m123456789\x1b[0m" ); assert_eq!(ansi_preserving_slice(raw_string, 7), "\x1b[1;35m789\x1b[0m"); assert_eq!(ansi_preserving_index(raw_string, 0), Some(7)); assert_eq!(ansi_preserving_index(raw_string, 1), Some(8)); assert_eq!(ansi_preserving_index(raw_string, 7), Some(14)); let raw_string = "\x1b[1;36m0\x1b[m\x1b[1;36m123456789\x1b[m\n"; assert_eq!( ansi_preserving_slice(raw_string, 1), "\x1b[1;36m\x1b[m\x1b[1;36m123456789\x1b[m\n" ); assert_eq!(ansi_preserving_index(raw_string, 0), Some(7)); assert_eq!(ansi_preserving_index(raw_string, 1), Some(18)); assert_eq!(ansi_preserving_index(raw_string, 7), Some(24)); let raw_string = "\x1b[1;36m012345\x1b[m\x1b[1;36m6789\x1b[m\n"; assert_eq!( ansi_preserving_slice(raw_string, 3), "\x1b[1;36m345\x1b[m\x1b[1;36m6789\x1b[m\n" ); assert_eq!(ansi_preserving_index(raw_string, 0), Some(7)); assert_eq!(ansi_preserving_index(raw_string, 1), Some(8)); assert_eq!(ansi_preserving_index(raw_string, 7), Some(24)); } #[test] fn test_truncate_str() { assert_eq!(truncate_str("1", 1, ""), "1"); assert_eq!(truncate_str("12", 1, ""), "1"); assert_eq!(truncate_str("123", 2, "s"), "1s"); 
assert_eq!(truncate_str("123", 2, "→"), "1→"); assert_eq!(truncate_str("12ݶ", 1, "ݶ"), "ݶ"); } }
{ result.push_str(t); }
conditional_block
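`ansi_preserving_index` above walks the text elements, accumulating their byte lengths until the i-th text-only byte is reached, and then maps it back to a byte offset in the full string. A sketch of the same index arithmetic with the text byte ranges supplied directly instead of coming from `AnsiElementIterator`; the `7..17` range below mirrors the `\x1b[1;35m0123456789\x1b[0m` test case, and `preserving_index` is a hypothetical name:

// `text_ranges` are the (start, end) byte ranges of the Text elements in the
// original string; `i` counts bytes of text content only.
fn preserving_index(text_ranges: &[(usize, usize)], i: usize) -> Option<usize> {
    let mut seen = 0;
    for &(a, b) in text_ranges {
        seen += b - a;
        if seen > i {
            // The i-th text byte lies inside this range.
            return Some(b - (seen - i));
        }
    }
    None
}

fn main() {
    // "\x1b[1;35m0123456789\x1b[0m": the single text element spans bytes 7..17.
    let ranges = [(7usize, 17usize)];
    assert_eq!(preserving_index(&ranges, 0), Some(7));
    assert_eq!(preserving_index(&ranges, 7), Some(14));
    assert_eq!(preserving_index(&ranges, 10), None);
}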
mod.rs
mod console_tests; mod iterator; use std::borrow::Cow; use ansi_term::Style; use itertools::Itertools; use unicode_segmentation::UnicodeSegmentation; use unicode_width::UnicodeWidthStr; use iterator::{AnsiElementIterator, Element}; pub const ANSI_CSI_CLEAR_TO_EOL: &str = "\x1b[0K"; pub const ANSI_CSI_CLEAR_TO_BOL: &str = "\x1b[1K"; pub const ANSI_SGR_RESET: &str = "\x1b[0m"; pub const ANSI_SGR_REVERSE: &str = "\x1b[7m"; pub fn strip_ansi_codes(s: &str) -> String { strip_ansi_codes_from_strings_iterator(ansi_strings_iterator(s)) } pub fn measure_text_width(s: &str) -> usize { ansi_strings_iterator(s).fold(0, |acc, (element, is_ansi)| { acc + if is_ansi { 0 } else { element.width() } }) } /// Truncate string such that `tail` is present as a suffix, preceded by as much of `s` as can be /// displayed in the requested width. // Return string constructed as follows: // 1. `display_width` characters are available. If the string fits, return it. // // 2. Contribute graphemes and ANSI escape sequences from `tail` until either (1) `tail` is // exhausted, or (2) the display width of the result would exceed `display_width`. // // 3. If tail was exhausted, then contribute graphemes and ANSI escape sequences from `s` until the // display_width of the result would exceed `display_width`. pub fn truncate_str<'a>(s: &'a str, display_width: usize, tail: &str) -> Cow<'a, str> { let items = ansi_strings_iterator(s).collect::<Vec<(&str, bool)>>(); let width = strip_ansi_codes_from_strings_iterator(items.iter().copied()).width(); if width <= display_width { return Cow::from(s); } let result_tail = if!tail.is_empty() { truncate_str(tail, display_width, "").to_string() } else { String::new() }; let mut used = measure_text_width(&result_tail); let mut result = String::new(); for (t, is_ansi) in items { if!is_ansi { for g in t.graphemes(true) { let w = g.width(); if used + w > display_width { result.push_str(&" ".repeat(display_width.saturating_sub(used))); break; } result.push_str(g); used += w; } } else { result.push_str(t); } } Cow::from(format!("{result}{result_tail}")) } pub fn parse_style_sections(s: &str) -> Vec<(ansi_term::Style, &str)> { let mut sections = Vec::new(); let mut curr_style = Style::default(); for element in AnsiElementIterator::new(s) { match element { Element::Text(start, end) => sections.push((curr_style, &s[start..end])), Element::Sgr(style, _, _) => curr_style = style, _ => {} } } sections } // Return the first CSI element, if any, as an `ansi_term::Style`. pub fn parse_first_style(s: &str) -> Option<ansi_term::Style> { AnsiElementIterator::new(s).find_map(|el| match el { Element::Sgr(style, _, _) => Some(style), _ => None, }) } pub fn string_starts_with_ansi_style_sequence(s: &str) -> bool { AnsiElementIterator::new(s) .next() .map(|el| matches!(el, Element::Sgr(_, _, _))) .unwrap_or(false) } /// Return string formed from a byte slice starting at byte position `start`, where the index /// counts bytes in non-ANSI-escape-sequence content only. All ANSI escape sequences in the /// original string are preserved. pub fn ansi_preserving_slice(s: &str, start: usize) -> String { AnsiElementIterator::new(s) .scan(0, |index, element| { // `index` is the index in non-ANSI-escape-sequence content. Some(match element { Element::Sgr(_, a, b) => &s[a..b], Element::Csi(a, b) => &s[a..b], Element::Esc(a, b) => &s[a..b], Element::Osc(a, b) => &s[a..b], Element::Text(a, b) => { let i = *index; *index += b - a; if *index <= start { // This text segment ends before start, so contributes no bytes. 
"" } else if i > start { // This section starts after `start`, so contributes all its bytes. &s[a..b] } else { // This section contributes those bytes that are >= start &s[(a + start - i)..b] } } }) }) .join("") } /// Return the byte index in `s` of the i-th text byte in `s`. I.e. `i` counts /// bytes in non-ANSI-escape-sequence content only. pub fn ansi_preserving_index(s: &str, i: usize) -> Option<usize> { let mut index = 0; for element in AnsiElementIterator::new(s) { if let Element::Text(a, b) = element { index += b - a; if index > i { return Some(b - (index - i)); } } } None } fn ansi_strings_iterator(s: &str) -> impl Iterator<Item = (&str, bool)> { AnsiElementIterator::new(s).map(move |el| match el { Element::Sgr(_, i, j) => (&s[i..j], true), Element::Csi(i, j) => (&s[i..j], true), Element::Esc(i, j) => (&s[i..j], true), Element::Osc(i, j) => (&s[i..j], true), Element::Text(i, j) => (&s[i..j], false), }) } fn strip_ansi_codes_from_strings_iterator<'a>( strings: impl Iterator<Item = (&'a str, bool)>, ) -> String { strings .filter_map(|(el, is_ansi)| if!is_ansi { Some(el) } else { None }) .join("") } pub fn explain_ansi(line: &str, colorful: bool) -> String { use crate::style::Style; parse_style_sections(line) .into_iter() .map(|(ansi_term_style, s)| { let style = Style { ansi_term_style, ..Style::default() }; if colorful { format!("({}){}", style.to_painted_string(), style.paint(s)) } else { format!("({style}){s}") } }) .collect() } #[cfg(test)] mod tests { use crate::ansi::ansi_preserving_index; // Note that src/ansi/console_tests.rs contains additional test coverage for this module. use super::{ ansi_preserving_slice, measure_text_width, parse_first_style, string_starts_with_ansi_style_sequence, strip_ansi_codes, truncate_str, }; #[test] fn test_strip_ansi_codes() { for s in &["src/ansi/mod.rs", "バー", "src/ansi/modバー.rs"] { assert_eq!(strip_ansi_codes(s), *s); } assert_eq!(strip_ansi_codes("\x1b[31mバー\x1b[0m"), "バー"); } #[test] fn test_measure_text_width() { assert_eq!(measure_text_width("src/ansi/mod.rs"), 15); assert_eq!(measure_text_width("バー"), 4); assert_eq!(measure_text_width("src/ansi/modバー.rs"), 19); assert_eq!(measure_text_width("\x1b[31mバー\x1b[0m"), 4); assert_eq!(measure_text_width("a\nb\n"), 2); } #[test] fn test_strip_ansi_codes_osc_hyperlink() { assert_eq!(strip_ansi_codes("\x1b[38;5;4m\x1b]8;;file:///Users/dan/src/delta/src/ansi/mod.rs\x1b\\src/ansi/mod.rs\x1b]8;;\x1b\\\x1b[0m\n"), "src/ansi/mod.rs\n"); } #[test] fn test_measure_text_width_osc_hyperlink() { assert_eq!(measure_text_width("\x1b[38;5;4m\x1b]8;;file:///Users/dan/src/delta/src/ansi/mod.rs\x1b\\src/ansi/mod.rs\x1b]8;;\x1b\\\x1b[0m"),
measure_text_width("src/ansi/mod.rs")); } #[test] fn test_measure_text_width_osc_hyperlink_non_ascii() { assert_eq!(measure_text_width("\x1b[38;5;4m\x1b]8;;file:///Users/dan/src/delta/src/ansi/mod.rs\x1b\\src/ansi/modバー.rs\x1b]8;;\x1b\\\x1b[0m"), measure_text_width("src/ansi/modバー.rs")); } #[test] fn test_parse_first_style() { let minus_line_from_unconfigured_git = "\x1b[31m-____\x1b[m\n"; let style = parse_first_style(minus_line_from_unconfigured_git); let expected_style = ansi_term::Style { foreground: Some(ansi_term::Color::Red), ..ansi_term::Style::default() }; assert_eq!(Some(expected_style), style); } #[test] fn test_string_starts_with_ansi_escape_sequence() { assert!(!string_starts_with_ansi_style_sequence("")); assert!(!string_starts_with_ansi_style_sequence("-")); assert!(string_starts_with_ansi_style_sequence( "\x1b[31m-XXX\x1b[m\n" )); assert!(string_starts_with_ansi_style_sequence("\x1b[32m+XXX")); } #[test] fn test_ansi_preserving_slice_and_index() { assert_eq!(ansi_preserving_slice("", 0), ""); assert_eq!(ansi_preserving_index("", 0), None); assert_eq!(ansi_preserving_slice("0", 0), "0"); assert_eq!(ansi_preserving_index("0", 0), Some(0)); assert_eq!(ansi_preserving_slice("0", 1), ""); assert_eq!(ansi_preserving_index("0", 1), None); let raw_string = "\x1b[1;35m0123456789\x1b[0m"; assert_eq!( ansi_preserving_slice(raw_string, 1), "\x1b[1;35m123456789\x1b[0m" ); assert_eq!(ansi_preserving_slice(raw_string, 7), "\x1b[1;35m789\x1b[0m"); assert_eq!(ansi_preserving_index(raw_string, 0), Some(7)); assert_eq!(ansi_preserving_index(raw_string, 1), Some(8)); assert_eq!(ansi_preserving_index(raw_string, 7), Some(14)); let raw_string = "\x1b[1;36m0\x1b[m\x1b[1;36m123456789\x1b[m\n"; assert_eq!( ansi_preserving_slice(raw_string, 1), "\x1b[1;36m\x1b[m\x1b[1;36m123456789\x1b[m\n" ); assert_eq!(ansi_preserving_index(raw_string, 0), Some(7)); assert_eq!(ansi_preserving_index(raw_string, 1), Some(18)); assert_eq!(ansi_preserving_index(raw_string, 7), Some(24)); let raw_string = "\x1b[1;36m012345\x1b[m\x1b[1;36m6789\x1b[m\n"; assert_eq!( ansi_preserving_slice(raw_string, 3), "\x1b[1;36m345\x1b[m\x1b[1;36m6789\x1b[m\n" ); assert_eq!(ansi_preserving_index(raw_string, 0), Some(7)); assert_eq!(ansi_preserving_index(raw_string, 1), Some(8)); assert_eq!(ansi_preserving_index(raw_string, 7), Some(24)); } #[test] fn test_truncate_str() { assert_eq!(truncate_str("1", 1, ""), "1"); assert_eq!(truncate_str("12", 1, ""), "1"); assert_eq!(truncate_str("123", 2, "s"), "1s"); assert_eq!(truncate_str("123", 2, "→"), "1→"); assert_eq!(truncate_str("12ݶ", 1, "ݶ"), "ݶ"); } }
random_line_split
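`strip_ansi_codes` above keeps only the non-ANSI segments and concatenates them. A small sketch of that filtering step over `(text, is_ansi)` pairs standing in for the output of `ansi_strings_iterator`; `strip_segments` is a hypothetical helper name:

// Drop the ANSI segments and join the remaining text.
fn strip_segments(segments: &[(&str, bool)]) -> String {
    segments
        .iter()
        .filter_map(|(text, is_ansi)| if *is_ansi { None } else { Some(*text) })
        .collect()
}

fn main() {
    let segments = [("\x1b[31m", true), ("バー", false), ("\x1b[0m", true)];
    assert_eq!(strip_segments(&segments), "バー");
}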
main.rs
#![no_std] #![no_main] #![feature(asm)] #![feature(collections)] extern crate stm32f7_discovery as stm32f7; extern crate collections; extern crate r0; pub mod plot; pub mod model; pub mod temp_sensor; pub mod time; pub mod util; pub mod pid; pub mod ramp; pub mod state_button; mod leak; use stm32f7::{system_clock,board,embedded,sdram,lcd,touch,i2c}; use stm32f7::lcd::*; use embedded::interfaces::gpio; use embedded::interfaces::gpio::{Gpio}; use board::spi::Spi; use time::*; use util::*; use plot::DragDirection; use embedded::util::delay; use model::TouchEvent::*; use collections::*; use collections::boxed::Box; use leak::Leak; use self::temp_sensor::{TemperatureSensor,Max6675}; use state_button::State; static TTF: &[u8] = include_bytes!("RobotoMono-Bold.ttf"); #[no_mangle] pub unsafe extern "C" fn reset() { extern "C" { static __DATA_LOAD: u32; static __DATA_END: u32; static mut __DATA_START: u32; static mut __BSS_START: u32; static mut __BSS_END: u32; } let data_load = &__DATA_LOAD; let data_start = &mut __DATA_START; let data_end = &__DATA_END; let bss_start = &mut __BSS_START; let bss_end = &__BSS_END; r0::init_data(data_start, data_end, data_load); r0::zero_bss(bss_start, bss_end); stm32f7::heap::init(); // enable floating point unit let scb = stm32f7::cortex_m::peripheral::scb_mut(); scb.cpacr.modify(|v| v | 0b1111 << 20); asm!("DSB; ISB;"::::"volatile"); // pipeline flush main(board::hw()); } // WORKAROUND: rust compiler will inline & reorder fp instructions into #[inline(never)] // reset() before the FPU is initialized fn main(hw: board::Hardware) ->! { let board::Hardware { rcc, pwr, flash, fmc, ltdc, gpio_a, gpio_b, gpio_c, gpio_d, gpio_e, gpio_f, gpio_g, gpio_h, gpio_i, gpio_j, gpio_k, spi_2, i2c_3, .. } = hw; let mut gpio = Gpio::new(gpio_a, gpio_b, gpio_c, gpio_d, gpio_e, gpio_f, gpio_g, gpio_h, gpio_i, gpio_j, gpio_k); system_clock::init(rcc, pwr, flash); // Peripheral clock configuration { // enable all gpio ports rcc.ahb1enr.update(|r| { r.set_gpioaen(true); r.set_gpioben(true); r.set_gpiocen(true); r.set_gpioden(true); r.set_gpioeen(true); r.set_gpiofen(true); r.set_gpiogen(true); r.set_gpiohen(true); r.set_gpioien(true); r.set_gpiojen(true); r.set_gpioken(true); }); // Enable SPI_2 rcc.apb1enr.update(|apb1enr| { apb1enr.set_spi2en(true); }); delay(1); } // i2c configuration i2c::init_pins_and_clocks(rcc, &mut gpio); let mut i2c_3 = i2c::init(i2c_3); i2c_3.test_1(); i2c_3.test_2(); let mut temp_sensor = temp_sensor_init_spi2(&mut gpio, spi_2); // init sdram (needed for display buffer) sdram::init(rcc, fmc, &mut gpio); let pwm_pin = (gpio::Port::PortI, gpio::Pin::Pin2); let mut pwm_gpio = gpio.to_output(pwm_pin, gpio::OutputType::PushPull, gpio::OutputSpeed::High,
let drag_color = Color::from_hex(0x000000); let grid_color = Color::from_hex(0x444444); // lcd controller let mut lcd = lcd::init(ltdc, rcc, &mut gpio); touch::check_family_id(&mut i2c_3).unwrap(); loop { SYSCLOCK.reset(); lcd.clear_screen(); lcd.set_background_color(Color::from_hex(0x000000)); let plot_font = Box::new(Font::new(TTF, 11).unwrap()).leak(); let rtval_font = Box::new(Font::new(TTF, 14).unwrap()).leak(); let mut plot = plot::Plot::new(model::Range::new(0f32, (20*60) as f32), model::Range::new(0f32, 200f32), plot_font, rtval_font, axis_color, grid_color, drag_color, 80, // drag timeout ); plot.draw_axis(&mut lcd); //let mut pid_controller = pid::PIDController::new(0.3f32, 0.0f32, 0.0f32); //let mut pid_controller = pid::PIDController::new(0.1f32, 0.0f32, 0.3f32); // Definitely better than first, but overshooting //let mut pid_controller = pid::PIDController::new(0.2f32, 0.0f32, 0.3f32); // Not much different let mut pid_controller = pid::PIDController::new(0.2f32, 0.0f32, 0.6f32); // Not much different let mut smoother = pid::Smoother::new(10); let mut measurement_start_system_time = SYSCLOCK.get_ticks(); let mut last_measurement_system_time = SYSCLOCK.get_ticks(); let mut duty_cycle: usize = 0; let mut temp = 20f32; let mut state_button = state_button::StateButton::new( Color::from_hex(0x222222), Rect{origin: Point{x: 440, y: 0}, width: 40, height: 40} ); state_button.render(&mut lcd); let mut last_touch_event = None; 'mainloop: loop { let ticks = SYSCLOCK.get_ticks(); let delta_measurement = time::delta_checked(&last_measurement_system_time, &ticks); if delta_measurement.to_msecs() >= 500 { let val = temp_sensor.read(); let measurement_time = time::delta_checked(&measurement_start_system_time, &ticks).to_secs(); let measurement = model::TimeTemp{ time: measurement_time, // TODO just integer divide here? 
temp: val as f32, }; match state_button.state() { State::RUNNING => plot.add_measurement(measurement, &mut lcd), State::RESETTED => { plot.set_measurement(model::TimeTemp{time: 0f32, temp: measurement.temp}, &mut lcd); plot.update_ramp_start(&mut lcd); }, State::STOPPED => {}, } if let State::RUNNING = state_button.state() { smoother.push_value(val); let smooth_temp = smoother.get_average(); let ramp_target_temp = plot.ramp().evaluate(measurement_time); let error = ramp_target_temp - smooth_temp; let pid_value = pid_controller.cycle(error, &delta_measurement); duty_cycle = (util::clamp(pid_value, 0f32, 1f32) * 1000f32) as usize; lcd.draw_point_color( Point{ x: plot.transform_time(measurement_time), y: plot::Plot::transform_ranges(model::Range{from: 0f32, to: 1f32}, plot::Y_PX_RANGE, pid_value) }, Layer::Layer2, Color::from_hex(0x0000ff).to_argb1555()); //let pid_clamped = util::clamp(pid_value, 0f32, 1f32); //temp += (pid_clamped - 0.3) * delta_measurement.to_secs() * 1.0; } else { duty_cycle = 0; } last_measurement_system_time = ticks; } pwm_gpio.set(ticks.to_msecs() % 1000 < duty_cycle); // poll for new touch data let mut touches = false; for touch in &touch::touches(&mut i2c_3).unwrap() { touches = true; let touch = model::Touch{ location: Point{ x: touch.x, y: touch.y }, time: ticks }; let touch_event = match last_touch_event { Some(TouchDown(_)) | Some(TouchMove(_)) => TouchMove(touch), None | Some(TouchUp(_)) => TouchDown(touch), }; //Do not allow changing ramp in stopped state match state_button.state() { State::RUNNING | State::RESETTED => plot.handle_touch(touch_event, &mut lcd), _ => {}, } last_touch_event = Some(touch_event); } // Deliver touch-up events if!touches && last_touch_event.is_some() { let touch_event = match last_touch_event.unwrap() { TouchDown(t) | TouchMove(t) if time::delta(&ticks,&t.time).to_msecs() > 200 => { if let Some(new_state) = state_button.handle_touch(TouchUp(t), &mut lcd) { match new_state { State::RESETTED => { break'mainloop; }, State::RUNNING => { measurement_start_system_time = SYSCLOCK.get_ticks(); last_measurement_system_time = measurement_start_system_time; }, _ => {}, } } plot.handle_touch(TouchUp(t), &mut lcd); None }, x => Some(x), }; last_touch_event = touch_event; } } } } /// Initialize temperature sensor on SPI_2 port (GPIO pins) /// IMPORTANT: "Table 3. Arduino connectors" in the discovery board datasheet // states SPI2_NSS is pin D10. 
This is wrong. // SPI2_NSS is D5, as seen in "Figure 25: Arduino Uno connectors" fn temp_sensor_init_spi2(gpio: &mut Gpio, spi_2: &'static mut Spi) -> Max6675 { let sck_pin = (gpio::Port::PortI, gpio::Pin::Pin1); let miso_pin = (gpio::Port::PortB, gpio::Pin::Pin14); let mosi_pin = (gpio::Port::PortB, gpio::Pin::Pin15); let nss_pin = (gpio::Port::PortI, gpio::Pin::Pin0); gpio.to_alternate_function(sck_pin, gpio::AlternateFunction::AF5, gpio::OutputType::PushPull, gpio::OutputSpeed::High, gpio::Resistor::NoPull) .expect("Could not configure sck"); // TODO the MOSI pin is not strictly necessary for MAX6675 gpio.to_alternate_function(mosi_pin, gpio::AlternateFunction::AF5, gpio::OutputType::PushPull, gpio::OutputSpeed::High, gpio::Resistor::NoPull) .expect("Could not configure mosi"); gpio.to_alternate_function(miso_pin, gpio::AlternateFunction::AF5, gpio::OutputType::PushPull, gpio::OutputSpeed::High, gpio::Resistor::NoPull) .expect("Could not configure MISO pin"); gpio.to_alternate_function(nss_pin, gpio::AlternateFunction::AF5, gpio::OutputType::PushPull, gpio::OutputSpeed::High, gpio::Resistor::NoPull) .expect("Could not configure NSS"); return Max6675::init(spi_2); }
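The heater output in the main loop above is a simple bit-banged PWM: `pwm_gpio.set(ticks.to_msecs() % 1000 < duty_cycle)` holds the pin high while the current millisecond within each one-second window is below the duty cycle. A minimal sketch of just that gating logic (a hypothetical free function, not part of this firmware):

```rust
// Sketch of the software-PWM gating used above. `duty_cycle_ms` plays the role of the
// `duty_cycle` variable in the main loop (0..=1000 "on" milliseconds per second).
fn heater_on(now_ms: u64, duty_cycle_ms: u64) -> bool {
    now_ms % 1000 < duty_cycle_ms
}
```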
gpio::Resistor::NoPull) .expect("Could not configure pwm pin"); let axis_color = Color::from_hex(0xffffff);
random_line_split
main.rs
#![no_std] #![no_main] #![feature(asm)] #![feature(collections)] extern crate stm32f7_discovery as stm32f7; extern crate collections; extern crate r0; pub mod plot; pub mod model; pub mod temp_sensor; pub mod time; pub mod util; pub mod pid; pub mod ramp; pub mod state_button; mod leak; use stm32f7::{system_clock,board,embedded,sdram,lcd,touch,i2c}; use stm32f7::lcd::*; use embedded::interfaces::gpio; use embedded::interfaces::gpio::{Gpio}; use board::spi::Spi; use time::*; use util::*; use plot::DragDirection; use embedded::util::delay; use model::TouchEvent::*; use collections::*; use collections::boxed::Box; use leak::Leak; use self::temp_sensor::{TemperatureSensor,Max6675}; use state_button::State; static TTF: &[u8] = include_bytes!("RobotoMono-Bold.ttf"); #[no_mangle] pub unsafe extern "C" fn reset() { extern "C" { static __DATA_LOAD: u32; static __DATA_END: u32; static mut __DATA_START: u32; static mut __BSS_START: u32; static mut __BSS_END: u32; } let data_load = &__DATA_LOAD; let data_start = &mut __DATA_START; let data_end = &__DATA_END; let bss_start = &mut __BSS_START; let bss_end = &__BSS_END; r0::init_data(data_start, data_end, data_load); r0::zero_bss(bss_start, bss_end); stm32f7::heap::init(); // enable floating point unit let scb = stm32f7::cortex_m::peripheral::scb_mut(); scb.cpacr.modify(|v| v | 0b1111 << 20); asm!("DSB; ISB;"::::"volatile"); // pipeline flush main(board::hw()); } // WORKAROUND: rust compiler will inline & reorder fp instructions into #[inline(never)] // reset() before the FPU is initialized fn
(hw: board::Hardware) ->! { let board::Hardware { rcc, pwr, flash, fmc, ltdc, gpio_a, gpio_b, gpio_c, gpio_d, gpio_e, gpio_f, gpio_g, gpio_h, gpio_i, gpio_j, gpio_k, spi_2, i2c_3, .. } = hw; let mut gpio = Gpio::new(gpio_a, gpio_b, gpio_c, gpio_d, gpio_e, gpio_f, gpio_g, gpio_h, gpio_i, gpio_j, gpio_k); system_clock::init(rcc, pwr, flash); // Peripheral clock configuration { // enable all gpio ports rcc.ahb1enr.update(|r| { r.set_gpioaen(true); r.set_gpioben(true); r.set_gpiocen(true); r.set_gpioden(true); r.set_gpioeen(true); r.set_gpiofen(true); r.set_gpiogen(true); r.set_gpiohen(true); r.set_gpioien(true); r.set_gpiojen(true); r.set_gpioken(true); }); // Enable SPI_2 rcc.apb1enr.update(|apb1enr| { apb1enr.set_spi2en(true); }); delay(1); } // i2c configuration i2c::init_pins_and_clocks(rcc, &mut gpio); let mut i2c_3 = i2c::init(i2c_3); i2c_3.test_1(); i2c_3.test_2(); let mut temp_sensor = temp_sensor_init_spi2(&mut gpio, spi_2); // init sdram (needed for display buffer) sdram::init(rcc, fmc, &mut gpio); let pwm_pin = (gpio::Port::PortI, gpio::Pin::Pin2); let mut pwm_gpio = gpio.to_output(pwm_pin, gpio::OutputType::PushPull, gpio::OutputSpeed::High, gpio::Resistor::NoPull) .expect("Could not configure pwm pin"); let axis_color = Color::from_hex(0xffffff); let drag_color = Color::from_hex(0x000000); let grid_color = Color::from_hex(0x444444); // lcd controller let mut lcd = lcd::init(ltdc, rcc, &mut gpio); touch::check_family_id(&mut i2c_3).unwrap(); loop { SYSCLOCK.reset(); lcd.clear_screen(); lcd.set_background_color(Color::from_hex(0x000000)); let plot_font = Box::new(Font::new(TTF, 11).unwrap()).leak(); let rtval_font = Box::new(Font::new(TTF, 14).unwrap()).leak(); let mut plot = plot::Plot::new(model::Range::new(0f32, (20*60) as f32), model::Range::new(0f32, 200f32), plot_font, rtval_font, axis_color, grid_color, drag_color, 80, // drag timeout ); plot.draw_axis(&mut lcd); //let mut pid_controller = pid::PIDController::new(0.3f32, 0.0f32, 0.0f32); //let mut pid_controller = pid::PIDController::new(0.1f32, 0.0f32, 0.3f32); // Definitely better than first, but overshooting //let mut pid_controller = pid::PIDController::new(0.2f32, 0.0f32, 0.3f32); // Not much different let mut pid_controller = pid::PIDController::new(0.2f32, 0.0f32, 0.6f32); // Not much different let mut smoother = pid::Smoother::new(10); let mut measurement_start_system_time = SYSCLOCK.get_ticks(); let mut last_measurement_system_time = SYSCLOCK.get_ticks(); let mut duty_cycle: usize = 0; let mut temp = 20f32; let mut state_button = state_button::StateButton::new( Color::from_hex(0x222222), Rect{origin: Point{x: 440, y: 0}, width: 40, height: 40} ); state_button.render(&mut lcd); let mut last_touch_event = None; 'mainloop: loop { let ticks = SYSCLOCK.get_ticks(); let delta_measurement = time::delta_checked(&last_measurement_system_time, &ticks); if delta_measurement.to_msecs() >= 500 { let val = temp_sensor.read(); let measurement_time = time::delta_checked(&measurement_start_system_time, &ticks).to_secs(); let measurement = model::TimeTemp{ time: measurement_time, // TODO just integer divide here? 
temp: val as f32, }; match state_button.state() { State::RUNNING => plot.add_measurement(measurement, &mut lcd), State::RESETTED => { plot.set_measurement(model::TimeTemp{time: 0f32, temp: measurement.temp}, &mut lcd); plot.update_ramp_start(&mut lcd); }, State::STOPPED => {}, } if let State::RUNNING = state_button.state() { smoother.push_value(val); let smooth_temp = smoother.get_average(); let ramp_target_temp = plot.ramp().evaluate(measurement_time); let error = ramp_target_temp - smooth_temp; let pid_value = pid_controller.cycle(error, &delta_measurement); duty_cycle = (util::clamp(pid_value, 0f32, 1f32) * 1000f32) as usize; lcd.draw_point_color( Point{ x: plot.transform_time(measurement_time), y: plot::Plot::transform_ranges(model::Range{from: 0f32, to: 1f32}, plot::Y_PX_RANGE, pid_value) }, Layer::Layer2, Color::from_hex(0x0000ff).to_argb1555()); //let pid_clamped = util::clamp(pid_value, 0f32, 1f32); //temp += (pid_clamped - 0.3) * delta_measurement.to_secs() * 1.0; } else { duty_cycle = 0; } last_measurement_system_time = ticks; } pwm_gpio.set(ticks.to_msecs() % 1000 < duty_cycle); // poll for new touch data let mut touches = false; for touch in &touch::touches(&mut i2c_3).unwrap() { touches = true; let touch = model::Touch{ location: Point{ x: touch.x, y: touch.y }, time: ticks }; let touch_event = match last_touch_event { Some(TouchDown(_)) | Some(TouchMove(_)) => TouchMove(touch), None | Some(TouchUp(_)) => TouchDown(touch), }; //Do not allow changing ramp in stopped state match state_button.state() { State::RUNNING | State::RESETTED => plot.handle_touch(touch_event, &mut lcd), _ => {}, } last_touch_event = Some(touch_event); } // Deliver touch-up events if!touches && last_touch_event.is_some() { let touch_event = match last_touch_event.unwrap() { TouchDown(t) | TouchMove(t) if time::delta(&ticks,&t.time).to_msecs() > 200 => { if let Some(new_state) = state_button.handle_touch(TouchUp(t), &mut lcd) { match new_state { State::RESETTED => { break'mainloop; }, State::RUNNING => { measurement_start_system_time = SYSCLOCK.get_ticks(); last_measurement_system_time = measurement_start_system_time; }, _ => {}, } } plot.handle_touch(TouchUp(t), &mut lcd); None }, x => Some(x), }; last_touch_event = touch_event; } } } } /// Initialize temperature sensor on SPI_2 port (GPIO pins) /// IMPORTANT: "Table 3. Arduino connectors" in the discovery board datasheet // states SPI2_NSS is pin D10. 
This is wrong. // SPI2_NSS is D5, as seen in "Figure 25: Arduino Uno connectors" fn temp_sensor_init_spi2(gpio: &mut Gpio, spi_2: &'static mut Spi) -> Max6675 { let sck_pin = (gpio::Port::PortI, gpio::Pin::Pin1); let miso_pin = (gpio::Port::PortB, gpio::Pin::Pin14); let mosi_pin = (gpio::Port::PortB, gpio::Pin::Pin15); let nss_pin = (gpio::Port::PortI, gpio::Pin::Pin0); gpio.to_alternate_function(sck_pin, gpio::AlternateFunction::AF5, gpio::OutputType::PushPull, gpio::OutputSpeed::High, gpio::Resistor::NoPull) .expect("Could not configure sck"); // TODO the MOSI pin is not strictly necessary for MAX6675 gpio.to_alternate_function(mosi_pin, gpio::AlternateFunction::AF5, gpio::OutputType::PushPull, gpio::OutputSpeed::High, gpio::Resistor::NoPull) .expect("Could not configure mosi"); gpio.to_alternate_function(miso_pin, gpio::AlternateFunction::AF5, gpio::OutputType::PushPull, gpio::OutputSpeed::High, gpio::Resistor::NoPull) .expect("Could not configure MISO pin"); gpio.to_alternate_function(nss_pin, gpio::AlternateFunction::AF5, gpio::OutputType::PushPull, gpio::OutputSpeed::High, gpio::Resistor::NoPull) .expect("Could not configure NSS"); return Max6675::init(spi_2); }
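The touch handling above classifies each reported touch point against the previous event: a point arriving while a gesture is in progress becomes `TouchMove`, otherwise `TouchDown`; `TouchUp` is synthesized later, once no touches have been reported for roughly 200 ms. A self-contained sketch of that classification step with stand-in types (the firmware uses `model::Touch` / `model::TouchEvent` instead):

```rust
// Stand-ins for model::Touch / model::TouchEvent, just so the sketch compiles on its own.
#[derive(Clone, Copy)]
struct Touch; // the real type carries a location and a timestamp

#[derive(Clone, Copy)]
enum TouchEvent {
    TouchDown(Touch),
    TouchMove(Touch),
    TouchUp(Touch),
}

// Mirrors the `match last_touch_event` in the main loop above.
fn classify(last: Option<TouchEvent>, touch: Touch) -> TouchEvent {
    match last {
        // A gesture is already in progress: this sample continues it.
        Some(TouchEvent::TouchDown(_)) | Some(TouchEvent::TouchMove(_)) => TouchEvent::TouchMove(touch),
        // No active gesture (or the previous one ended): start a new one.
        None | Some(TouchEvent::TouchUp(_)) => TouchEvent::TouchDown(touch),
    }
}
```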
main
identifier_name
main.rs
#![no_std] #![no_main] #![feature(asm)] #![feature(collections)] extern crate stm32f7_discovery as stm32f7; extern crate collections; extern crate r0; pub mod plot; pub mod model; pub mod temp_sensor; pub mod time; pub mod util; pub mod pid; pub mod ramp; pub mod state_button; mod leak; use stm32f7::{system_clock,board,embedded,sdram,lcd,touch,i2c}; use stm32f7::lcd::*; use embedded::interfaces::gpio; use embedded::interfaces::gpio::{Gpio}; use board::spi::Spi; use time::*; use util::*; use plot::DragDirection; use embedded::util::delay; use model::TouchEvent::*; use collections::*; use collections::boxed::Box; use leak::Leak; use self::temp_sensor::{TemperatureSensor,Max6675}; use state_button::State; static TTF: &[u8] = include_bytes!("RobotoMono-Bold.ttf"); #[no_mangle] pub unsafe extern "C" fn reset()
// enable floating point unit let scb = stm32f7::cortex_m::peripheral::scb_mut(); scb.cpacr.modify(|v| v | 0b1111 << 20); asm!("DSB; ISB;"::::"volatile"); // pipeline flush main(board::hw()); } // WORKAROUND: rust compiler will inline & reorder fp instructions into #[inline(never)] // reset() before the FPU is initialized fn main(hw: board::Hardware) ->! { let board::Hardware { rcc, pwr, flash, fmc, ltdc, gpio_a, gpio_b, gpio_c, gpio_d, gpio_e, gpio_f, gpio_g, gpio_h, gpio_i, gpio_j, gpio_k, spi_2, i2c_3, .. } = hw; let mut gpio = Gpio::new(gpio_a, gpio_b, gpio_c, gpio_d, gpio_e, gpio_f, gpio_g, gpio_h, gpio_i, gpio_j, gpio_k); system_clock::init(rcc, pwr, flash); // Peripheral clock configuration { // enable all gpio ports rcc.ahb1enr.update(|r| { r.set_gpioaen(true); r.set_gpioben(true); r.set_gpiocen(true); r.set_gpioden(true); r.set_gpioeen(true); r.set_gpiofen(true); r.set_gpiogen(true); r.set_gpiohen(true); r.set_gpioien(true); r.set_gpiojen(true); r.set_gpioken(true); }); // Enable SPI_2 rcc.apb1enr.update(|apb1enr| { apb1enr.set_spi2en(true); }); delay(1); } // i2c configuration i2c::init_pins_and_clocks(rcc, &mut gpio); let mut i2c_3 = i2c::init(i2c_3); i2c_3.test_1(); i2c_3.test_2(); let mut temp_sensor = temp_sensor_init_spi2(&mut gpio, spi_2); // init sdram (needed for display buffer) sdram::init(rcc, fmc, &mut gpio); let pwm_pin = (gpio::Port::PortI, gpio::Pin::Pin2); let mut pwm_gpio = gpio.to_output(pwm_pin, gpio::OutputType::PushPull, gpio::OutputSpeed::High, gpio::Resistor::NoPull) .expect("Could not configure pwm pin"); let axis_color = Color::from_hex(0xffffff); let drag_color = Color::from_hex(0x000000); let grid_color = Color::from_hex(0x444444); // lcd controller let mut lcd = lcd::init(ltdc, rcc, &mut gpio); touch::check_family_id(&mut i2c_3).unwrap(); loop { SYSCLOCK.reset(); lcd.clear_screen(); lcd.set_background_color(Color::from_hex(0x000000)); let plot_font = Box::new(Font::new(TTF, 11).unwrap()).leak(); let rtval_font = Box::new(Font::new(TTF, 14).unwrap()).leak(); let mut plot = plot::Plot::new(model::Range::new(0f32, (20*60) as f32), model::Range::new(0f32, 200f32), plot_font, rtval_font, axis_color, grid_color, drag_color, 80, // drag timeout ); plot.draw_axis(&mut lcd); //let mut pid_controller = pid::PIDController::new(0.3f32, 0.0f32, 0.0f32); //let mut pid_controller = pid::PIDController::new(0.1f32, 0.0f32, 0.3f32); // Definitely better than first, but overshooting //let mut pid_controller = pid::PIDController::new(0.2f32, 0.0f32, 0.3f32); // Not much different let mut pid_controller = pid::PIDController::new(0.2f32, 0.0f32, 0.6f32); // Not much different let mut smoother = pid::Smoother::new(10); let mut measurement_start_system_time = SYSCLOCK.get_ticks(); let mut last_measurement_system_time = SYSCLOCK.get_ticks(); let mut duty_cycle: usize = 0; let mut temp = 20f32; let mut state_button = state_button::StateButton::new( Color::from_hex(0x222222), Rect{origin: Point{x: 440, y: 0}, width: 40, height: 40} ); state_button.render(&mut lcd); let mut last_touch_event = None; 'mainloop: loop { let ticks = SYSCLOCK.get_ticks(); let delta_measurement = time::delta_checked(&last_measurement_system_time, &ticks); if delta_measurement.to_msecs() >= 500 { let val = temp_sensor.read(); let measurement_time = time::delta_checked(&measurement_start_system_time, &ticks).to_secs(); let measurement = model::TimeTemp{ time: measurement_time, // TODO just integer divide here? 
temp: val as f32, }; match state_button.state() { State::RUNNING => plot.add_measurement(measurement, &mut lcd), State::RESETTED => { plot.set_measurement(model::TimeTemp{time: 0f32, temp: measurement.temp}, &mut lcd); plot.update_ramp_start(&mut lcd); }, State::STOPPED => {}, } if let State::RUNNING = state_button.state() { smoother.push_value(val); let smooth_temp = smoother.get_average(); let ramp_target_temp = plot.ramp().evaluate(measurement_time); let error = ramp_target_temp - smooth_temp; let pid_value = pid_controller.cycle(error, &delta_measurement); duty_cycle = (util::clamp(pid_value, 0f32, 1f32) * 1000f32) as usize; lcd.draw_point_color( Point{ x: plot.transform_time(measurement_time), y: plot::Plot::transform_ranges(model::Range{from: 0f32, to: 1f32}, plot::Y_PX_RANGE, pid_value) }, Layer::Layer2, Color::from_hex(0x0000ff).to_argb1555()); //let pid_clamped = util::clamp(pid_value, 0f32, 1f32); //temp += (pid_clamped - 0.3) * delta_measurement.to_secs() * 1.0; } else { duty_cycle = 0; } last_measurement_system_time = ticks; } pwm_gpio.set(ticks.to_msecs() % 1000 < duty_cycle); // poll for new touch data let mut touches = false; for touch in &touch::touches(&mut i2c_3).unwrap() { touches = true; let touch = model::Touch{ location: Point{ x: touch.x, y: touch.y }, time: ticks }; let touch_event = match last_touch_event { Some(TouchDown(_)) | Some(TouchMove(_)) => TouchMove(touch), None | Some(TouchUp(_)) => TouchDown(touch), }; //Do not allow changing ramp in stopped state match state_button.state() { State::RUNNING | State::RESETTED => plot.handle_touch(touch_event, &mut lcd), _ => {}, } last_touch_event = Some(touch_event); } // Deliver touch-up events if!touches && last_touch_event.is_some() { let touch_event = match last_touch_event.unwrap() { TouchDown(t) | TouchMove(t) if time::delta(&ticks,&t.time).to_msecs() > 200 => { if let Some(new_state) = state_button.handle_touch(TouchUp(t), &mut lcd) { match new_state { State::RESETTED => { break'mainloop; }, State::RUNNING => { measurement_start_system_time = SYSCLOCK.get_ticks(); last_measurement_system_time = measurement_start_system_time; }, _ => {}, } } plot.handle_touch(TouchUp(t), &mut lcd); None }, x => Some(x), }; last_touch_event = touch_event; } } } } /// Initialize temperature sensor on SPI_2 port (GPIO pins) /// IMPORTANT: "Table 3. Arduino connectors" in the discovery board datasheet // states SPI2_NSS is pin D10. 
This is wrong. // SPI2_NSS is D5, as seen in "Figure 25: Arduino Uno connectors" fn temp_sensor_init_spi2(gpio: &mut Gpio, spi_2: &'static mut Spi) -> Max6675 { let sck_pin = (gpio::Port::PortI, gpio::Pin::Pin1); let miso_pin = (gpio::Port::PortB, gpio::Pin::Pin14); let mosi_pin = (gpio::Port::PortB, gpio::Pin::Pin15); let nss_pin = (gpio::Port::PortI, gpio::Pin::Pin0); gpio.to_alternate_function(sck_pin, gpio::AlternateFunction::AF5, gpio::OutputType::PushPull, gpio::OutputSpeed::High, gpio::Resistor::NoPull) .expect("Could not configure sck"); // TODO the MOSI pin is not strictly necessary for MAX6675 gpio.to_alternate_function(mosi_pin, gpio::AlternateFunction::AF5, gpio::OutputType::PushPull, gpio::OutputSpeed::High, gpio::Resistor::NoPull) .expect("Could not configure mosi"); gpio.to_alternate_function(miso_pin, gpio::AlternateFunction::AF5, gpio::OutputType::PushPull, gpio::OutputSpeed::High, gpio::Resistor::NoPull) .expect("Could not configure MISO pin"); gpio.to_alternate_function(nss_pin, gpio::AlternateFunction::AF5, gpio::OutputType::PushPull, gpio::OutputSpeed::High, gpio::Resistor::NoPull) .expect("Could not configure NSS"); return Max6675::init(spi_2); }
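Every 500 ms the loop above turns the temperature error into a duty cycle: the smoothed reading is compared against the ramp's target at the current time, fed through `pid::PIDController::cycle`, clamped to [0, 1] with `util::clamp`, and scaled to milliseconds out of a 1000 ms window. A self-contained sketch of that step, with the project's PID controller replaced by a proportional-only stand-in for illustration:

```rust
// Sketch only: the real firmware uses pid::PIDController (gains 0.2 / 0.0 / 0.6)
// and util::clamp; here a bare proportional term stands in for the controller.
fn control_step(ramp_target_temp: f32, smooth_temp: f32, p_gain: f32) -> usize {
    let error = ramp_target_temp - smooth_temp; // target minus smoothed measurement
    let pid_value = p_gain * error;             // stand-in controller output
    let clamped = pid_value.max(0.0).min(1.0);  // clamp to [0, 1]
    (clamped * 1000.0) as usize                 // "on" milliseconds per 1000 ms window
}
```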
{ extern "C" { static __DATA_LOAD: u32; static __DATA_END: u32; static mut __DATA_START: u32; static mut __BSS_START: u32; static mut __BSS_END: u32; } let data_load = &__DATA_LOAD; let data_start = &mut __DATA_START; let data_end = &__DATA_END; let bss_start = &mut __BSS_START; let bss_end = &__BSS_END; r0::init_data(data_start, data_end, data_load); r0::zero_bss(bss_start, bss_end); stm32f7::heap::init();
identifier_body
system_information.rs
use crate::{SMBiosStruct, UndefinedStruct}; use serde::{ser::SerializeStruct, Serialize, Serializer}; use core::{ array::TryFromSliceError, convert::{TryFrom, TryInto}, fmt, ops::Deref, any }; #[cfg(feature = "no_std")] use alloc::{string::String, format}; /// # System Information (Type 1) /// /// The information in this structure defines attributes of the overall system and is intended to be associated /// with the Component ID group of the system’s MIF. An SMBIOS implementation is associated with a single /// system instance and contains one and only one System Information (Type 1) structure. /// /// Compliant with: /// DMTF SMBIOS Reference Specification 3.4.0 (DSP0134) /// Document Date: 2020-07-17 pub struct SMBiosSystemInformation<'a> { parts: &'a UndefinedStruct, } impl<'a> SMBiosStruct<'a> for SMBiosSystemInformation<'a> { const STRUCT_TYPE: u8 = 1u8; fn new(parts: &'a UndefinedStruct) -> Self {
fn parts(&self) -> &'a UndefinedStruct { self.parts } } impl<'a> SMBiosSystemInformation<'a> { /// Manufacturer pub fn manufacturer(&self) -> Option<String> { self.parts.get_field_string(0x04) } /// Product name pub fn product_name(&self) -> Option<String> { self.parts.get_field_string(0x05) } /// Version pub fn version(&self) -> Option<String> { self.parts.get_field_string(0x06) } /// Serial number pub fn serial_number(&self) -> Option<String> { self.parts.get_field_string(0x07) } /// System UUID pub fn uuid(&self) -> Option<SystemUuidData> { self.parts .get_field_data(0x08, 0x18) .map(|raw| SystemUuidData::try_from(raw).expect("A GUID is 0x10 bytes")) } /// Wake-up type /// /// Identifies the event that caused the system to power up. pub fn wakeup_type(&self) -> Option<SystemWakeUpTypeData> { self.parts .get_field_byte(0x18) .map(|raw| SystemWakeUpTypeData::from(raw)) } /// SKU Number /// /// This text string identifies a particular computer /// configuration for sale. It is sometimes also /// called a product ID or purchase order number. /// This number is frequently found in existing /// fields, but there is no standard format. /// Typically for a given system board from a /// given OEM, there are tens of unique /// processor, memory, hard drive, and optical /// drive configurations. pub fn sku_number(&self) -> Option<String> { self.parts.get_field_string(0x19) } /// Family /// /// This text string identifies the family to which a /// particular computer belongs. A family refers to /// a set of computers that are similar but not /// identical from a hardware or software point of /// view. Typically, a family is composed of /// different computer models, which have /// different configurations and pricing points. /// Computers in the same family often have /// similar branding and cosmetic features. 
pub fn family(&self) -> Option<String> { self.parts.get_field_string(0x1A) } } impl fmt::Debug for SMBiosSystemInformation<'_> { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt.debug_struct(any::type_name::<SMBiosSystemInformation<'_>>()) .field("header", &self.parts.header) .field("manufacturer", &self.manufacturer()) .field("product_name", &self.product_name()) .field("version", &self.version()) .field("serial_number", &self.serial_number()) .field("uuid", &self.uuid()) .field("wakeup_type", &self.wakeup_type()) .field("sku_number", &self.sku_number()) .field("family", &self.family()) .finish() } } impl Serialize for SMBiosSystemInformation<'_> { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { let mut state = serializer.serialize_struct("SMBiosSystemInformation", 9)?; state.serialize_field("header", &self.parts.header)?; state.serialize_field("manufacturer", &self.manufacturer())?; state.serialize_field("product_name", &self.product_name())?; state.serialize_field("version", &self.version())?; state.serialize_field("serial_number", &self.serial_number())?; state.serialize_field("uuid", &self.uuid())?; state.serialize_field("wakeup_type", &self.wakeup_type())?; state.serialize_field("sku_number", &self.sku_number())?; state.serialize_field("family", &self.family())?; state.end() } } /// # System - UUID Data #[derive(Serialize, Debug)] pub enum SystemUuidData { /// The ID is not currently present in the system, but it can be set IdNotPresentButSettable, /// The ID is not present in the system IdNotPresent, /// System UUID Uuid(SystemUuid), } impl SystemUuidData { fn new<'a>(array: &'a [u8; 0x10]) -> SystemUuidData { if array.iter().all(|&x| x == 0) { SystemUuidData::IdNotPresentButSettable } else if array.iter().all(|&x| x == 0xFF) { SystemUuidData::IdNotPresent } else { SystemUuidData::Uuid(SystemUuid::from(array)) } } } impl<'a> TryFrom<&'a [u8]> for SystemUuidData { type Error = TryFromSliceError; fn try_from(raw: &'a [u8]) -> Result<Self, Self::Error> { <&[u8; 0x10]>::try_from(raw).and_then(|array| Ok(SystemUuidData::new(array))) } } impl fmt::Display for SystemUuidData { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match &*self { SystemUuidData::IdNotPresent => write!(f, "IdNotPresent"), SystemUuidData::IdNotPresentButSettable => write!(f, "IdNotPresentButSettable"), SystemUuidData::Uuid(_system_uuid) => write!(f, "{}", &_system_uuid), } } } /// # System - UUID #[derive(PartialEq, Eq)] pub struct SystemUuid { /// Raw byte array for this UUID pub raw: [u8; 0x10], } impl SystemUuid { /// Low field of the timestamp pub fn time_low(&self) -> u32 { u32::from_le_bytes(self.raw[..0x4].try_into().expect("incorrect size")) } /// Middle field of the timestamp pub fn time_mid(&self) -> u16 { u16::from_le_bytes(self.raw[0x4..0x6].try_into().expect("incorrect size")) } /// High field of the timestamp multiplexed with the version number pub fn time_high_and_version(&self) -> u16 { u16::from_le_bytes(self.raw[0x6..0x8].try_into().expect("incorrect size")) } /// High field of the clock sequence multiplexed with the variant pub fn clock_seq_high_and_reserved(&self) -> u8 { self.raw[0x8] } /// Low field of the clock sequence pub fn clock_seq_low(&self) -> u8 { self.raw[0x9] } /// Spatially unique node identifier pub fn node(&self) -> &[u8; 6] { self.raw[0xA..0x10].try_into().expect("incorrect size") } } impl<'a> From<&'a [u8; 0x10]> for SystemUuid { fn from(raw: &'a [u8; 0x10]) -> Self { SystemUuid { raw: raw.clone() } } } impl 
fmt::Display for SystemUuid { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { // Example output: // "00360FE7-D4D5-11E5-9C43-BC0000F00000" // <TimeLow>-<TimeMid>-<TimeHiAndVersion>-<ClockSeqHiAndReserved><ClockSeqLow>-<Node[6]> write!( f, "{:08X}-{:04X}-{:04X}-{:02X}{:02X}-", self.time_low(), self.time_mid(), self.time_high_and_version(), self.clock_seq_high_and_reserved(), self.clock_seq_low() )?; self.node().iter().fold(Ok(()), |result, node_byte| { result.and_then(|_| write!(f, "{:02X}", node_byte)) }) } } impl fmt::Debug for SystemUuid { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", &self) } } impl Serialize for SystemUuid { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { serializer.serialize_str(format!("{}", self).as_str()) } } /// # System - Wake-up Type Data pub struct SystemWakeUpTypeData { /// Raw value /// /// _raw_ is most useful when _value_ is None. /// This is most likely to occur when the standard was updated but /// this library code has not been updated to match the current /// standard. pub raw: u8, /// The contained [SystemWakeUpType] value pub value: SystemWakeUpType, } impl fmt::Debug for SystemWakeUpTypeData { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt.debug_struct(any::type_name::<SystemWakeUpTypeData>()) .field("raw", &self.raw) .field("value", &self.value) .finish() } } impl Serialize for SystemWakeUpTypeData { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { let mut state = serializer.serialize_struct("SystemWakeUpTypeData", 2)?; state.serialize_field("raw", &self.raw)?; state.serialize_field("value", &self.value)?; state.end() } } impl fmt::Display for SystemWakeUpTypeData { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match &self.value { SystemWakeUpType::None => write!(f, "{}", &self.raw), _ => write!(f, "{:?}", &self.value), } } } impl Deref for SystemWakeUpTypeData { type Target = SystemWakeUpType; fn deref(&self) -> &Self::Target { &self.value } } /// # System - Wake-up Type #[derive(Serialize, Debug, PartialEq, Eq)] pub enum SystemWakeUpType { /// Other Other, /// Unknown Unknown, /// APM Timer ApmTimer, /// Modem Ring ModernRing, /// LAN Remote LanRemote, /// Power Switch PowerSwitch, /// PCI PME# PciPme, /// AC Power Restored ACPowerRestored, /// A value unknown to this standard, check the raw value None, } impl From<u8> for SystemWakeUpTypeData { fn from(raw: u8) -> Self { SystemWakeUpTypeData { value: match raw { 0x01 => SystemWakeUpType::Other, 0x02 => SystemWakeUpType::Unknown, 0x03 => SystemWakeUpType::ApmTimer, 0x04 => SystemWakeUpType::ModernRing, 0x05 => SystemWakeUpType::LanRemote, 0x06 => SystemWakeUpType::PowerSwitch, 0x07 => SystemWakeUpType::PciPme, 0x08 => SystemWakeUpType::ACPowerRestored, _ => SystemWakeUpType::None, }, raw, } } } #[cfg(test)] mod tests { use super::*; #[test] fn unit_test() { let struct_type1 = vec![ 0x01, 0x1B, 0x01, 0x00, 0x01, 0x02, 0x03, 0x04, 0xD2, 0x01, 0x25, 0x3E, 0x48, 0xE6, 0x11, 0xE8, 0xBA, 0xD3, 0x70, 0x20, 0x84, 0x0F, 0x9D, 0x47, 0x06, 0x05, 0x06, b'L', b'E', b'N', b'O', b'V', b'O', 0x00, b'3', b'0', b'B', b'F', b'S', b'0', b'7', b'5', b'0', b'0', 0x00, b'T', b'h', b'i', b'n', b'k', b'S', b't', b'a', b't', b'i', b'o', b'n', b' ', b'P', b'5', b'2', b'0', 0x00, b'M', b'N', b'0', b'6', b'P', b'Q', b'R', b'S', 0x00, b'L', b'E', b'N', b'O', b'V', b'O', b'_', b'M', b'T', b'_', b'3', b'0', b'B', b'F', b'_', b'B', b'U', b'_', b'T', b'h', b'i', b'n', b'k', b'_', 
b'F', b'M', b'_', b'T', b'h', b'i', b'n', b'k', b'S', b't', b'a', b't', b'i', b'o', b'n', b' ', b'P', b'5', b'2', b'0', 0x00, b'T', b'h', b'i', b'n', b'k', b'S', b't', b'a', b't', b'i', b'o', b'n', b' ', b'P', b'5', b'2', b'0', 0x00, 0x00, ]; let parts = UndefinedStruct::new(&struct_type1); let test_struct = SMBiosSystemInformation::new(&parts); assert_eq!(test_struct.manufacturer(), Some("LENOVO".to_string())); assert_eq!(test_struct.product_name(), Some("30BFS07500".to_string())); assert_eq!(test_struct.version(), Some("ThinkStation P520".to_string())); assert_eq!(test_struct.serial_number(), Some("MN06PQRS".to_string())); assert_eq!( format!("{:?}", test_struct.uuid()), "Some(Uuid(3E2501D2-E648-E811-BAD3-7020840F9D47))".to_string() ); assert_eq!( *test_struct.wakeup_type().unwrap(), SystemWakeUpType::PowerSwitch ); assert_eq!( test_struct.sku_number(), Some("LENOVO_MT_30BF_BU_Think_FM_ThinkStation P520".to_string()) ); assert_eq!(test_struct.family(), Some("ThinkStation P520".to_string())); } }
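A small usage sketch of the wake-up type decoding defined above (assuming the items of this module are in scope; 0x06 is the Power Switch code, the same mapping exercised by the unit test):

```rust
fn wakeup_type_example() {
    let wakeup = SystemWakeUpTypeData::from(0x06u8);
    assert_eq!(*wakeup, SystemWakeUpType::PowerSwitch); // Deref exposes the enum value
    assert_eq!(format!("{}", wakeup), "PowerSwitch");   // Display prints the variant name
    assert_eq!(wakeup.raw, 0x06);                       // the raw byte is kept alongside
}
```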
Self { parts } }
identifier_body
system_information.rs
use crate::{SMBiosStruct, UndefinedStruct}; use serde::{ser::SerializeStruct, Serialize, Serializer}; use core::{ array::TryFromSliceError, convert::{TryFrom, TryInto}, fmt, ops::Deref, any }; #[cfg(feature = "no_std")] use alloc::{string::String, format}; /// # System Information (Type 1) /// /// The information in this structure defines attributes of the overall system and is intended to be associated /// with the Component ID group of the system’s MIF. An SMBIOS implementation is associated with a single /// system instance and contains one and only one System Information (Type 1) structure. /// /// Compliant with: /// DMTF SMBIOS Reference Specification 3.4.0 (DSP0134) /// Document Date: 2020-07-17 pub struct SMBiosSystemInformation<'a> { parts: &'a UndefinedStruct, } impl<'a> SMBiosStruct<'a> for SMBiosSystemInformation<'a> { const STRUCT_TYPE: u8 = 1u8; fn new(parts: &'a UndefinedStruct) -> Self { Self { parts } } fn parts(&self) -> &'a UndefinedStruct { self.parts } } impl<'a> SMBiosSystemInformation<'a> { /// Manufacturer pub fn manufacturer(&self) -> Option<String> { self.parts.get_field_string(0x04) } /// Product name pub fn product_name(&self) -> Option<String> { self.parts.get_field_string(0x05) } /// Version pub fn version(&self) -> Option<String> { self.parts.get_field_string(0x06) } /// Serial number pub fn serial_number(&self) -> Option<String> { self.parts.get_field_string(0x07) } /// System UUID pub fn uuid(&self) -> Option<SystemUuidData> { self.parts .get_field_data(0x08, 0x18) .map(|raw| SystemUuidData::try_from(raw).expect("A GUID is 0x10 bytes")) } /// Wake-up type /// /// Identifies the event that caused the system to power up. pub fn wakeup_type(&self) -> Option<SystemWakeUpTypeData> { self.parts .get_field_byte(0x18) .map(|raw| SystemWakeUpTypeData::from(raw)) } /// SKU Number /// /// This text string identifies a particular computer /// configuration for sale. It is sometimes also /// called a product ID or purchase order number. /// This number is frequently found in existing /// fields, but there is no standard format. /// Typically for a given system board from a /// given OEM, there are tens of unique /// processor, memory, hard drive, and optical /// drive configurations. pub fn sku_number(&self) -> Option<String> { self.parts.get_field_string(0x19) } /// Family /// /// This text string identifies the family to which a /// particular computer belongs. A family refers to /// a set of computers that are similar but not /// identical from a hardware or software point of /// view. Typically, a family is composed of /// different computer models, which have /// different configurations and pricing points. /// Computers in the same family often have /// similar branding and cosmetic features. 
pub fn family(&self) -> Option<String> { self.parts.get_field_string(0x1A) } } impl fmt::Debug for SMBiosSystemInformation<'_> { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt.debug_struct(any::type_name::<SMBiosSystemInformation<'_>>()) .field("header", &self.parts.header) .field("manufacturer", &self.manufacturer()) .field("product_name", &self.product_name()) .field("version", &self.version()) .field("serial_number", &self.serial_number()) .field("uuid", &self.uuid()) .field("wakeup_type", &self.wakeup_type()) .field("sku_number", &self.sku_number()) .field("family", &self.family()) .finish() } } impl Serialize for SMBiosSystemInformation<'_> { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { let mut state = serializer.serialize_struct("SMBiosSystemInformation", 9)?; state.serialize_field("header", &self.parts.header)?; state.serialize_field("manufacturer", &self.manufacturer())?; state.serialize_field("product_name", &self.product_name())?; state.serialize_field("version", &self.version())?; state.serialize_field("serial_number", &self.serial_number())?; state.serialize_field("uuid", &self.uuid())?; state.serialize_field("wakeup_type", &self.wakeup_type())?; state.serialize_field("sku_number", &self.sku_number())?; state.serialize_field("family", &self.family())?; state.end() } } /// # System - UUID Data #[derive(Serialize, Debug)] pub enum SystemUuidData { /// The ID is not currently present in the system, but it can be set IdNotPresentButSettable, /// The ID is not present in the system IdNotPresent, /// System UUID Uuid(SystemUuid), } impl SystemUuidData { fn new<'a>(array: &'a [u8; 0x10]) -> SystemUuidData { if array.iter().all(|&x| x == 0) { SystemUuidData::IdNotPresentButSettable } else if array.iter().all(|&x| x == 0xFF) { SystemUuidData::IdNotPresent } else { SystemUuidData::Uuid(SystemUuid::from(array)) } } } impl<'a> TryFrom<&'a [u8]> for SystemUuidData { type Error = TryFromSliceError; fn try_from(raw: &'a [u8]) -> Result<Self, Self::Error> { <&[u8; 0x10]>::try_from(raw).and_then(|array| Ok(SystemUuidData::new(array))) } } impl fmt::Display for SystemUuidData { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match &*self { SystemUuidData::IdNotPresent => write!(f, "IdNotPresent"), SystemUuidData::IdNotPresentButSettable => write!(f, "IdNotPresentButSettable"), SystemUuidData::Uuid(_system_uuid) => write!(f, "{}", &_system_uuid), } } } /// # System - UUID #[derive(PartialEq, Eq)] pub struct SystemUuid { /// Raw byte array for this UUID pub raw: [u8; 0x10], } impl SystemUuid { /// Low field of the timestamp pub fn time_low(&self) -> u32 { u32::from_le_bytes(self.raw[..0x4].try_into().expect("incorrect size")) } /// Middle field of the timestamp pub fn time_mid(&self) -> u16 { u16::from_le_bytes(self.raw[0x4..0x6].try_into().expect("incorrect size")) } /// High field of the timestamp multiplexed with the version number pub fn time_high_and_version(&self) -> u16 { u16::from_le_bytes(self.raw[0x6..0x8].try_into().expect("incorrect size")) } /// High field of the clock sequence multiplexed with the variant pub fn clock_seq_high_and_reserved(&self) -> u8 { self.raw[0x8] } /// Low field of the clock sequence pub fn clock_seq_low(&self) -> u8 { self.raw[0x9] } /// Spatially unique node identifier pub fn node(&self) -> &[u8; 6] { self.raw[0xA..0x10].try_into().expect("incorrect size") } } impl<'a> From<&'a [u8; 0x10]> for SystemUuid { fn from(raw: &'a [u8; 0x10]) -> Self { SystemUuid { raw: raw.clone() } } } impl 
fmt::Display for SystemUuid { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { // Example output: // "00360FE7-D4D5-11E5-9C43-BC0000F00000" // <TimeLow>-<TimeMid>-<TimeHiAndVersion>-<ClockSeqHiAndReserved><ClockSeqLow>-<Node[6]> write!( f, "{:08X}-{:04X}-{:04X}-{:02X}{:02X}-", self.time_low(), self.time_mid(), self.time_high_and_version(), self.clock_seq_high_and_reserved(), self.clock_seq_low() )?; self.node().iter().fold(Ok(()), |result, node_byte| { result.and_then(|_| write!(f, "{:02X}", node_byte)) }) } } impl fmt::Debug for SystemUuid { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", &self) } } impl Serialize for SystemUuid { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { serializer.serialize_str(format!("{}", self).as_str()) } }
/// Raw value /// /// _raw_ is most useful when _value_ is None. /// This is most likely to occur when the standard was updated but /// this library code has not been updated to match the current /// standard. pub raw: u8, /// The contained [SystemWakeUpType] value pub value: SystemWakeUpType, } impl fmt::Debug for SystemWakeUpTypeData { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt.debug_struct(any::type_name::<SystemWakeUpTypeData>()) .field("raw", &self.raw) .field("value", &self.value) .finish() } } impl Serialize for SystemWakeUpTypeData { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { let mut state = serializer.serialize_struct("SystemWakeUpTypeData", 2)?; state.serialize_field("raw", &self.raw)?; state.serialize_field("value", &self.value)?; state.end() } } impl fmt::Display for SystemWakeUpTypeData { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match &self.value { SystemWakeUpType::None => write!(f, "{}", &self.raw), _ => write!(f, "{:?}", &self.value), } } } impl Deref for SystemWakeUpTypeData { type Target = SystemWakeUpType; fn deref(&self) -> &Self::Target { &self.value } } /// # System - Wake-up Type #[derive(Serialize, Debug, PartialEq, Eq)] pub enum SystemWakeUpType { /// Other Other, /// Unknown Unknown, /// APM Timer ApmTimer, /// Modem Ring ModernRing, /// LAN Remote LanRemote, /// Power Switch PowerSwitch, /// PCI PME# PciPme, /// AC Power Restored ACPowerRestored, /// A value unknown to this standard, check the raw value None, } impl From<u8> for SystemWakeUpTypeData { fn from(raw: u8) -> Self { SystemWakeUpTypeData { value: match raw { 0x01 => SystemWakeUpType::Other, 0x02 => SystemWakeUpType::Unknown, 0x03 => SystemWakeUpType::ApmTimer, 0x04 => SystemWakeUpType::ModernRing, 0x05 => SystemWakeUpType::LanRemote, 0x06 => SystemWakeUpType::PowerSwitch, 0x07 => SystemWakeUpType::PciPme, 0x08 => SystemWakeUpType::ACPowerRestored, _ => SystemWakeUpType::None, }, raw, } } } #[cfg(test)] mod tests { use super::*; #[test] fn unit_test() { let struct_type1 = vec![ 0x01, 0x1B, 0x01, 0x00, 0x01, 0x02, 0x03, 0x04, 0xD2, 0x01, 0x25, 0x3E, 0x48, 0xE6, 0x11, 0xE8, 0xBA, 0xD3, 0x70, 0x20, 0x84, 0x0F, 0x9D, 0x47, 0x06, 0x05, 0x06, b'L', b'E', b'N', b'O', b'V', b'O', 0x00, b'3', b'0', b'B', b'F', b'S', b'0', b'7', b'5', b'0', b'0', 0x00, b'T', b'h', b'i', b'n', b'k', b'S', b't', b'a', b't', b'i', b'o', b'n', b' ', b'P', b'5', b'2', b'0', 0x00, b'M', b'N', b'0', b'6', b'P', b'Q', b'R', b'S', 0x00, b'L', b'E', b'N', b'O', b'V', b'O', b'_', b'M', b'T', b'_', b'3', b'0', b'B', b'F', b'_', b'B', b'U', b'_', b'T', b'h', b'i', b'n', b'k', b'_', b'F', b'M', b'_', b'T', b'h', b'i', b'n', b'k', b'S', b't', b'a', b't', b'i', b'o', b'n', b' ', b'P', b'5', b'2', b'0', 0x00, b'T', b'h', b'i', b'n', b'k', b'S', b't', b'a', b't', b'i', b'o', b'n', b' ', b'P', b'5', b'2', b'0', 0x00, 0x00, ]; let parts = UndefinedStruct::new(&struct_type1); let test_struct = SMBiosSystemInformation::new(&parts); assert_eq!(test_struct.manufacturer(), Some("LENOVO".to_string())); assert_eq!(test_struct.product_name(), Some("30BFS07500".to_string())); assert_eq!(test_struct.version(), Some("ThinkStation P520".to_string())); assert_eq!(test_struct.serial_number(), Some("MN06PQRS".to_string())); assert_eq!( format!("{:?}", test_struct.uuid()), "Some(Uuid(3E2501D2-E648-E811-BAD3-7020840F9D47))".to_string() ); assert_eq!( *test_struct.wakeup_type().unwrap(), SystemWakeUpType::PowerSwitch ); assert_eq!( test_struct.sku_number(), 
Some("LENOVO_MT_30BF_BU_Think_FM_ThinkStation P520".to_string()) ); assert_eq!(test_struct.family(), Some("ThinkStation P520".to_string())); } }
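Two usage sketches for the UUID handling defined above (items of this module assumed in scope): `SystemUuidData::new` maps an all-zero GUID to "settable but not present" and an all-0xFF GUID to "not present", and the `Display` impl renders the time fields little-endian with the node bytes verbatim, which is how the raw GUID bytes in the unit test come out as `3E2501D2-E648-E811-BAD3-7020840F9D47`.

```rust
fn uuid_sentinels_example() {
    // All zeros: ID settable but not present; all 0xFF: ID not present at all.
    let settable = SystemUuidData::try_from(&[0x00u8; 0x10][..]).unwrap();
    assert!(matches!(settable, SystemUuidData::IdNotPresentButSettable));
    let absent = SystemUuidData::try_from(&[0xFFu8; 0x10][..]).unwrap();
    assert!(matches!(absent, SystemUuidData::IdNotPresent));
}

fn uuid_display_example() {
    // Raw GUID bytes at offset 0x08 of the type-1 structure used in the unit test above.
    let raw: [u8; 0x10] = [
        0xD2, 0x01, 0x25, 0x3E, 0x48, 0xE6, 0x11, 0xE8,
        0xBA, 0xD3, 0x70, 0x20, 0x84, 0x0F, 0x9D, 0x47,
    ];
    let uuid = SystemUuid::from(&raw);
    assert_eq!(format!("{}", uuid), "3E2501D2-E648-E811-BAD3-7020840F9D47");
}
```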
/// # System - Wake-up Type Data pub struct SystemWakeUpTypeData {
random_line_split
system_information.rs
use crate::{SMBiosStruct, UndefinedStruct}; use serde::{ser::SerializeStruct, Serialize, Serializer}; use core::{ array::TryFromSliceError, convert::{TryFrom, TryInto}, fmt, ops::Deref, any }; #[cfg(feature = "no_std")] use alloc::{string::String, format}; /// # System Information (Type 1) /// /// The information in this structure defines attributes of the overall system and is intended to be associated /// with the Component ID group of the system’s MIF. An SMBIOS implementation is associated with a single /// system instance and contains one and only one System Information (Type 1) structure. /// /// Compliant with: /// DMTF SMBIOS Reference Specification 3.4.0 (DSP0134) /// Document Date: 2020-07-17 pub struct SMBiosSystemInformation<'a> { parts: &'a UndefinedStruct, } impl<'a> SMBiosStruct<'a> for SMBiosSystemInformation<'a> { const STRUCT_TYPE: u8 = 1u8; fn new(parts: &'a UndefinedStruct) -> Self { Self { parts } } fn parts(&self) -> &'a UndefinedStruct { self.parts } } impl<'a> SMBiosSystemInformation<'a> { /// Manufacturer pub fn manufacturer(&self) -> Option<String> { self.parts.get_field_string(0x04) } /// Product name pub fn product_name(&self) -> Option<String> { self.parts.get_field_string(0x05) } /// Version pub fn version(&self) -> Option<String> { self.parts.get_field_string(0x06) } /// Serial number pub fn serial_number(&self) -> Option<String> { self.parts.get_field_string(0x07) } /// System UUID pub fn uuid(&self) -> Option<SystemUuidData> { self.parts .get_field_data(0x08, 0x18) .map(|raw| SystemUuidData::try_from(raw).expect("A GUID is 0x10 bytes")) } /// Wake-up type /// /// Identifies the event that caused the system to power up. pub fn wakeup_type(&self) -> Option<SystemWakeUpTypeData> { self.parts .get_field_byte(0x18) .map(|raw| SystemWakeUpTypeData::from(raw)) } /// SKU Number /// /// This text string identifies a particular computer /// configuration for sale. It is sometimes also /// called a product ID or purchase order number. /// This number is frequently found in existing /// fields, but there is no standard format. /// Typically for a given system board from a /// given OEM, there are tens of unique /// processor, memory, hard drive, and optical /// drive configurations. pub fn sku_number(&self) -> Option<String> { self.parts.get_field_string(0x19) } /// Family /// /// This text string identifies the family to which a /// particular computer belongs. A family refers to /// a set of computers that are similar but not /// identical from a hardware or software point of /// view. Typically, a family is composed of /// different computer models, which have /// different configurations and pricing points. /// Computers in the same family often have /// similar branding and cosmetic features. 
pub fn family(&self) -> Option<String> { self.parts.get_field_string(0x1A) } } impl fmt::Debug for SMBiosSystemInformation<'_> { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt.debug_struct(any::type_name::<SMBiosSystemInformation<'_>>()) .field("header", &self.parts.header) .field("manufacturer", &self.manufacturer()) .field("product_name", &self.product_name()) .field("version", &self.version()) .field("serial_number", &self.serial_number()) .field("uuid", &self.uuid()) .field("wakeup_type", &self.wakeup_type()) .field("sku_number", &self.sku_number()) .field("family", &self.family()) .finish() } } impl Serialize for SMBiosSystemInformation<'_> { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { let mut state = serializer.serialize_struct("SMBiosSystemInformation", 9)?; state.serialize_field("header", &self.parts.header)?; state.serialize_field("manufacturer", &self.manufacturer())?; state.serialize_field("product_name", &self.product_name())?; state.serialize_field("version", &self.version())?; state.serialize_field("serial_number", &self.serial_number())?; state.serialize_field("uuid", &self.uuid())?; state.serialize_field("wakeup_type", &self.wakeup_type())?; state.serialize_field("sku_number", &self.sku_number())?; state.serialize_field("family", &self.family())?; state.end() } } /// # System - UUID Data #[derive(Serialize, Debug)] pub enum SystemUuidData { /// The ID is not currently present in the system, but it can be set IdNotPresentButSettable, /// The ID is not present in the system IdNotPresent, /// System UUID Uuid(SystemUuid), } impl SystemUuidData { fn new<'a>(array: &'a [u8; 0x10]) -> SystemUuidData { if array.iter().all(|&x| x == 0) { SystemUuidData::IdNotPresentButSettable } else if array.iter().all(|&x| x == 0xFF) { SystemUuidData::IdNotPresent } else { SystemUuidData::Uuid(SystemUuid::from(array)) } } } impl<'a> TryFrom<&'a [u8]> for SystemUuidData { type Error = TryFromSliceError; fn try_from(raw: &'a [u8]) -> Result<Self, Self::Error> { <&[u8; 0x10]>::try_from(raw).and_then(|array| Ok(SystemUuidData::new(array))) } } impl fmt::Display for SystemUuidData { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match &*self { SystemUuidData::IdNotPresent => write!(f, "IdNotPresent"), SystemUuidData::IdNotPresentButSettable => write!(f, "IdNotPresentButSettable"), SystemUuidData::Uuid(_system_uuid) => write!(f, "{}", &_system_uuid), } } } /// # System - UUID #[derive(PartialEq, Eq)] pub struct SystemUuid { /// Raw byte array for this UUID pub raw: [u8; 0x10], } impl SystemUuid { /// Low field of the timestamp pub fn time_low(&self) -> u32 { u32::from_le_bytes(self.raw[..0x4].try_into().expect("incorrect size")) } /// Middle field of the timestamp pub fn time_mid(&self) -> u16 { u16::from_le_bytes(self.raw[0x4..0x6].try_into().expect("incorrect size")) } /// High field of the timestamp multiplexed with the version number pub fn time_high_and_version(&self) -> u16 { u16::from_le_bytes(self.raw[0x6..0x8].try_into().expect("incorrect size")) } /// High field of the clock sequence multiplexed with the variant pub fn clock_seq_high_and_reserved(&self) -> u8 { self.raw[0x8] } /// Low field of the clock sequence pub fn clock_seq_low(&self) -> u8 { self.raw[0x9] } /// Spatially unique node identifier pub fn node(&self) -> &[u8; 6] { self.raw[0xA..0x10].try_into().expect("incorrect size") } } impl<'a> From<&'a [u8; 0x10]> for SystemUuid { fn from(raw: &'a [u8; 0x10]) -> Self { SystemUuid { raw: raw.clone() } } } impl 
fmt::Display for SystemUuid { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { // Example output: // "00360FE7-D4D5-11E5-9C43-BC0000F00000" // <TimeLow>-<TimeMid>-<TimeHiAndVersion>-<ClockSeqHiAndReserved><ClockSeqLow>-<Node[6]> write!( f, "{:08X}-{:04X}-{:04X}-{:02X}{:02X}-", self.time_low(), self.time_mid(), self.time_high_and_version(), self.clock_seq_high_and_reserved(), self.clock_seq_low() )?; self.node().iter().fold(Ok(()), |result, node_byte| { result.and_then(|_| write!(f, "{:02X}", node_byte)) }) } } impl fmt::Debug for SystemUuid { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", &self) } } impl Serialize for SystemUuid { fn se
>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { serializer.serialize_str(format!("{}", self).as_str()) } } /// # System - Wake-up Type Data pub struct SystemWakeUpTypeData { /// Raw value /// /// _raw_ is most useful when _value_ is None. /// This is most likely to occur when the standard was updated but /// this library code has not been updated to match the current /// standard. pub raw: u8, /// The contained [SystemWakeUpType] value pub value: SystemWakeUpType, } impl fmt::Debug for SystemWakeUpTypeData { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt.debug_struct(any::type_name::<SystemWakeUpTypeData>()) .field("raw", &self.raw) .field("value", &self.value) .finish() } } impl Serialize for SystemWakeUpTypeData { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { let mut state = serializer.serialize_struct("SystemWakeUpTypeData", 2)?; state.serialize_field("raw", &self.raw)?; state.serialize_field("value", &self.value)?; state.end() } } impl fmt::Display for SystemWakeUpTypeData { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match &self.value { SystemWakeUpType::None => write!(f, "{}", &self.raw), _ => write!(f, "{:?}", &self.value), } } } impl Deref for SystemWakeUpTypeData { type Target = SystemWakeUpType; fn deref(&self) -> &Self::Target { &self.value } } /// # System - Wake-up Type #[derive(Serialize, Debug, PartialEq, Eq)] pub enum SystemWakeUpType { /// Other Other, /// Unknown Unknown, /// APM Timer ApmTimer, /// Modem Ring ModernRing, /// LAN Remote LanRemote, /// Power Switch PowerSwitch, /// PCI PME# PciPme, /// AC Power Restored ACPowerRestored, /// A value unknown to this standard, check the raw value None, } impl From<u8> for SystemWakeUpTypeData { fn from(raw: u8) -> Self { SystemWakeUpTypeData { value: match raw { 0x01 => SystemWakeUpType::Other, 0x02 => SystemWakeUpType::Unknown, 0x03 => SystemWakeUpType::ApmTimer, 0x04 => SystemWakeUpType::ModernRing, 0x05 => SystemWakeUpType::LanRemote, 0x06 => SystemWakeUpType::PowerSwitch, 0x07 => SystemWakeUpType::PciPme, 0x08 => SystemWakeUpType::ACPowerRestored, _ => SystemWakeUpType::None, }, raw, } } } #[cfg(test)] mod tests { use super::*; #[test] fn unit_test() { let struct_type1 = vec![ 0x01, 0x1B, 0x01, 0x00, 0x01, 0x02, 0x03, 0x04, 0xD2, 0x01, 0x25, 0x3E, 0x48, 0xE6, 0x11, 0xE8, 0xBA, 0xD3, 0x70, 0x20, 0x84, 0x0F, 0x9D, 0x47, 0x06, 0x05, 0x06, b'L', b'E', b'N', b'O', b'V', b'O', 0x00, b'3', b'0', b'B', b'F', b'S', b'0', b'7', b'5', b'0', b'0', 0x00, b'T', b'h', b'i', b'n', b'k', b'S', b't', b'a', b't', b'i', b'o', b'n', b' ', b'P', b'5', b'2', b'0', 0x00, b'M', b'N', b'0', b'6', b'P', b'Q', b'R', b'S', 0x00, b'L', b'E', b'N', b'O', b'V', b'O', b'_', b'M', b'T', b'_', b'3', b'0', b'B', b'F', b'_', b'B', b'U', b'_', b'T', b'h', b'i', b'n', b'k', b'_', b'F', b'M', b'_', b'T', b'h', b'i', b'n', b'k', b'S', b't', b'a', b't', b'i', b'o', b'n', b' ', b'P', b'5', b'2', b'0', 0x00, b'T', b'h', b'i', b'n', b'k', b'S', b't', b'a', b't', b'i', b'o', b'n', b' ', b'P', b'5', b'2', b'0', 0x00, 0x00, ]; let parts = UndefinedStruct::new(&struct_type1); let test_struct = SMBiosSystemInformation::new(&parts); assert_eq!(test_struct.manufacturer(), Some("LENOVO".to_string())); assert_eq!(test_struct.product_name(), Some("30BFS07500".to_string())); assert_eq!(test_struct.version(), Some("ThinkStation P520".to_string())); assert_eq!(test_struct.serial_number(), Some("MN06PQRS".to_string())); assert_eq!( format!("{:?}", test_struct.uuid()), 
"Some(Uuid(3E2501D2-E648-E811-BAD3-7020840F9D47))".to_string() ); assert_eq!( *test_struct.wakeup_type().unwrap(), SystemWakeUpType::PowerSwitch ); assert_eq!( test_struct.sku_number(), Some("LENOVO_MT_30BF_BU_Think_FM_ThinkStation P520".to_string()) ); assert_eq!(test_struct.family(), Some("ThinkStation P520".to_string())); } }
rialize<S
identifier_name
loader.rs
extern crate xml; use std; use std::fs::File; use std::path::{Path, PathBuf}; use std::ffi::OsStr; use std::io::{BufReader, Cursor, Read}; use std::sync::mpsc::Sender; use std::collections::HashMap; use zip::read::{ZipArchive, ZipFile}; use loader::xml::reader::{EventReader, XmlEvent}; use rodio::source::Source; use sdl2::rwops::RWops; use sdl2::image::ImageRWops; //use sdl2::surface::SurfaceContext; use mp3::Mp3Decoder; use songs::Song; use surface::Surface; use Result; pub enum LoadStatus { TotalSize(u64), LoadSize(u64), Done(ResPack), } // SDL2-rust implementation of surface isn't threadsafe for some reason pub struct ResPack { pub info: PackInfo, pub images: Vec<ImageLoader>, pub songs: Vec<Song>, } pub struct ImageLoader { //data: SurfaceContext pub name: String, pub fullname: Option<String>, pub data: Surface, pub source: Option<String>, pub source_other: Option<String>, } pub struct SongData { pub name: String, pub title: String, pub source: Option<String>, pub rhythm: Vec<char>, pub buildup: Option<String>, pub buildup_rhythm: Vec<char>, } impl ImageLoader { fn new(name: &str, buffer: Surface) -> Self { ImageLoader { name: name.to_owned(), data: buffer, fullname: None, source: None, source_other: None, } } fn add_data(&mut self, data: ImageData) { self.fullname = data.fullname; self.source = data.source; self.source_other = data.source_other; } } struct ImageData { filename: String, fullname: Option<String>, source: Option<String>, source_other: Option<String>, // align // frameDuration } #[derive(Debug, Default)] pub struct PackInfo { name: String, author: Option<String>, description: Option<String>, link: Option<String> } impl PackInfo { fn new(name: &str) -> Self { PackInfo { name: name.to_owned(), ..Default::default() } } } pub fn load_respack<T: AsRef<Path>>(path: T, tx: Sender<LoadStatus>) -> Result<()> { let path = path.as_ref(); let f = File::open(path)?; let total_size = f.metadata()?.len(); tx.send(LoadStatus::TotalSize(total_size))?; let mut archive = ZipArchive::new(f)?; let mut images: HashMap<String, ImageLoader> = HashMap::new(); let mut audio: HashMap<String, _> = HashMap::new(); let mut song_data = Vec::new(); let mut image_data = Vec::new(); let mut pack_info = PackInfo::new(path.file_stem().and_then(OsStr::to_str).unwrap_or("???")); let mut loaded_size = 0; for i in 0..archive.len() { let mut file = archive.by_index(i)?; let path: PathBuf = file.name().into(); let size = file.compressed_size(); let name: &str = path.file_stem().and_then(OsStr::to_str).ok_or_else(|| "Bad path")?; match path.extension().and_then(OsStr::to_str) { Some("png") => { let surface = { let mut buffer = Vec::with_capacity(file.size() as usize); file.read_to_end(&mut buffer)?; let rwops = RWops::from_bytes(&buffer[..])?; let surface = rwops.load_png()?; Surface::from_surface(surface)? 
}; let image = ImageLoader::new(name, surface); images.insert(name.to_owned(), image); } Some("mp3") => { let mut data = Vec::with_capacity(file.size() as usize); file.read_to_end(&mut data)?; let decoder = Mp3Decoder::new(Cursor::new(data)); let source = (Box::new(decoder) as Box<Source<Item = i16> + Send>).buffered(); audio.insert(name.to_owned(), source); } Some("xml") => { parse_xml(file, &mut song_data, &mut image_data, &mut pack_info); } Some("") => {}, _ => println!("{:?}", path), } tx.send(LoadStatus::LoadSize(size))?; loaded_size += size; } // Leftovers tx.send(LoadStatus::LoadSize(total_size - loaded_size))?; // Process songs let songs: Vec<Song> = song_data .into_iter() .filter_map(|data| Song::new(data, &mut audio).ok()) .collect(); if!audio.is_empty() { println!("Warning: Unused audio data {:?}", audio.keys()); } // Process images for image in image_data.into_iter() { if let Some(loader) = images.get_mut(&image.filename) { loader.add_data(image); } else { println!("Warning: Could not find image {}", image.filename); } } tx.send(LoadStatus::Done(ResPack { info: pack_info, images: images.into_iter().map(|(_k, v)| v).collect(), songs, }))?; Ok(()) } // XML // tempted to try and write a macro to handle this // maybe if it grows some more enum State { Document, Songs, Song(Option<SongField>), Images, Image(Option<ImageField>), Info(Option<InfoField>), } #[derive(Copy, Clone, Debug)] enum SongField { Title, Source, Rhythm, Buildup, BuildupRhythm, } #[derive(Copy, Clone, Debug)] enum ImageField { Source, SourceOther, FullName, Align, FrameDuration, // TODO: handle animations } #[derive(Copy, Clone, Debug)] enum InfoField { Name, Author, Description, Link, } // based off code from stebalien on rust-lang // ok this got ugly, clean it up fn parse_xml(file: ZipFile, songs: &mut Vec<SongData>, images: &mut Vec<ImageData>, pack_info: &mut PackInfo) { let mut reader = EventReader::new(BufReader::new(file)); let mut state = State::Document; let mut song_name = None; let mut song_title = None; let mut song_source = None; let mut song_rhythm = Vec::new(); let mut song_buildup = None; let mut song_buildup_rhythm = Vec::new(); let mut image_filename = None; let mut image_name = None; let mut image_source = None; let mut image_source_other = None; // TODO: handle smart align //let mut image_align = None; while let Ok(event) = reader.next() { state = match state { State::Document => match event { XmlEvent::StartDocument {.. } => State::Document, XmlEvent::StartElement { name,.. } => match name.local_name.as_ref() { "info" => State::Info(None), "songs" => State::Songs, "images" => State::Images, _ => { println!("Unknown xml tag {}", name.local_name); xml_skip_tag(&mut reader).unwrap(); State::Document } }, XmlEvent::EndDocument => break, _ => { println!("Unexpected"); State::Document } }, State::Songs => match event { XmlEvent::StartElement { name, attributes,.. } => { if name.local_name!= "song" { panic!("Expected a song tag - got {}", name.local_name); } for attr in attributes.into_iter() { if attr.name.local_name == "name" { song_name = Some(attr.value); break; } } if song_name.is_none() { panic!("Expected a song name"); } State::Song(None) } XmlEvent::EndElement {.. } => State::Document, XmlEvent::Whitespace(_) => State::Songs, _ => { println!("Expected a song tag - got {:?}", event); State::Songs } }, State::Song(None) => match event { XmlEvent::StartElement { ref name,.. 
} => match name.local_name.as_ref() { "title" => State::Song(Some(SongField::Title)), "source" => State::Song(Some(SongField::Source)), "rhythm" => State::Song(Some(SongField::Rhythm)),
println!("Unknown song field {}", name.local_name); xml_skip_tag(&mut reader).unwrap(); State::Song(None) } }, XmlEvent::EndElement { .. } => { if song_rhythm.is_empty() { // TODO: be graceful
panic!("Empty rhythm"); } let song = SongData { name: song_name.take().unwrap(), title: song_title.take().unwrap(), source: song_source.take(), rhythm: std::mem::replace(&mut song_rhythm, Vec::new()), buildup: song_buildup.take(), buildup_rhythm: std::mem::replace(&mut song_buildup_rhythm, Vec::new()), }; songs.push(song); State::Songs } _ => State::Song(None), },
State::Song(Some(field)) => match event { XmlEvent::Characters(data) => { match field { SongField::Title => song_title = Some(data), SongField::Source => song_source = Some(data), SongField::Rhythm => { if !data.is_ascii() { panic!("Expected ascii characters in rhythm"); } song_rhythm = data.chars().collect(); } SongField::Buildup => song_buildup = Some(data), SongField::BuildupRhythm => { if !data.is_ascii() { panic!("Expected ascii characters in rhythm"); } if data.is_empty() { panic!("Buildup rhythm empty!"); } song_buildup_rhythm = data.chars().collect(); } } State::Song(Some(field)) } XmlEvent::EndElement { .. } => State::Song(None), _ => panic!("Expected data for tag {:?}", field), },
State::Images => match event { XmlEvent::StartElement { name, attributes, .. } => { if name.local_name != "image" { panic!("Expected an image tag - got {}", name.local_name); } for attr in attributes.into_iter() { if attr.name.local_name == "name" { image_filename = Some(attr.value); break; } } if image_filename.is_none() { panic!("Expected an image name"); } State::Image(None) } XmlEvent::EndElement { .. } => State::Document, XmlEvent::Whitespace(_) => State::Images, _ => panic!("Expected an image tag - got {:?}", event), },
State::Image(None) => match event { XmlEvent::StartElement { ref name, .. } => match name.local_name.as_ref() { "source" => State::Image(Some(ImageField::Source)), "source_other" => State::Image(Some(ImageField::SourceOther)), "fullname" => State::Image(Some(ImageField::FullName)), "align" => State::Image(Some(ImageField::Align)), "frameDuration" => State::Image(Some(ImageField::FrameDuration)), _ => { println!("Unknown image field {}", name.local_name); xml_skip_tag(&mut reader).unwrap(); State::Image(None) } }, XmlEvent::EndElement { .. } => { let image = ImageData { filename: image_filename.take().unwrap(), fullname: image_name.take(), source: image_source.take(), source_other: image_source_other.take(), }; images.push(image); State::Images } _ => State::Image(None), },
State::Image(Some(field)) => match event { XmlEvent::Characters(data) => { match field { ImageField::Source => image_source = Some(data), ImageField::SourceOther => image_source_other = Some(data), ImageField::FullName => image_name = Some(data), ImageField::Align => {} ImageField::FrameDuration => {} } State::Image(Some(field)) } XmlEvent::EndElement { .. } => State::Image(None), _ => panic!("Expected data for tag {:?}", field), },
State::Info(None) => match event { XmlEvent::StartElement { ref name, .. } => match name.local_name.as_ref() { "name" => State::Info(Some(InfoField::Name)), "author" => State::Info(Some(InfoField::Author)), "description" => State::Info(Some(InfoField::Description)), "link" => State::Info(Some(InfoField::Link)), _ => { println!("Unknown info field {}", name.local_name); xml_skip_tag(&mut reader).unwrap(); State::Info(None) } }, XmlEvent::EndElement { .. } => State::Document, _ => State::Info(None), },
State::Info(Some(field)) => match event { XmlEvent::Characters(data) => { match field { InfoField::Name => pack_info.name = data, InfoField::Author => pack_info.author = Some(data), InfoField::Description => pack_info.description = Some(data), InfoField::Link => pack_info.link = Some(data), } State::Info(Some(field)) } XmlEvent::EndElement { .. } => State::Info(None), _ => { println!("Expected data for tag {:?}", field); State::Info(Some(field)) } } } } }
fn xml_skip_tag<R: Read>(reader: &mut EventReader<R>) -> Result<()> { let mut depth = 1; while depth > 0 { match reader.next() { Ok(XmlEvent::StartElement { .. }) => depth += 1, Ok(XmlEvent::EndElement { .. }) => depth -= 1, Ok(_event) => {} _ => return Err("Unexpected event error".into()), } } Ok(()) }
"buildup" => State::Song(Some(SongField::Buildup)), "buildupRhythm" => State::Song(Some(SongField::BuildupRhythm)), _ => {
random_line_split
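The chunks above appear to form one fill-in-the-middle sample over loader.rs: the first chunk ends inside the song-field match, the next chunk is the code that follows the cut, and the short span tagged random_line_split (the "buildup" and "buildupRhythm" arms plus the opening of the fallback arm) is what was removed between them. Below is a minimal sketch of how such a row could be stitched back together; the FimSample struct and its field names are assumptions about how the row is stored, not something this dump defines.

// Hypothetical record for one row of this dump (field names are assumed, not taken from the data).
struct FimSample {
    file_name: String,
    prefix: String,
    middle: String,
    suffix: String,
    fim_type: String, // e.g. "random_line_split" or "identifier_name"
}

impl FimSample {
    // Splice the cut-out span back between its surrounding context.
    fn reassemble(&self) -> String {
        let mut out = String::with_capacity(self.prefix.len() + self.middle.len() + self.suffix.len());
        out.push_str(&self.prefix);
        out.push_str(&self.middle);
        out.push_str(&self.suffix);
        out
    }
}

For the row above, reassemble() would yield the parse_xml state machine with the buildup arms back in place.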
loader.rs
extern crate xml; use std; use std::fs::File; use std::path::{Path, PathBuf}; use std::ffi::OsStr; use std::io::{BufReader, Cursor, Read}; use std::sync::mpsc::Sender; use std::collections::HashMap; use zip::read::{ZipArchive, ZipFile}; use loader::xml::reader::{EventReader, XmlEvent}; use rodio::source::Source; use sdl2::rwops::RWops; use sdl2::image::ImageRWops; //use sdl2::surface::SurfaceContext; use mp3::Mp3Decoder; use songs::Song; use surface::Surface; use Result; pub enum LoadStatus { TotalSize(u64), LoadSize(u64), Done(ResPack), } // SDL2-rust implementation of surface isn't threadsafe for some reason pub struct
{ pub info: PackInfo, pub images: Vec<ImageLoader>, pub songs: Vec<Song>, } pub struct ImageLoader { //data: SurfaceContext pub name: String, pub fullname: Option<String>, pub data: Surface, pub source: Option<String>, pub source_other: Option<String>, } pub struct SongData { pub name: String, pub title: String, pub source: Option<String>, pub rhythm: Vec<char>, pub buildup: Option<String>, pub buildup_rhythm: Vec<char>, } impl ImageLoader { fn new(name: &str, buffer: Surface) -> Self { ImageLoader { name: name.to_owned(), data: buffer, fullname: None, source: None, source_other: None, } } fn add_data(&mut self, data: ImageData) { self.fullname = data.fullname; self.source = data.source; self.source_other = data.source_other; } } struct ImageData { filename: String, fullname: Option<String>, source: Option<String>, source_other: Option<String>, // align // frameDuration } #[derive(Debug, Default)] pub struct PackInfo { name: String, author: Option<String>, description: Option<String>, link: Option<String> } impl PackInfo { fn new(name: &str) -> Self { PackInfo { name: name.to_owned(), ..Default::default() } } } pub fn load_respack<T: AsRef<Path>>(path: T, tx: Sender<LoadStatus>) -> Result<()> { let path = path.as_ref(); let f = File::open(path)?; let total_size = f.metadata()?.len(); tx.send(LoadStatus::TotalSize(total_size))?; let mut archive = ZipArchive::new(f)?; let mut images: HashMap<String, ImageLoader> = HashMap::new(); let mut audio: HashMap<String, _> = HashMap::new(); let mut song_data = Vec::new(); let mut image_data = Vec::new(); let mut pack_info = PackInfo::new(path.file_stem().and_then(OsStr::to_str).unwrap_or("???")); let mut loaded_size = 0; for i in 0..archive.len() { let mut file = archive.by_index(i)?; let path: PathBuf = file.name().into(); let size = file.compressed_size(); let name: &str = path.file_stem().and_then(OsStr::to_str).ok_or_else(|| "Bad path")?; match path.extension().and_then(OsStr::to_str) { Some("png") => { let surface = { let mut buffer = Vec::with_capacity(file.size() as usize); file.read_to_end(&mut buffer)?; let rwops = RWops::from_bytes(&buffer[..])?; let surface = rwops.load_png()?; Surface::from_surface(surface)? 
}; let image = ImageLoader::new(name, surface); images.insert(name.to_owned(), image); } Some("mp3") => { let mut data = Vec::with_capacity(file.size() as usize); file.read_to_end(&mut data)?; let decoder = Mp3Decoder::new(Cursor::new(data)); let source = (Box::new(decoder) as Box<Source<Item = i16> + Send>).buffered(); audio.insert(name.to_owned(), source); } Some("xml") => { parse_xml(file, &mut song_data, &mut image_data, &mut pack_info); } Some("") => {}, _ => println!("{:?}", path), } tx.send(LoadStatus::LoadSize(size))?; loaded_size += size; } // Leftovers tx.send(LoadStatus::LoadSize(total_size - loaded_size))?; // Process songs let songs: Vec<Song> = song_data .into_iter() .filter_map(|data| Song::new(data, &mut audio).ok()) .collect(); if!audio.is_empty() { println!("Warning: Unused audio data {:?}", audio.keys()); } // Process images for image in image_data.into_iter() { if let Some(loader) = images.get_mut(&image.filename) { loader.add_data(image); } else { println!("Warning: Could not find image {}", image.filename); } } tx.send(LoadStatus::Done(ResPack { info: pack_info, images: images.into_iter().map(|(_k, v)| v).collect(), songs, }))?; Ok(()) } // XML // tempted to try and write a macro to handle this // maybe if it grows some more enum State { Document, Songs, Song(Option<SongField>), Images, Image(Option<ImageField>), Info(Option<InfoField>), } #[derive(Copy, Clone, Debug)] enum SongField { Title, Source, Rhythm, Buildup, BuildupRhythm, } #[derive(Copy, Clone, Debug)] enum ImageField { Source, SourceOther, FullName, Align, FrameDuration, // TODO: handle animations } #[derive(Copy, Clone, Debug)] enum InfoField { Name, Author, Description, Link, } // based off code from stebalien on rust-lang // ok this got ugly, clean it up fn parse_xml(file: ZipFile, songs: &mut Vec<SongData>, images: &mut Vec<ImageData>, pack_info: &mut PackInfo) { let mut reader = EventReader::new(BufReader::new(file)); let mut state = State::Document; let mut song_name = None; let mut song_title = None; let mut song_source = None; let mut song_rhythm = Vec::new(); let mut song_buildup = None; let mut song_buildup_rhythm = Vec::new(); let mut image_filename = None; let mut image_name = None; let mut image_source = None; let mut image_source_other = None; // TODO: handle smart align //let mut image_align = None; while let Ok(event) = reader.next() { state = match state { State::Document => match event { XmlEvent::StartDocument {.. } => State::Document, XmlEvent::StartElement { name,.. } => match name.local_name.as_ref() { "info" => State::Info(None), "songs" => State::Songs, "images" => State::Images, _ => { println!("Unknown xml tag {}", name.local_name); xml_skip_tag(&mut reader).unwrap(); State::Document } }, XmlEvent::EndDocument => break, _ => { println!("Unexpected"); State::Document } }, State::Songs => match event { XmlEvent::StartElement { name, attributes,.. } => { if name.local_name!= "song" { panic!("Expected a song tag - got {}", name.local_name); } for attr in attributes.into_iter() { if attr.name.local_name == "name" { song_name = Some(attr.value); break; } } if song_name.is_none() { panic!("Expected a song name"); } State::Song(None) } XmlEvent::EndElement {.. } => State::Document, XmlEvent::Whitespace(_) => State::Songs, _ => { println!("Expected a song tag - got {:?}", event); State::Songs } }, State::Song(None) => match event { XmlEvent::StartElement { ref name,.. 
} => match name.local_name.as_ref() { "title" => State::Song(Some(SongField::Title)), "source" => State::Song(Some(SongField::Source)), "rhythm" => State::Song(Some(SongField::Rhythm)), "buildup" => State::Song(Some(SongField::Buildup)), "buildupRhythm" => State::Song(Some(SongField::BuildupRhythm)), _ => { println!("Unknown song field {}", name.local_name); xml_skip_tag(&mut reader).unwrap(); State::Song(None) } }, XmlEvent::EndElement {.. } => { if song_rhythm.is_empty() { // TODO: be graceful panic!("Empty rhythm"); } let song = SongData { name: song_name.take().unwrap(), title: song_title.take().unwrap(), source: song_source.take(), rhythm: std::mem::replace(&mut song_rhythm, Vec::new()), buildup: song_buildup.take(), buildup_rhythm: std::mem::replace(&mut song_buildup_rhythm, Vec::new()), }; songs.push(song); State::Songs } _ => State::Song(None), }, State::Song(Some(field)) => match event { XmlEvent::Characters(data) => { match field { SongField::Title => song_title = Some(data), SongField::Source => song_source = Some(data), SongField::Rhythm => { if!data.is_ascii() { panic!("Expected ascii characters in rhythm"); } song_rhythm = data.chars().collect(); } SongField::Buildup => song_buildup = Some(data), SongField::BuildupRhythm => { if!data.is_ascii() { panic!("Expected ascii characters in rhythm"); } if data.is_empty() { panic!("Buildup rhythm empty!"); } song_buildup_rhythm = data.chars().collect(); } } State::Song(Some(field)) } XmlEvent::EndElement {.. } => State::Song(None), _ => panic!("Expected data for tag {:?}", field), }, State::Images => match event { XmlEvent::StartElement { name, attributes,.. } => { if name.local_name!= "image" { panic!("Expected an image tag - got {}", name.local_name); } for attr in attributes.into_iter() { if attr.name.local_name == "name" { image_filename = Some(attr.value); break; } } if image_filename.is_none() { panic!("Expected an image name"); } State::Image(None) } XmlEvent::EndElement {.. } => State::Document, XmlEvent::Whitespace(_) => State::Images, _ => panic!("Expected an image tag - got {:?}", event), }, State::Image(None) => match event { XmlEvent::StartElement { ref name,.. } => match name.local_name.as_ref() { "source" => State::Image(Some(ImageField::Source)), "source_other" => State::Image(Some(ImageField::SourceOther)), "fullname" => State::Image(Some(ImageField::FullName)), "align" => State::Image(Some(ImageField::Align)), "frameDuration" => State::Image(Some(ImageField::FrameDuration)), _ => { println!("Unknown image field {}", name.local_name); xml_skip_tag(&mut reader).unwrap(); State::Image(None) } }, XmlEvent::EndElement {.. } => { let image = ImageData { filename: image_filename.take().unwrap(), fullname: image_name.take(), source: image_source.take(), source_other: image_source_other.take(), }; images.push(image); State::Images } _ => State::Image(None), }, State::Image(Some(field)) => match event { XmlEvent::Characters(data) => { match field { ImageField::Source => image_source = Some(data), ImageField::SourceOther => image_source_other = Some(data), ImageField::FullName => image_name = Some(data), ImageField::Align => {} ImageField::FrameDuration => {} } State::Image(Some(field)) } XmlEvent::EndElement {.. } => State::Image(None), _ => panic!("Expected data for tag {:?}", field), }, State::Info(None) => match event { XmlEvent::StartElement { ref name,.. 
} => match name.local_name.as_ref() { "name" => State::Info(Some(InfoField::Name)), "author" => State::Info(Some(InfoField::Author)), "description" => State::Info(Some(InfoField::Description)), "link" => State::Info(Some(InfoField::Link)), _ => { println!("Unknown info field {}", name.local_name); xml_skip_tag(&mut reader).unwrap(); State::Info(None) } }, XmlEvent::EndElement {.. } => State::Document, _ => State::Info(None), }, State::Info(Some(field)) => match event { XmlEvent::Characters(data) => { match field { InfoField::Name => pack_info.name = data, InfoField::Author => pack_info.author = Some(data), InfoField::Description => pack_info.description = Some(data), InfoField::Link => pack_info.link = Some(data), } State::Info(Some(field)) } XmlEvent::EndElement {.. } => State::Info(None), _ => { println!("Expected data for tag {:?}", field); State::Info(Some(field)) } } } } } fn xml_skip_tag<R: Read>(reader: &mut EventReader<R>) -> Result<()> { let mut depth = 1; while depth > 0 { match reader.next() { Ok(XmlEvent::StartElement {.. }) => depth += 1, Ok(XmlEvent::EndElement {.. }) => depth -= 1, Ok(_event) => {} _ => return Err("Unexpected event error".into()), } } Ok(()) }
ResPack
identifier_name
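The second loader.rs row is the same file split differently: the span tagged identifier_name is the single identifier ResPack, which completes the pub struct declaration left dangling at the end of the first chunk. If one wanted to sanity-check rows of this kind, a small helper along the following lines would do; it is only an illustrative sketch and is not part of the dataset's own tooling.

// Returns true when the cut-out span looks like a bare Rust identifier such as `ResPack`.
// ASCII-only for simplicity; real Rust identifiers may also use other XID characters.
fn is_plain_identifier(middle: &str) -> bool {
    let mut chars = middle.trim().chars();
    match chars.next() {
        Some(c) if c == '_' || c.is_ascii_alphabetic() => {}
        _ => return false,
    }
    chars.all(|c| c == '_' || c.is_ascii_alphanumeric())
}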
lib.rs
/////////////////////////////////////////////////////////////////////////////// // // Copyright 2018-2021 Robonomics Network <[email protected]> // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // /////////////////////////////////////////////////////////////////////////////// //! The Robonomics runtime module. This can be compiled with `#[no_std]`, ready for Wasm. #![cfg_attr(not(feature = "std"), no_std)] pub mod economics; pub mod signed; pub mod technics; pub mod traits; pub use pallet::*; pub use signed::*; pub use traits::*; #[frame_support::pallet] pub mod pallet { use super::traits::*; use frame_support::{dispatch, pallet_prelude::*}; use frame_system::pallet_prelude::*; use sp_std::prelude::*; #[pallet::config] pub trait Config: frame_system::Config { /// How to make and process agreement between two parties. type Agreement: dispatch::Parameter + Processing + Agreement<Self::AccountId>; /// How to report of agreement execution. type Report: dispatch::Parameter + Report<Self::Index, Self::AccountId>; /// The overarching event type. type Event: From<Event<Self>> + IsType<<Self as frame_system::Config>::Event>; } pub type TechnicsFor<T> = <<T as Config>::Agreement as Agreement<<T as frame_system::Config>::AccountId>>::Technical; pub type EconomicsFor<T> = <<T as Config>::Agreement as Agreement<<T as frame_system::Config>::AccountId>>::Economical; pub type ReportFor<T> = <T as Config>::Report; #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event<T: Config> { /// Yay! New liability created. NewLiability( T::Index, TechnicsFor<T>, EconomicsFor<T>, T::AccountId, T::AccountId, ), /// Liability report published. NewReport(T::Index, ReportFor<T>), } #[pallet::error] pub enum Error<T> { /// Agreement proof verification failed. BadAgreementProof, /// Report proof verification failed. BadReportProof, /// Wrong report sender account. BadReportSender, /// Liability already finalized. AlreadyFinalized, /// Real world oracle is not ready for this report. OracleIsNotReady, /// Unable to load agreement from storage. AgreementNotFound, } #[pallet::storage] #[pallet::getter(fn latest_index)] /// [DEPRECATED] Latest liability index. /// TODO: remove after mainnet upgrade pub(super) type LatestIndex<T: Config> = StorageValue<_, T::Index>; #[pallet::storage] #[pallet::getter(fn next_index)] /// Next liability index. pub(super) type NextIndex<T: Config> = StorageValue<_, T::Index>; #[pallet::storage] #[pallet::getter(fn agreement_of)] /// Technical and economical parameters of liability. pub(super) type AgreementOf<T: Config> = StorageMap<_, Twox64Concat, T::Index, T::Agreement>; #[pallet::storage] #[pallet::getter(fn report_of)] /// Result of liability execution. 
pub(super) type ReportOf<T: Config> = StorageMap<_, Twox64Concat, T::Index, ReportFor<T>>; #[pallet::hooks] impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> { // TODO: remove after mainnet upgrade fn on_runtime_upgrade() -> Weight { if <NextIndex<T>>::get().is_none() { if let Some(index) = <LatestIndex<T>>::take() { <NextIndex<T>>::put(index) } } 1 } } #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] #[pallet::without_storage_info] pub struct Pallet<T>(PhantomData<T>); #[pallet::call] impl<T: Config> Pallet<T> { /// Create agreement between two parties. #[pallet::weight(200_000)] pub fn create(origin: OriginFor<T>, agreement: T::Agreement) -> DispatchResultWithPostInfo { let _ = ensure_signed(origin)?; ensure!(agreement.verify(), Error::<T>::BadAgreementProof); // Start agreement processing agreement.on_start()?; // Store agreement on storage let next_index = <NextIndex<T>>::get().unwrap_or(Default::default()); <AgreementOf<T>>::insert(next_index, agreement.clone()); <NextIndex<T>>::put(next_index + 1u32.into()); // Emit event Self::deposit_event(Event::NewLiability( next_index, agreement.technical(), agreement.economical(), agreement.promisee(), agreement.promisor(), )); Ok(().into()) } /// Publish technical report of complite works. #[pallet::weight(200_000)] pub fn finalize(origin: OriginFor<T>, report: ReportFor<T>) -> DispatchResultWithPostInfo { let _ = ensure_signed(origin)?; // Check report proof ensure!(report.verify(), Error::<T>::BadReportProof); let index = report.index(); // Is liability already finalized? ensure!( <ReportOf<T>>::get(index) == None, Error::<T>::AlreadyFinalized ); // Decode agreement from storage if let Some(agreement) = <AgreementOf<T>>::get(index) { // Check report sender ensure!( report.sender() == agreement.promisor(), Error::<T>::BadReportSender ); // Run agreement final processing match report.is_confirmed() { None => Err(Error::<T>::OracleIsNotReady)?, Some(x) => agreement.on_finish(x)?, } // Store report on storage <ReportOf<T>>::insert(index, report.clone()); // Emit event Self::deposit_event(Event::NewReport(index, report)); Ok(().into()) } else { Err(Error::<T>::AgreementNotFound.into()) } } } } #[cfg(test)] mod tests { use crate::economics::SimpleMarket; use crate::signed::*; use crate::technics::IPFS; use crate::traits::*; use crate::{self as liability, *}; use frame_support::{assert_err, assert_ok, parameter_types}; use hex_literal::hex; use sp_core::{crypto::Pair, sr25519, H256}; use sp_keyring::AccountKeyring; use sp_runtime::{ testing::Header, traits::{IdentifyAccount, IdentityLookup, Verify}, AccountId32, MultiSignature, }; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Runtime>; type Block = frame_system::mocking::MockBlock<Runtime>; type Balance = u128; const XRT: Balance = 1_000_000_000; frame_support::construct_runtime!( pub enum Runtime where Block = Block, NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { System: frame_system::{Pallet, Call, Config, Storage, Event<T>}, Balances: pallet_balances::{Pallet, Call, Storage, Config<T>, Event<T>}, Liability: liability::{Pallet, Call, Storage, Event<T>}, } ); parameter_types! 
{ pub const BlockHashCount: u64 = 250; } impl frame_system::Config for Runtime { type Origin = Origin; type Index = u64; type BlockNumber = u64; type Call = Call; type Hash = H256; type Hashing = ::sp_runtime::traits::BlakeTwo256; type AccountId = AccountId32; type Lookup = IdentityLookup<Self::AccountId>; type Header = Header; type Event = Event; type BlockHashCount = BlockHashCount; type Version = (); type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData<Balance>; type OnNewAccount = (); type OnKilledAccount = (); type DbWeight = (); type BaseCallFilter = frame_support::traits::Everything; type SystemWeightInfo = (); type BlockWeights = (); type BlockLength = (); type SS58Prefix = (); type OnSetCode = (); type MaxConsumers = frame_support::traits::ConstU32<16>; } parameter_types! { pub const MaxLocks: u32 = 50; pub const MaxReserves: u32 = 50; pub const ExistentialDeposit: Balance = 10; } impl pallet_balances::Config for Runtime { type MaxLocks = MaxLocks; type MaxReserves = MaxReserves; type ReserveIdentifier = [u8; 8]; type Balance = Balance; type Event = Event; type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; type WeightInfo = (); } impl Config for Runtime { type Event = Event; type Agreement = SignedAgreement< // Provide task in IPFS IPFS, // Liability has a price SimpleMarket<Self::AccountId, Balances>, // Use standard accounts Self::AccountId, // Use standard signatures MultiSignature, >; type Report = SignedReport< // Indexing liabilities using system index Self::Index, // Use standard accounts Self::AccountId, // Use standard signatures MultiSignature, // Provide report in IPFS IPFS, >; } // IPFS raw hash (sha256) const IPFS_HASH: [u8; 32] = hex!["30f3d649b3d140a6601e11a2cfbe3560e60dc5434f62d702ac8ceff4e1890015"]; fn new_test_ext() -> sp_io::TestExternalities { let mut storage = frame_system::GenesisConfig::default() .build_storage::<Runtime>() .unwrap(); let _ = pallet_balances::GenesisConfig::<Runtime> { balances: vec![ (AccountKeyring::Alice.into(), 100 * XRT), (AccountKeyring::Bob.into(), 100 * XRT), ], } .assimilate_storage(&mut storage); storage.into() } #[test] fn test_initial_setup() { new_test_ext().execute_with(|| { assert_eq!(Liability::next_index(), None); }); } fn
( uri: &str, technics: &TechnicsFor<Runtime>, economics: &EconomicsFor<Runtime>, ) -> (AccountId32, MultiSignature) { let pair = sr25519::Pair::from_string(uri, None).unwrap(); let sender = <MultiSignature as Verify>::Signer::from(pair.public()).into_account(); let signature = <ProofSigner<_> as AgreementProofBuilder<_, _, _, _>>::proof( technics, economics, &pair, ) .into(); (sender, signature) } fn get_report_proof(uri: &str, index: &u64, message: &IPFS) -> (AccountId32, MultiSignature) { let pair = sr25519::Pair::from_string(uri, None).unwrap(); let sender = <MultiSignature as Verify>::Signer::from(pair.public()).into_account(); let signature = <ProofSigner<_> as ReportProofBuilder<_, _, _, _>>::proof(index, message, &pair).into(); (sender, signature) } #[test] fn test_liability_proofs() { let technics = IPFS { hash: IPFS_HASH.into(), }; let economics = SimpleMarket { price: 10 }; let (sender, signature) = get_params_proof("//Alice", &technics, &economics); let agreement: <Runtime as Config>::Agreement = SignedAgreement { technics, economics, promisee: sender.clone(), promisor: sender.clone(), promisee_signature: signature.clone(), promisor_signature: signature.clone(), }; assert_eq!(agreement.verify(), true); let index = 1; let payload = IPFS { hash: IPFS_HASH.into(), }; let (sender, signature) = get_report_proof("//Alice", &index, &payload); let report = SignedReport { index, sender, payload, signature, }; assert_eq!(report.verify(), true); } #[test] fn test_liability_lifecycle() { new_test_ext().execute_with(|| { assert_eq!(Liability::next_index(), None); let technics = IPFS { hash: IPFS_HASH.into(), }; let economics = SimpleMarket { price: 10 * XRT }; let (alice, promisee_signature) = get_params_proof("//Alice", &technics, &economics); let (bob, promisor_signature) = get_params_proof("//Bob", &technics, &economics); assert_eq!(System::account(&alice).data.free, 100 * XRT); assert_eq!(System::account(&bob).data.free, 100 * XRT); let agreement = SignedAgreement { technics, economics, promisee: alice.clone(), promisor: bob.clone(), promisee_signature: promisor_signature.clone(), promisor_signature, }; assert_err!( Liability::create( Origin::signed(agreement.promisor.clone()), agreement.clone() ), Error::<Runtime>::BadAgreementProof, ); assert_eq!(Liability::next_index(), None); assert_eq!(System::account(&alice).data.free, 100 * XRT); assert_eq!(System::account(&bob).data.free, 100 * XRT); let agreement = SignedAgreement { promisee_signature, ..agreement }; assert_ok!(Liability::create( Origin::signed(agreement.promisor.clone()), agreement.clone() ),); assert_eq!(Liability::next_index(), Some(1)); assert_eq!(Liability::report_of(0), None); assert_eq!(Liability::agreement_of(0), Some(agreement)); assert_eq!(System::account(&alice).data.free, 90 * XRT); assert_eq!(System::account(&bob).data.free, 100 * XRT); let index = 0; let payload = IPFS { hash: IPFS_HASH.into(), }; let (_, bad_signature) = get_report_proof("//Alice", &index, &payload); let (sender, signature) = get_report_proof("//Bob", &index, &payload); let report = SignedReport { index, sender, payload, signature: bad_signature, }; assert_err!( Liability::finalize(Origin::signed(report.sender.clone()), report.clone()), Error::<Runtime>::BadReportProof, ); assert_eq!(Liability::report_of(0), None); assert_eq!(System::account(&alice).data.free, 90 * XRT); assert_eq!(System::account(&bob).data.free, 100 * XRT); let report = SignedReport { signature, ..report.clone() }; assert_ok!(Liability::finalize( 
Origin::signed(report.sender.clone()), report.clone() )); assert_eq!(Liability::report_of(0), Some(report)); assert_eq!(System::account(&alice).data.free, 90 * XRT); assert_eq!(System::account(&bob).data.free, 110 * XRT); }) } }
get_params_proof
identifier_name
lib.rs
/////////////////////////////////////////////////////////////////////////////// // // Copyright 2018-2021 Robonomics Network <[email protected]> // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // /////////////////////////////////////////////////////////////////////////////// //! The Robonomics runtime module. This can be compiled with `#[no_std]`, ready for Wasm. #![cfg_attr(not(feature = "std"), no_std)] pub mod economics; pub mod signed; pub mod technics; pub mod traits; pub use pallet::*; pub use signed::*; pub use traits::*; #[frame_support::pallet] pub mod pallet { use super::traits::*; use frame_support::{dispatch, pallet_prelude::*}; use frame_system::pallet_prelude::*; use sp_std::prelude::*; #[pallet::config] pub trait Config: frame_system::Config {
/// How to report of agreement execution. type Report: dispatch::Parameter + Report<Self::Index, Self::AccountId>; /// The overarching event type. type Event: From<Event<Self>> + IsType<<Self as frame_system::Config>::Event>; } pub type TechnicsFor<T> = <<T as Config>::Agreement as Agreement<<T as frame_system::Config>::AccountId>>::Technical; pub type EconomicsFor<T> = <<T as Config>::Agreement as Agreement<<T as frame_system::Config>::AccountId>>::Economical; pub type ReportFor<T> = <T as Config>::Report; #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event<T: Config> { /// Yay! New liability created. NewLiability( T::Index, TechnicsFor<T>, EconomicsFor<T>, T::AccountId, T::AccountId, ), /// Liability report published. NewReport(T::Index, ReportFor<T>), } #[pallet::error] pub enum Error<T> { /// Agreement proof verification failed. BadAgreementProof, /// Report proof verification failed. BadReportProof, /// Wrong report sender account. BadReportSender, /// Liability already finalized. AlreadyFinalized, /// Real world oracle is not ready for this report. OracleIsNotReady, /// Unable to load agreement from storage. AgreementNotFound, } #[pallet::storage] #[pallet::getter(fn latest_index)] /// [DEPRECATED] Latest liability index. /// TODO: remove after mainnet upgrade pub(super) type LatestIndex<T: Config> = StorageValue<_, T::Index>; #[pallet::storage] #[pallet::getter(fn next_index)] /// Next liability index. pub(super) type NextIndex<T: Config> = StorageValue<_, T::Index>; #[pallet::storage] #[pallet::getter(fn agreement_of)] /// Technical and economical parameters of liability. pub(super) type AgreementOf<T: Config> = StorageMap<_, Twox64Concat, T::Index, T::Agreement>; #[pallet::storage] #[pallet::getter(fn report_of)] /// Result of liability execution. pub(super) type ReportOf<T: Config> = StorageMap<_, Twox64Concat, T::Index, ReportFor<T>>; #[pallet::hooks] impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> { // TODO: remove after mainnet upgrade fn on_runtime_upgrade() -> Weight { if <NextIndex<T>>::get().is_none() { if let Some(index) = <LatestIndex<T>>::take() { <NextIndex<T>>::put(index) } } 1 } } #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] #[pallet::without_storage_info] pub struct Pallet<T>(PhantomData<T>); #[pallet::call] impl<T: Config> Pallet<T> { /// Create agreement between two parties. #[pallet::weight(200_000)] pub fn create(origin: OriginFor<T>, agreement: T::Agreement) -> DispatchResultWithPostInfo { let _ = ensure_signed(origin)?; ensure!(agreement.verify(), Error::<T>::BadAgreementProof); // Start agreement processing agreement.on_start()?; // Store agreement on storage let next_index = <NextIndex<T>>::get().unwrap_or(Default::default()); <AgreementOf<T>>::insert(next_index, agreement.clone()); <NextIndex<T>>::put(next_index + 1u32.into()); // Emit event Self::deposit_event(Event::NewLiability( next_index, agreement.technical(), agreement.economical(), agreement.promisee(), agreement.promisor(), )); Ok(().into()) } /// Publish technical report of complite works. #[pallet::weight(200_000)] pub fn finalize(origin: OriginFor<T>, report: ReportFor<T>) -> DispatchResultWithPostInfo { let _ = ensure_signed(origin)?; // Check report proof ensure!(report.verify(), Error::<T>::BadReportProof); let index = report.index(); // Is liability already finalized? 
ensure!( <ReportOf<T>>::get(index) == None, Error::<T>::AlreadyFinalized ); // Decode agreement from storage if let Some(agreement) = <AgreementOf<T>>::get(index) { // Check report sender ensure!( report.sender() == agreement.promisor(), Error::<T>::BadReportSender ); // Run agreement final processing match report.is_confirmed() { None => Err(Error::<T>::OracleIsNotReady)?, Some(x) => agreement.on_finish(x)?, } // Store report on storage <ReportOf<T>>::insert(index, report.clone()); // Emit event Self::deposit_event(Event::NewReport(index, report)); Ok(().into()) } else { Err(Error::<T>::AgreementNotFound.into()) } } } } #[cfg(test)] mod tests { use crate::economics::SimpleMarket; use crate::signed::*; use crate::technics::IPFS; use crate::traits::*; use crate::{self as liability, *}; use frame_support::{assert_err, assert_ok, parameter_types}; use hex_literal::hex; use sp_core::{crypto::Pair, sr25519, H256}; use sp_keyring::AccountKeyring; use sp_runtime::{ testing::Header, traits::{IdentifyAccount, IdentityLookup, Verify}, AccountId32, MultiSignature, }; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Runtime>; type Block = frame_system::mocking::MockBlock<Runtime>; type Balance = u128; const XRT: Balance = 1_000_000_000; frame_support::construct_runtime!( pub enum Runtime where Block = Block, NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { System: frame_system::{Pallet, Call, Config, Storage, Event<T>}, Balances: pallet_balances::{Pallet, Call, Storage, Config<T>, Event<T>}, Liability: liability::{Pallet, Call, Storage, Event<T>}, } ); parameter_types! { pub const BlockHashCount: u64 = 250; } impl frame_system::Config for Runtime { type Origin = Origin; type Index = u64; type BlockNumber = u64; type Call = Call; type Hash = H256; type Hashing = ::sp_runtime::traits::BlakeTwo256; type AccountId = AccountId32; type Lookup = IdentityLookup<Self::AccountId>; type Header = Header; type Event = Event; type BlockHashCount = BlockHashCount; type Version = (); type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData<Balance>; type OnNewAccount = (); type OnKilledAccount = (); type DbWeight = (); type BaseCallFilter = frame_support::traits::Everything; type SystemWeightInfo = (); type BlockWeights = (); type BlockLength = (); type SS58Prefix = (); type OnSetCode = (); type MaxConsumers = frame_support::traits::ConstU32<16>; } parameter_types! 
{ pub const MaxLocks: u32 = 50; pub const MaxReserves: u32 = 50; pub const ExistentialDeposit: Balance = 10; } impl pallet_balances::Config for Runtime { type MaxLocks = MaxLocks; type MaxReserves = MaxReserves; type ReserveIdentifier = [u8; 8]; type Balance = Balance; type Event = Event; type DustRemoval = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; type WeightInfo = (); } impl Config for Runtime { type Event = Event; type Agreement = SignedAgreement< // Provide task in IPFS IPFS, // Liability has a price SimpleMarket<Self::AccountId, Balances>, // Use standard accounts Self::AccountId, // Use standard signatures MultiSignature, >; type Report = SignedReport< // Indexing liabilities using system index Self::Index, // Use standard accounts Self::AccountId, // Use standard signatures MultiSignature, // Provide report in IPFS IPFS, >; } // IPFS raw hash (sha256) const IPFS_HASH: [u8; 32] = hex!["30f3d649b3d140a6601e11a2cfbe3560e60dc5434f62d702ac8ceff4e1890015"]; fn new_test_ext() -> sp_io::TestExternalities { let mut storage = frame_system::GenesisConfig::default() .build_storage::<Runtime>() .unwrap(); let _ = pallet_balances::GenesisConfig::<Runtime> { balances: vec![ (AccountKeyring::Alice.into(), 100 * XRT), (AccountKeyring::Bob.into(), 100 * XRT), ], } .assimilate_storage(&mut storage); storage.into() } #[test] fn test_initial_setup() { new_test_ext().execute_with(|| { assert_eq!(Liability::next_index(), None); }); } fn get_params_proof( uri: &str, technics: &TechnicsFor<Runtime>, economics: &EconomicsFor<Runtime>, ) -> (AccountId32, MultiSignature) { let pair = sr25519::Pair::from_string(uri, None).unwrap(); let sender = <MultiSignature as Verify>::Signer::from(pair.public()).into_account(); let signature = <ProofSigner<_> as AgreementProofBuilder<_, _, _, _>>::proof( technics, economics, &pair, ) .into(); (sender, signature) } fn get_report_proof(uri: &str, index: &u64, message: &IPFS) -> (AccountId32, MultiSignature) { let pair = sr25519::Pair::from_string(uri, None).unwrap(); let sender = <MultiSignature as Verify>::Signer::from(pair.public()).into_account(); let signature = <ProofSigner<_> as ReportProofBuilder<_, _, _, _>>::proof(index, message, &pair).into(); (sender, signature) } #[test] fn test_liability_proofs() { let technics = IPFS { hash: IPFS_HASH.into(), }; let economics = SimpleMarket { price: 10 }; let (sender, signature) = get_params_proof("//Alice", &technics, &economics); let agreement: <Runtime as Config>::Agreement = SignedAgreement { technics, economics, promisee: sender.clone(), promisor: sender.clone(), promisee_signature: signature.clone(), promisor_signature: signature.clone(), }; assert_eq!(agreement.verify(), true); let index = 1; let payload = IPFS { hash: IPFS_HASH.into(), }; let (sender, signature) = get_report_proof("//Alice", &index, &payload); let report = SignedReport { index, sender, payload, signature, }; assert_eq!(report.verify(), true); } #[test] fn test_liability_lifecycle() { new_test_ext().execute_with(|| { assert_eq!(Liability::next_index(), None); let technics = IPFS { hash: IPFS_HASH.into(), }; let economics = SimpleMarket { price: 10 * XRT }; let (alice, promisee_signature) = get_params_proof("//Alice", &technics, &economics); let (bob, promisor_signature) = get_params_proof("//Bob", &technics, &economics); assert_eq!(System::account(&alice).data.free, 100 * XRT); assert_eq!(System::account(&bob).data.free, 100 * XRT); let agreement = SignedAgreement { technics, economics, promisee: alice.clone(), 
promisor: bob.clone(), promisee_signature: promisor_signature.clone(), promisor_signature, }; assert_err!( Liability::create( Origin::signed(agreement.promisor.clone()), agreement.clone() ), Error::<Runtime>::BadAgreementProof, ); assert_eq!(Liability::next_index(), None); assert_eq!(System::account(&alice).data.free, 100 * XRT); assert_eq!(System::account(&bob).data.free, 100 * XRT); let agreement = SignedAgreement { promisee_signature, ..agreement }; assert_ok!(Liability::create( Origin::signed(agreement.promisor.clone()), agreement.clone() ),); assert_eq!(Liability::next_index(), Some(1)); assert_eq!(Liability::report_of(0), None); assert_eq!(Liability::agreement_of(0), Some(agreement)); assert_eq!(System::account(&alice).data.free, 90 * XRT); assert_eq!(System::account(&bob).data.free, 100 * XRT); let index = 0; let payload = IPFS { hash: IPFS_HASH.into(), }; let (_, bad_signature) = get_report_proof("//Alice", &index, &payload); let (sender, signature) = get_report_proof("//Bob", &index, &payload); let report = SignedReport { index, sender, payload, signature: bad_signature, }; assert_err!( Liability::finalize(Origin::signed(report.sender.clone()), report.clone()), Error::<Runtime>::BadReportProof, ); assert_eq!(Liability::report_of(0), None); assert_eq!(System::account(&alice).data.free, 90 * XRT); assert_eq!(System::account(&bob).data.free, 100 * XRT); let report = SignedReport { signature, ..report.clone() }; assert_ok!(Liability::finalize( Origin::signed(report.sender.clone()), report.clone() )); assert_eq!(Liability::report_of(0), Some(report)); assert_eq!(System::account(&alice).data.free, 90 * XRT); assert_eq!(System::account(&bob).data.free, 110 * XRT); }) } }
/// How to make and process agreement between two parties. type Agreement: dispatch::Parameter + Processing + Agreement<Self::AccountId>;
random_line_split
brush.rs
ty: wgpu::BindingType::Texture { view_dimension: wgpu::TextureViewDimension::D2, sample_type: wgpu::TextureSampleType::Float { filterable: true }, multisampled: false, }, count: None, }, // fullbright texture wgpu::BindGroupLayoutEntry { binding: 1, visibility: wgpu::ShaderStage::FRAGMENT, ty: wgpu::BindingType::Texture { view_dimension: wgpu::TextureViewDimension::D2, sample_type: wgpu::TextureSampleType::Float { filterable: true }, multisampled: false, }, count: None, }, ], &[ // lightmap texture array wgpu::BindGroupLayoutEntry { count: NonZeroU32::new(4), binding: 0, visibility: wgpu::ShaderStage::FRAGMENT, ty: wgpu::BindingType::Texture { view_dimension: wgpu::TextureViewDimension::D2, sample_type: wgpu::TextureSampleType::Float { filterable: true }, multisampled: false, }, }, ], ]; lazy_static! { static ref VERTEX_ATTRIBUTES: [wgpu::VertexAttribute; 5] = wgpu::vertex_attr_array![ // position 0 => Float32x3, // normal 1 => Float32x3, // diffuse texcoord 2 => Float32x2, // lightmap texcoord 3 => Float32x2, // lightmap animation ids 4 => Uint8x4, ]; } impl Pipeline for BrushPipeline { type VertexPushConstants = VertexPushConstants; type SharedPushConstants = SharedPushConstants; type FragmentPushConstants = (); fn name() -> &'static str { "brush" } fn vertex_shader() -> &'static str { include_str!(concat!(env!("CARGO_MANIFEST_DIR"), "/shaders/brush.vert")) } fn fragment_shader() -> &'static str { include_str!(concat!(env!("CARGO_MANIFEST_DIR"), "/shaders/brush.frag")) } // NOTE: if any of the binding indices are changed, they must also be changed in // the corresponding shaders and the BindGroupLayout generation functions. fn bind_group_layout_descriptors() -> Vec<wgpu::BindGroupLayoutDescriptor<'static>> { vec![ // group 2: updated per-texture wgpu::BindGroupLayoutDescriptor { label: Some("brush per-texture bind group"), entries: BIND_GROUP_LAYOUT_ENTRIES[0], }, // group 3: updated per-face wgpu::BindGroupLayoutDescriptor { label: Some("brush per-face bind group"), entries: BIND_GROUP_LAYOUT_ENTRIES[1], }, ] } fn primitive_state() -> wgpu::PrimitiveState { WorldPipelineBase::primitive_state() } fn color_target_states() -> Vec<wgpu::ColorTargetState> { WorldPipelineBase::color_target_states() } fn depth_stencil_state() -> Option<wgpu::DepthStencilState> { WorldPipelineBase::depth_stencil_state() } // NOTE: if the vertex format is changed, this descriptor must also be changed accordingly. 
fn vertex_buffer_layouts() -> Vec<wgpu::VertexBufferLayout<'static>> { vec![wgpu::VertexBufferLayout { array_stride: size_of::<BrushVertex>() as u64, step_mode: wgpu::InputStepMode::Vertex, attributes: &VERTEX_ATTRIBUTES[..], }] } } fn calculate_lightmap_texcoords( position: Vector3<f32>, face: &BspFace, texinfo: &BspTexInfo, ) -> [f32; 2] { let mut s = texinfo.s_vector.dot(position) + texinfo.s_offset; s -= (face.texture_mins[0] as f32 / 16.0).floor() * 16.0; s += 0.5; s /= face.extents[0] as f32; let mut t = texinfo.t_vector.dot(position) + texinfo.t_offset; t -= (face.texture_mins[1] as f32 / 16.0).floor() * 16.0; t += 0.5; t /= face.extents[1] as f32; [s, t] } type Position = [f32; 3]; type Normal = [f32; 3]; type DiffuseTexcoord = [f32; 2]; type LightmapTexcoord = [f32; 2]; type LightmapAnim = [u8; 4]; #[repr(C)] #[derive(Clone, Copy, Debug)] struct BrushVertex { position: Position, normal: Normal, diffuse_texcoord: DiffuseTexcoord, lightmap_texcoord: LightmapTexcoord, lightmap_anim: LightmapAnim, } #[repr(u32)] #[derive(Clone, Copy, Debug)] pub enum TextureKind { Normal = 0, Warp = 1, Sky = 2, } /// A single frame of a brush texture. pub struct BrushTextureFrame { bind_group_id: usize, diffuse: wgpu::Texture, fullbright: wgpu::Texture, diffuse_view: wgpu::TextureView, fullbright_view: wgpu::TextureView, kind: TextureKind, } /// A brush texture. pub enum BrushTexture { /// A brush texture with a single frame. Static(BrushTextureFrame), /// A brush texture with multiple frames. /// /// Animated brush textures advance one frame every 200 milliseconds, i.e., /// they have a framerate of 5 fps. Animated { primary: Vec<BrushTextureFrame>, alternate: Option<Vec<BrushTextureFrame>>, }, } impl BrushTexture { fn kind(&self) -> TextureKind { match self { BrushTexture::Static(ref frame) => frame.kind, BrushTexture::Animated { ref primary,.. } => primary[0].kind, } } } #[derive(Debug)] struct BrushFace { vertices: Range<u32>, min: Vector3<f32>, max: Vector3<f32>, texture_id: usize, lightmap_ids: Vec<usize>, light_styles: [u8; 4], /// Indicates whether the face should be drawn this frame. /// /// This is set to false by default, and will be set to true if the model is /// a worldmodel and the containing leaf is in the PVS. If the model is not /// a worldmodel, this flag is ignored. 
draw_flag: Cell<bool>, } struct BrushLeaf { facelist_ids: Range<usize>, } impl<B> std::convert::From<B> for BrushLeaf where B: std::borrow::Borrow<BspLeaf>, { fn from(bsp_leaf: B) -> Self { let bsp_leaf = bsp_leaf.borrow(); BrushLeaf { facelist_ids: bsp_leaf.facelist_id..bsp_leaf.facelist_id + bsp_leaf.facelist_count, } } } pub struct BrushRendererBuilder { bsp_data: Rc<BspData>, face_range: Range<usize>, leaves: Option<Vec<BrushLeaf>>, per_texture_bind_groups: RefCell<Vec<wgpu::BindGroup>>, per_face_bind_groups: Vec<wgpu::BindGroup>, vertices: Vec<BrushVertex>, faces: Vec<BrushFace>, texture_chains: HashMap<usize, Vec<usize>>, textures: Vec<BrushTexture>, lightmaps: Vec<wgpu::Texture>, //lightmap_views: Vec<wgpu::TextureView>, } impl BrushRendererBuilder { pub fn new(bsp_model: &BspModel, worldmodel: bool) -> BrushRendererBuilder { BrushRendererBuilder { bsp_data: bsp_model.bsp_data().clone(), face_range: bsp_model.face_id..bsp_model.face_id + bsp_model.face_count, leaves: if worldmodel { Some( bsp_model .iter_leaves() .map(|leaf| BrushLeaf::from(leaf)) .collect(), ) } else { None }, per_texture_bind_groups: RefCell::new(Vec::new()), per_face_bind_groups: Vec::new(), vertices: Vec::new(), faces: Vec::new(), texture_chains: HashMap::new(), textures: Vec::new(), lightmaps: Vec::new(), //lightmap_views: Vec::new(), } } fn create_face(&mut self, state: &GraphicsState, face_id: usize) -> BrushFace { let face = &self.bsp_data.faces()[face_id]; let face_vert_id = self.vertices.len(); let texinfo = &self.bsp_data.texinfo()[face.texinfo_id]; let tex = &self.bsp_data.textures()[texinfo.tex_id]; let mut min = Vector3::new(f32::INFINITY, f32::INFINITY, f32::INFINITY); let mut max = Vector3::new(f32::NEG_INFINITY, f32::NEG_INFINITY, f32::NEG_INFINITY); let no_collinear = math::remove_collinear(self.bsp_data.face_iter_vertices(face_id).collect()); for vert in no_collinear.iter() { for component in 0..3 { min[component] = min[component].min(vert[component]); max[component] = max[component].max(vert[component]); } } if tex.name().starts_with("*") { // tessellate the surface so we can do texcoord warping let verts = warp::subdivide(no_collinear); let normal = (verts[0] - verts[1]).cross(verts[2] - verts[1]).normalize(); for vert in verts.into_iter() { self.vertices.push(BrushVertex { position: vert.into(), normal: normal.into(), diffuse_texcoord: [ ((vert.dot(texinfo.s_vector) + texinfo.s_offset) / tex.width() as f32), ((vert.dot(texinfo.t_vector) + texinfo.t_offset) / tex.height() as f32), ], lightmap_texcoord: calculate_lightmap_texcoords(vert.into(), face, texinfo), lightmap_anim: face.light_styles, }) } } else { // expand the vertices into a triangle list. // the vertices are guaranteed to be in valid triangle fan order (that's // how GLQuake renders them) so we expand from triangle fan to triangle // list order. // // v1 is the base vertex, so it remains constant. // v2 takes the previous value of v3. // v3 is the newest vertex. 
let verts = no_collinear; let normal = (verts[0] - verts[1]).cross(verts[2] - verts[1]).normalize(); let mut vert_iter = verts.into_iter(); let v1 = vert_iter.next().unwrap(); let mut v2 = vert_iter.next().unwrap(); for v3 in vert_iter { let tri = &[v1, v2, v3]; // skip collinear points
for vert in tri.iter() { self.vertices.push(BrushVertex { position: (*vert).into(), normal: normal.into(), diffuse_texcoord: [ ((vert.dot(texinfo.s_vector) + texinfo.s_offset) / tex.width() as f32), ((vert.dot(texinfo.t_vector) + texinfo.t_offset) / tex.height() as f32), ], lightmap_texcoord: calculate_lightmap_texcoords( (*vert).into(), face, texinfo, ), lightmap_anim: face.light_styles, }); } v2 = v3; } } // build the lightmaps
let lightmaps = if !texinfo.special { self.bsp_data.face_lightmaps(face_id) } else { Vec::new() }; let mut lightmap_ids = Vec::new(); for lightmap in lightmaps { let lightmap_data = TextureData::Lightmap(LightmapData {
let texture = state.create_texture(None, lightmap.width(), lightmap.height(), &lightmap_data); let id = self.lightmaps.len(); self.lightmaps.push(texture); //self.lightmap_views //.push(self.lightmaps[id].create_view(&Default::default())); lightmap_ids.push(id); } BrushFace { vertices: face_vert_id as u32..self.vertices.len() as u32, min, max, texture_id: texinfo.tex_id as usize, lightmap_ids, light_styles: face.light_styles, draw_flag: Cell::new(true), } } fn create_per_texture_bind_group( &self, state: &GraphicsState, tex: &BrushTextureFrame, ) -> wgpu::BindGroup { let layout = &state .brush_pipeline() .bind_group_layout(BindGroupLayoutId::PerTexture); let desc = wgpu::BindGroupDescriptor { label: Some("per-texture bind group"), layout, entries: &[ wgpu::BindGroupEntry { binding: 0, resource: wgpu::BindingResource::TextureView(&tex.diffuse_view), }, wgpu::BindGroupEntry { binding: 1, resource: wgpu::BindingResource::TextureView(&tex.fullbright_view), }, ], }; state.device().create_bind_group(&desc) } fn create_per_face_bind_group(&self, state: &GraphicsState, face_id: usize) -> wgpu::BindGroup { let mut lightmap_views: Vec<_> = self.faces[face_id] .lightmap_ids .iter() .map(|id| self.lightmaps[*id].create_view(&Default::default())) .collect(); lightmap_views.resize_with(4, || { state.default_lightmap().create_view(&Default::default()) }); let lightmap_view_refs = lightmap_views.iter().collect::<Vec<_>>(); let layout = &state .brush_pipeline() .bind_group_layout(BindGroupLayoutId::PerFace); let desc = wgpu::BindGroupDescriptor { label: Some("per-face bind group"), layout, entries: &[wgpu::BindGroupEntry { binding: 0, resource: wgpu::BindingResource::TextureViewArray(&lightmap_view_refs[..]), }], }; state.device().create_bind_group(&desc) } fn create_brush_texture_frame<S>( &self, state: &GraphicsState, mipmap: &[u8], width: u32, height: u32, name: S, ) -> BrushTextureFrame where S: AsRef<str>, { let name = name.as_ref(); let (diffuse_data, fullbright_data) = state.palette().translate(mipmap); let diffuse = state.create_texture(None, width, height, &TextureData::Diffuse(diffuse_data)); let fullbright = state.create_texture( None, width, height, &TextureData::Fullbright(fullbright_data), ); let diffuse_view = diffuse.create_view(&Default::default()); let fullbright_view = fullbright.create_view(&Default::default()); let kind = if name.starts_with("sky") { TextureKind::Sky } else if name.starts_with("*") { TextureKind::Warp } else { TextureKind::Normal }; let mut frame = BrushTextureFrame { bind_group_id: 0, diffuse, fullbright, diffuse_view, fullbright_view, kind, }; // generate texture bind group let per_texture_bind_group = self.create_per_texture_bind_group(state, &frame); let bind_group_id = self.per_texture_bind_groups.borrow().len(); self.per_texture_bind_groups .borrow_mut() .push(per_texture_bind_group); frame.bind_group_id = bind_group_id; frame } pub fn create_brush_texture(&self, state: &GraphicsState, tex: &BspTexture) -> BrushTexture { // TODO: upload mipmaps let (width, height) = tex.dimensions(); match tex.kind() { // sequence animated textures BspTextureKind::Animated { primary, alternate } => { let primary_frames: Vec<_> = primary .iter() .map(|f| { self.create_brush_texture_frame( state, f.mipmap(BspTextureMipmap::Full), width, height, tex.name(), ) }) .collect(); let alternate_frames: Option<Vec<_>> = alternate.as_ref().map(|a| { a.iter() .map(|f| { self.create_brush_texture_frame( state, f.mipmap(BspTextureMipmap::Full), width, height, tex.name(), ) })
lightmap: Cow::Borrowed(lightmap.data()), });
random_line_split
brush.rs
pub fn pipeline(&self) -> &wgpu::RenderPipeline { &self.pipeline } pub fn bind_group_layouts(&self) -> &[wgpu::BindGroupLayout] { &self.bind_group_layouts } pub fn bind_group_layout(&self, id: BindGroupLayoutId) -> &wgpu::BindGroupLayout { assert!(id as usize >= BindGroupLayoutId::PerTexture as usize); &self.bind_group_layouts[id as usize - BindGroupLayoutId::PerTexture as usize] } } #[repr(C)] #[derive(Copy, Clone, Debug)] pub struct VertexPushConstants { pub transform: Matrix4<f32>, pub model_view: Matrix4<f32>, } #[repr(C)] #[derive(Copy, Clone, Debug)] pub struct SharedPushConstants { pub texture_kind: u32, } const BIND_GROUP_LAYOUT_ENTRIES: &[&[wgpu::BindGroupLayoutEntry]] = &[ &[ // diffuse texture, updated once per face wgpu::BindGroupLayoutEntry { binding: 0, visibility: wgpu::ShaderStage::FRAGMENT, ty: wgpu::BindingType::Texture { view_dimension: wgpu::TextureViewDimension::D2, sample_type: wgpu::TextureSampleType::Float { filterable: true }, multisampled: false, }, count: None, }, // fullbright texture wgpu::BindGroupLayoutEntry { binding: 1, visibility: wgpu::ShaderStage::FRAGMENT, ty: wgpu::BindingType::Texture { view_dimension: wgpu::TextureViewDimension::D2, sample_type: wgpu::TextureSampleType::Float { filterable: true }, multisampled: false, }, count: None, }, ], &[ // lightmap texture array wgpu::BindGroupLayoutEntry { count: NonZeroU32::new(4), binding: 0, visibility: wgpu::ShaderStage::FRAGMENT, ty: wgpu::BindingType::Texture { view_dimension: wgpu::TextureViewDimension::D2, sample_type: wgpu::TextureSampleType::Float { filterable: true }, multisampled: false, }, }, ], ]; lazy_static! { static ref VERTEX_ATTRIBUTES: [wgpu::VertexAttribute; 5] = wgpu::vertex_attr_array![ // position 0 => Float32x3, // normal 1 => Float32x3, // diffuse texcoord 2 => Float32x2, // lightmap texcoord 3 => Float32x2, // lightmap animation ids 4 => Uint8x4, ]; } impl Pipeline for BrushPipeline { type VertexPushConstants = VertexPushConstants; type SharedPushConstants = SharedPushConstants; type FragmentPushConstants = (); fn name() -> &'static str { "brush" } fn vertex_shader() -> &'static str { include_str!(concat!(env!("CARGO_MANIFEST_DIR"), "/shaders/brush.vert")) } fn fragment_shader() -> &'static str { include_str!(concat!(env!("CARGO_MANIFEST_DIR"), "/shaders/brush.frag")) } // NOTE: if any of the binding indices are changed, they must also be changed in // the corresponding shaders and the BindGroupLayout generation functions. fn bind_group_layout_descriptors() -> Vec<wgpu::BindGroupLayoutDescriptor<'static>> { vec![ // group 2: updated per-texture wgpu::BindGroupLayoutDescriptor { label: Some("brush per-texture bind group"), entries: BIND_GROUP_LAYOUT_ENTRIES[0], }, // group 3: updated per-face wgpu::BindGroupLayoutDescriptor { label: Some("brush per-face bind group"), entries: BIND_GROUP_LAYOUT_ENTRIES[1], }, ] } fn primitive_state() -> wgpu::PrimitiveState { WorldPipelineBase::primitive_state() } fn color_target_states() -> Vec<wgpu::ColorTargetState> { WorldPipelineBase::color_target_states() } fn depth_stencil_state() -> Option<wgpu::DepthStencilState> { WorldPipelineBase::depth_stencil_state() } // NOTE: if the vertex format is changed, this descriptor must also be changed accordingly. 
fn vertex_buffer_layouts() -> Vec<wgpu::VertexBufferLayout<'static>> { vec![wgpu::VertexBufferLayout { array_stride: size_of::<BrushVertex>() as u64, step_mode: wgpu::InputStepMode::Vertex, attributes: &VERTEX_ATTRIBUTES[..], }] } } fn calculate_lightmap_texcoords( position: Vector3<f32>, face: &BspFace, texinfo: &BspTexInfo, ) -> [f32; 2] { let mut s = texinfo.s_vector.dot(position) + texinfo.s_offset; s -= (face.texture_mins[0] as f32 / 16.0).floor() * 16.0; s += 0.5; s /= face.extents[0] as f32; let mut t = texinfo.t_vector.dot(position) + texinfo.t_offset; t -= (face.texture_mins[1] as f32 / 16.0).floor() * 16.0; t += 0.5; t /= face.extents[1] as f32; [s, t] } type Position = [f32; 3]; type Normal = [f32; 3]; type DiffuseTexcoord = [f32; 2]; type LightmapTexcoord = [f32; 2]; type LightmapAnim = [u8; 4]; #[repr(C)] #[derive(Clone, Copy, Debug)] struct BrushVertex { position: Position, normal: Normal, diffuse_texcoord: DiffuseTexcoord, lightmap_texcoord: LightmapTexcoord, lightmap_anim: LightmapAnim, } #[repr(u32)] #[derive(Clone, Copy, Debug)] pub enum TextureKind { Normal = 0, Warp = 1, Sky = 2, } /// A single frame of a brush texture. pub struct BrushTextureFrame { bind_group_id: usize, diffuse: wgpu::Texture, fullbright: wgpu::Texture, diffuse_view: wgpu::TextureView, fullbright_view: wgpu::TextureView, kind: TextureKind, } /// A brush texture. pub enum BrushTexture { /// A brush texture with a single frame. Static(BrushTextureFrame), /// A brush texture with multiple frames. /// /// Animated brush textures advance one frame every 200 milliseconds, i.e., /// they have a framerate of 5 fps. Animated { primary: Vec<BrushTextureFrame>, alternate: Option<Vec<BrushTextureFrame>>, }, } impl BrushTexture { fn kind(&self) -> TextureKind { match self { BrushTexture::Static(ref frame) => frame.kind, BrushTexture::Animated { ref primary,.. } => primary[0].kind, } } } #[derive(Debug)] struct BrushFace { vertices: Range<u32>, min: Vector3<f32>, max: Vector3<f32>, texture_id: usize, lightmap_ids: Vec<usize>, light_styles: [u8; 4], /// Indicates whether the face should be drawn this frame. /// /// This is set to false by default, and will be set to true if the model is /// a worldmodel and the containing leaf is in the PVS. If the model is not /// a worldmodel, this flag is ignored. 
draw_flag: Cell<bool>, } struct BrushLeaf { facelist_ids: Range<usize>, } impl<B> std::convert::From<B> for BrushLeaf where B: std::borrow::Borrow<BspLeaf>, { fn from(bsp_leaf: B) -> Self { let bsp_leaf = bsp_leaf.borrow(); BrushLeaf { facelist_ids: bsp_leaf.facelist_id..bsp_leaf.facelist_id + bsp_leaf.facelist_count, } } } pub struct BrushRendererBuilder { bsp_data: Rc<BspData>, face_range: Range<usize>, leaves: Option<Vec<BrushLeaf>>, per_texture_bind_groups: RefCell<Vec<wgpu::BindGroup>>, per_face_bind_groups: Vec<wgpu::BindGroup>, vertices: Vec<BrushVertex>, faces: Vec<BrushFace>, texture_chains: HashMap<usize, Vec<usize>>, textures: Vec<BrushTexture>, lightmaps: Vec<wgpu::Texture>, //lightmap_views: Vec<wgpu::TextureView>, } impl BrushRendererBuilder { pub fn new(bsp_model: &BspModel, worldmodel: bool) -> BrushRendererBuilder { BrushRendererBuilder { bsp_data: bsp_model.bsp_data().clone(), face_range: bsp_model.face_id..bsp_model.face_id + bsp_model.face_count, leaves: if worldmodel { Some( bsp_model .iter_leaves() .map(|leaf| BrushLeaf::from(leaf)) .collect(), ) } else { None }, per_texture_bind_groups: RefCell::new(Vec::new()), per_face_bind_groups: Vec::new(), vertices: Vec::new(), faces: Vec::new(), texture_chains: HashMap::new(), textures: Vec::new(), lightmaps: Vec::new(), //lightmap_views: Vec::new(), } } fn create_face(&mut self, state: &GraphicsState, face_id: usize) -> BrushFace { let face = &self.bsp_data.faces()[face_id]; let face_vert_id = self.vertices.len(); let texinfo = &self.bsp_data.texinfo()[face.texinfo_id]; let tex = &self.bsp_data.textures()[texinfo.tex_id]; let mut min = Vector3::new(f32::INFINITY, f32::INFINITY, f32::INFINITY); let mut max = Vector3::new(f32::NEG_INFINITY, f32::NEG_INFINITY, f32::NEG_INFINITY); let no_collinear = math::remove_collinear(self.bsp_data.face_iter_vertices(face_id).collect()); for vert in no_collinear.iter() { for component in 0..3 { min[component] = min[component].min(vert[component]); max[component] = max[component].max(vert[component]); } } if tex.name().starts_with("*") { // tessellate the surface so we can do texcoord warping let verts = warp::subdivide(no_collinear); let normal = (verts[0] - verts[1]).cross(verts[2] - verts[1]).normalize(); for vert in verts.into_iter() { self.vertices.push(BrushVertex { position: vert.into(), normal: normal.into(), diffuse_texcoord: [ ((vert.dot(texinfo.s_vector) + texinfo.s_offset) / tex.width() as f32), ((vert.dot(texinfo.t_vector) + texinfo.t_offset) / tex.height() as f32), ], lightmap_texcoord: calculate_lightmap_texcoords(vert.into(), face, texinfo), lightmap_anim: face.light_styles, }) } } else { // expand the vertices into a triangle list. // the vertices are guaranteed to be in valid triangle fan order (that's // how GLQuake renders them) so we expand from triangle fan to triangle // list order. // // v1 is the base vertex, so it remains constant. // v2 takes the previous value of v3. // v3 is the newest vertex. 
let verts = no_collinear; let normal = (verts[0] - verts[1]).cross(verts[2] - verts[1]).normalize(); let mut vert_iter = verts.into_iter(); let v1 = vert_iter.next().unwrap(); let mut v2 = vert_iter.next().unwrap(); for v3 in vert_iter { let tri = &[v1, v2, v3]; // skip collinear points for vert in tri.iter() { self.vertices.push(BrushVertex { position: (*vert).into(), normal: normal.into(), diffuse_texcoord: [ ((vert.dot(texinfo.s_vector) + texinfo.s_offset) / tex.width() as f32), ((vert.dot(texinfo.t_vector) + texinfo.t_offset) / tex.height() as f32), ], lightmap_texcoord: calculate_lightmap_texcoords( (*vert).into(), face, texinfo, ), lightmap_anim: face.light_styles, }); } v2 = v3; } } // build the lightmaps let lightmaps = if!texinfo.special { self.bsp_data.face_lightmaps(face_id) } else { Vec::new() }; let mut lightmap_ids = Vec::new(); for lightmap in lightmaps { let lightmap_data = TextureData::Lightmap(LightmapData { lightmap: Cow::Borrowed(lightmap.data()), }); let texture = state.create_texture(None, lightmap.width(), lightmap.height(), &lightmap_data); let id = self.lightmaps.len(); self.lightmaps.push(texture); //self.lightmap_views //.push(self.lightmaps[id].create_view(&Default::default())); lightmap_ids.push(id); } BrushFace { vertices: face_vert_id as u32..self.vertices.len() as u32, min, max, texture_id: texinfo.tex_id as usize, lightmap_ids, light_styles: face.light_styles, draw_flag: Cell::new(true), } } fn create_per_texture_bind_group( &self, state: &GraphicsState, tex: &BrushTextureFrame, ) -> wgpu::BindGroup { let layout = &state .brush_pipeline() .bind_group_layout(BindGroupLayoutId::PerTexture); let desc = wgpu::BindGroupDescriptor { label: Some("per-texture bind group"), layout, entries: &[ wgpu::BindGroupEntry { binding: 0, resource: wgpu::BindingResource::TextureView(&tex.diffuse_view), }, wgpu::BindGroupEntry { binding: 1, resource: wgpu::BindingResource::TextureView(&tex.fullbright_view), }, ], }; state.device().create_bind_group(&desc) } fn create_per_face_bind_group(&self, state: &GraphicsState, face_id: usize) -> wgpu::BindGroup { let mut lightmap_views: Vec<_> = self.faces[face_id] .lightmap_ids .iter() .map(|id| self.lightmaps[*id].create_view(&Default::default())) .collect(); lightmap_views.resize_with(4, || { state.default_lightmap().create_view(&Default::default()) }); let lightmap_view_refs = lightmap_views.iter().collect::<Vec<_>>(); let layout = &state .brush_pipeline() .bind_group_layout(BindGroupLayoutId::PerFace); let desc = wgpu::BindGroupDescriptor { label: Some("per-face bind group"), layout, entries: &[wgpu::BindGroupEntry { binding: 0, resource: wgpu::BindingResource::TextureViewArray(&lightmap_view_refs[..]), }], }; state.device().create_bind_group(&desc) } fn create_brush_texture_frame<S>( &self, state: &GraphicsState, mipmap: &[u8], width: u32, height: u32, name: S, ) -> BrushTextureFrame where S: AsRef<str>, { let name = name.as_ref(); let (diffuse_data, fullbright_data) = state.palette().translate(mipmap); let diffuse = state.create_texture(None, width, height, &TextureData::Diffuse(diffuse_data)); let fullbright = state.create_texture( None, width, height, &TextureData::Fullbright(fullbright_data), ); let diffuse_view = diffuse.create_view(&Default::default()); let fullbright_view = fullbright.create_view(&Default::default()); let kind = if name.starts_with("sky") { TextureKind::Sky } else if name.starts_with("*") { TextureKind::Warp } else { TextureKind::Normal }; let mut frame = BrushTextureFrame { bind_group_id: 0, diffuse, 
fullbright, diffuse_view, fullbright_view, kind, }; // generate texture bind group let per_texture_bind_group = self.create_per_texture_bind_group(state, &frame); let bind_group_id = self.per_texture_bind_groups.borrow().len(); self.per_texture_bind_groups .borrow_mut() .push(per_texture_bind_group); frame.bind_group_id = bind_group_id; frame } pub fn create_brush_texture(&self, state: &GraphicsState, tex: &BspTexture) -> BrushTexture { // TODO: upload mipmaps let (width, height) = tex.dimensions(); match tex.kind() { // sequence animated textures BspTextureKind::Animated { primary, alternate } => { let primary_frames: Vec<_> = primary .iter() .map(|f| { self.create_brush_texture_frame( state, f.mipmap(BspTextureMipmap::Full), width,
let layout_refs: Vec<_> = world_bind_group_layouts
    .iter()
    .chain(self.bind_group_layouts.iter())
    .collect();
self.pipeline = BrushPipeline::recreate(device, compiler, &layout_refs, sample_count);
}
identifier_body
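The `bind_group_layout` accessor above asserts that the requested id is at least `PerTexture` and then indexes the pipeline-local `Vec` with that base id subtracted. A minimal stand-alone sketch of the same offset scheme follows; the `LayoutId` enum, the `PerFrame`/`PerEntity` names for the shared groups 0 and 1, and the `String` placeholders standing in for `wgpu::BindGroupLayout` are illustrative assumptions, not the crate's actual types.

// Hypothetical stand-in for BindGroupLayoutId; the brush pipeline itself only
// stores the PerTexture and PerFace layouts, so those start at index 0 locally.
#[allow(dead_code)] // PerFrame/PerEntity exist only to fix the numeric offsets.
#[derive(Clone, Copy, Debug)]
enum LayoutId {
    PerFrame = 0,
    PerEntity = 1,
    PerTexture = 2,
    PerFace = 3,
}

// Placeholder for the pipeline: a Vec of labels instead of wgpu::BindGroupLayout.
struct PipelineLayouts {
    layouts: Vec<String>,
}

impl PipelineLayouts {
    fn layout(&self, id: LayoutId) -> &str {
        // Same guard as bind_group_layout above: shared ids are not stored here.
        assert!(id as usize >= LayoutId::PerTexture as usize);
        &self.layouts[id as usize - LayoutId::PerTexture as usize]
    }
}

fn main() {
    let pipeline = PipelineLayouts {
        layouts: vec!["per-texture layout".to_owned(), "per-face layout".to_owned()],
    };
    assert_eq!(pipeline.layout(LayoutId::PerTexture), "per-texture layout");
    assert_eq!(pipeline.layout(LayoutId::PerFace), "per-face layout");
}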
brush.rs
ty: wgpu::BindingType::Texture { view_dimension: wgpu::TextureViewDimension::D2, sample_type: wgpu::TextureSampleType::Float { filterable: true }, multisampled: false, }, count: None, }, // fullbright texture wgpu::BindGroupLayoutEntry { binding: 1, visibility: wgpu::ShaderStage::FRAGMENT, ty: wgpu::BindingType::Texture { view_dimension: wgpu::TextureViewDimension::D2, sample_type: wgpu::TextureSampleType::Float { filterable: true }, multisampled: false, }, count: None, }, ], &[ // lightmap texture array wgpu::BindGroupLayoutEntry { count: NonZeroU32::new(4), binding: 0, visibility: wgpu::ShaderStage::FRAGMENT, ty: wgpu::BindingType::Texture { view_dimension: wgpu::TextureViewDimension::D2, sample_type: wgpu::TextureSampleType::Float { filterable: true }, multisampled: false, }, }, ], ]; lazy_static! { static ref VERTEX_ATTRIBUTES: [wgpu::VertexAttribute; 5] = wgpu::vertex_attr_array![ // position 0 => Float32x3, // normal 1 => Float32x3, // diffuse texcoord 2 => Float32x2, // lightmap texcoord 3 => Float32x2, // lightmap animation ids 4 => Uint8x4, ]; } impl Pipeline for BrushPipeline { type VertexPushConstants = VertexPushConstants; type SharedPushConstants = SharedPushConstants; type FragmentPushConstants = (); fn name() -> &'static str { "brush" } fn vertex_shader() -> &'static str { include_str!(concat!(env!("CARGO_MANIFEST_DIR"), "/shaders/brush.vert")) } fn fragment_shader() -> &'static str { include_str!(concat!(env!("CARGO_MANIFEST_DIR"), "/shaders/brush.frag")) } // NOTE: if any of the binding indices are changed, they must also be changed in // the corresponding shaders and the BindGroupLayout generation functions. fn bind_group_layout_descriptors() -> Vec<wgpu::BindGroupLayoutDescriptor<'static>> { vec![ // group 2: updated per-texture wgpu::BindGroupLayoutDescriptor { label: Some("brush per-texture bind group"), entries: BIND_GROUP_LAYOUT_ENTRIES[0], }, // group 3: updated per-face wgpu::BindGroupLayoutDescriptor { label: Some("brush per-face bind group"), entries: BIND_GROUP_LAYOUT_ENTRIES[1], }, ] } fn primitive_state() -> wgpu::PrimitiveState { WorldPipelineBase::primitive_state() } fn color_target_states() -> Vec<wgpu::ColorTargetState> { WorldPipelineBase::color_target_states() } fn depth_stencil_state() -> Option<wgpu::DepthStencilState> { WorldPipelineBase::depth_stencil_state() } // NOTE: if the vertex format is changed, this descriptor must also be changed accordingly. 
fn vertex_buffer_layouts() -> Vec<wgpu::VertexBufferLayout<'static>> { vec![wgpu::VertexBufferLayout { array_stride: size_of::<BrushVertex>() as u64, step_mode: wgpu::InputStepMode::Vertex, attributes: &VERTEX_ATTRIBUTES[..], }] } } fn calculate_lightmap_texcoords( position: Vector3<f32>, face: &BspFace, texinfo: &BspTexInfo, ) -> [f32; 2] { let mut s = texinfo.s_vector.dot(position) + texinfo.s_offset; s -= (face.texture_mins[0] as f32 / 16.0).floor() * 16.0; s += 0.5; s /= face.extents[0] as f32; let mut t = texinfo.t_vector.dot(position) + texinfo.t_offset; t -= (face.texture_mins[1] as f32 / 16.0).floor() * 16.0; t += 0.5; t /= face.extents[1] as f32; [s, t] } type Position = [f32; 3]; type Normal = [f32; 3]; type DiffuseTexcoord = [f32; 2]; type LightmapTexcoord = [f32; 2]; type LightmapAnim = [u8; 4]; #[repr(C)] #[derive(Clone, Copy, Debug)] struct BrushVertex { position: Position, normal: Normal, diffuse_texcoord: DiffuseTexcoord, lightmap_texcoord: LightmapTexcoord, lightmap_anim: LightmapAnim, } #[repr(u32)] #[derive(Clone, Copy, Debug)] pub enum TextureKind { Normal = 0, Warp = 1, Sky = 2, } /// A single frame of a brush texture. pub struct BrushTextureFrame { bind_group_id: usize, diffuse: wgpu::Texture, fullbright: wgpu::Texture, diffuse_view: wgpu::TextureView, fullbright_view: wgpu::TextureView, kind: TextureKind, } /// A brush texture. pub enum BrushTexture { /// A brush texture with a single frame. Static(BrushTextureFrame), /// A brush texture with multiple frames. /// /// Animated brush textures advance one frame every 200 milliseconds, i.e., /// they have a framerate of 5 fps. Animated { primary: Vec<BrushTextureFrame>, alternate: Option<Vec<BrushTextureFrame>>, }, } impl BrushTexture { fn kind(&self) -> TextureKind { match self { BrushTexture::Static(ref frame) => frame.kind, BrushTexture::Animated { ref primary,.. } => primary[0].kind, } } } #[derive(Debug)] struct BrushFace { vertices: Range<u32>, min: Vector3<f32>, max: Vector3<f32>, texture_id: usize, lightmap_ids: Vec<usize>, light_styles: [u8; 4], /// Indicates whether the face should be drawn this frame. /// /// This is set to false by default, and will be set to true if the model is /// a worldmodel and the containing leaf is in the PVS. If the model is not /// a worldmodel, this flag is ignored. 
draw_flag: Cell<bool>, } struct BrushLeaf { facelist_ids: Range<usize>, } impl<B> std::convert::From<B> for BrushLeaf where B: std::borrow::Borrow<BspLeaf>, { fn from(bsp_leaf: B) -> Self { let bsp_leaf = bsp_leaf.borrow(); BrushLeaf { facelist_ids: bsp_leaf.facelist_id..bsp_leaf.facelist_id + bsp_leaf.facelist_count, } } } pub struct BrushRendererBuilder { bsp_data: Rc<BspData>, face_range: Range<usize>, leaves: Option<Vec<BrushLeaf>>, per_texture_bind_groups: RefCell<Vec<wgpu::BindGroup>>, per_face_bind_groups: Vec<wgpu::BindGroup>, vertices: Vec<BrushVertex>, faces: Vec<BrushFace>, texture_chains: HashMap<usize, Vec<usize>>, textures: Vec<BrushTexture>, lightmaps: Vec<wgpu::Texture>, //lightmap_views: Vec<wgpu::TextureView>, } impl BrushRendererBuilder { pub fn new(bsp_model: &BspModel, worldmodel: bool) -> BrushRendererBuilder { BrushRendererBuilder { bsp_data: bsp_model.bsp_data().clone(), face_range: bsp_model.face_id..bsp_model.face_id + bsp_model.face_count, leaves: if worldmodel { Some( bsp_model .iter_leaves() .map(|leaf| BrushLeaf::from(leaf)) .collect(), ) } else { None }, per_texture_bind_groups: RefCell::new(Vec::new()), per_face_bind_groups: Vec::new(), vertices: Vec::new(), faces: Vec::new(), texture_chains: HashMap::new(), textures: Vec::new(), lightmaps: Vec::new(), //lightmap_views: Vec::new(), } } fn create_face(&mut self, state: &GraphicsState, face_id: usize) -> BrushFace { let face = &self.bsp_data.faces()[face_id]; let face_vert_id = self.vertices.len(); let texinfo = &self.bsp_data.texinfo()[face.texinfo_id]; let tex = &self.bsp_data.textures()[texinfo.tex_id]; let mut min = Vector3::new(f32::INFINITY, f32::INFINITY, f32::INFINITY); let mut max = Vector3::new(f32::NEG_INFINITY, f32::NEG_INFINITY, f32::NEG_INFINITY); let no_collinear = math::remove_collinear(self.bsp_data.face_iter_vertices(face_id).collect()); for vert in no_collinear.iter() { for component in 0..3 { min[component] = min[component].min(vert[component]); max[component] = max[component].max(vert[component]); } } if tex.name().starts_with("*") { // tessellate the surface so we can do texcoord warping let verts = warp::subdivide(no_collinear); let normal = (verts[0] - verts[1]).cross(verts[2] - verts[1]).normalize(); for vert in verts.into_iter() { self.vertices.push(BrushVertex { position: vert.into(), normal: normal.into(), diffuse_texcoord: [ ((vert.dot(texinfo.s_vector) + texinfo.s_offset) / tex.width() as f32), ((vert.dot(texinfo.t_vector) + texinfo.t_offset) / tex.height() as f32), ], lightmap_texcoord: calculate_lightmap_texcoords(vert.into(), face, texinfo), lightmap_anim: face.light_styles, }) } } else { // expand the vertices into a triangle list. // the vertices are guaranteed to be in valid triangle fan order (that's // how GLQuake renders them) so we expand from triangle fan to triangle // list order. // // v1 is the base vertex, so it remains constant. // v2 takes the previous value of v3. // v3 is the newest vertex. 
let verts = no_collinear; let normal = (verts[0] - verts[1]).cross(verts[2] - verts[1]).normalize(); let mut vert_iter = verts.into_iter(); let v1 = vert_iter.next().unwrap(); let mut v2 = vert_iter.next().unwrap(); for v3 in vert_iter { let tri = &[v1, v2, v3]; // skip collinear points for vert in tri.iter() { self.vertices.push(BrushVertex { position: (*vert).into(), normal: normal.into(), diffuse_texcoord: [ ((vert.dot(texinfo.s_vector) + texinfo.s_offset) / tex.width() as f32), ((vert.dot(texinfo.t_vector) + texinfo.t_offset) / tex.height() as f32), ], lightmap_texcoord: calculate_lightmap_texcoords( (*vert).into(), face, texinfo, ), lightmap_anim: face.light_styles, }); } v2 = v3; } } // build the lightmaps let lightmaps = if!texinfo.special { self.bsp_data.face_lightmaps(face_id) } else { Vec::new() }; let mut lightmap_ids = Vec::new(); for lightmap in lightmaps { let lightmap_data = TextureData::Lightmap(LightmapData { lightmap: Cow::Borrowed(lightmap.data()), }); let texture = state.create_texture(None, lightmap.width(), lightmap.height(), &lightmap_data); let id = self.lightmaps.len(); self.lightmaps.push(texture); //self.lightmap_views //.push(self.lightmaps[id].create_view(&Default::default())); lightmap_ids.push(id); } BrushFace { vertices: face_vert_id as u32..self.vertices.len() as u32, min, max, texture_id: texinfo.tex_id as usize, lightmap_ids, light_styles: face.light_styles, draw_flag: Cell::new(true), } } fn create_per_texture_bind_group( &self, state: &GraphicsState, tex: &BrushTextureFrame, ) -> wgpu::BindGroup { let layout = &state .brush_pipeline() .bind_group_layout(BindGroupLayoutId::PerTexture); let desc = wgpu::BindGroupDescriptor { label: Some("per-texture bind group"), layout, entries: &[ wgpu::BindGroupEntry { binding: 0, resource: wgpu::BindingResource::TextureView(&tex.diffuse_view), }, wgpu::BindGroupEntry { binding: 1, resource: wgpu::BindingResource::TextureView(&tex.fullbright_view), }, ], }; state.device().create_bind_group(&desc) } fn create_per_face_bind_group(&self, state: &GraphicsState, face_id: usize) -> wgpu::BindGroup { let mut lightmap_views: Vec<_> = self.faces[face_id] .lightmap_ids .iter() .map(|id| self.lightmaps[*id].create_view(&Default::default())) .collect(); lightmap_views.resize_with(4, || { state.default_lightmap().create_view(&Default::default()) }); let lightmap_view_refs = lightmap_views.iter().collect::<Vec<_>>(); let layout = &state .brush_pipeline() .bind_group_layout(BindGroupLayoutId::PerFace); let desc = wgpu::BindGroupDescriptor { label: Some("per-face bind group"), layout, entries: &[wgpu::BindGroupEntry { binding: 0, resource: wgpu::BindingResource::TextureViewArray(&lightmap_view_refs[..]), }], }; state.device().create_bind_group(&desc) } fn create_brush_texture_frame<S>( &self, state: &GraphicsState, mipmap: &[u8], width: u32, height: u32, name: S, ) -> BrushTextureFrame where S: AsRef<str>, { let name = name.as_ref(); let (diffuse_data, fullbright_data) = state.palette().translate(mipmap); let diffuse = state.create_texture(None, width, height, &TextureData::Diffuse(diffuse_data)); let fullbright = state.create_texture( None, width, height, &TextureData::Fullbright(fullbright_data), ); let diffuse_view = diffuse.create_view(&Default::default()); let fullbright_view = fullbright.create_view(&Default::default()); let kind = if name.starts_with("sky") { TextureKind::Sky } else if name.starts_with("*") { TextureKind::Warp } else { TextureKind::Normal }; let mut frame = BrushTextureFrame { bind_group_id: 0, diffuse, 
fullbright, diffuse_view, fullbright_view, kind, }; // generate texture bind group let per_texture_bind_group = self.create_per_texture_bind_group(state, &frame); let bind_group_id = self.per_texture_bind_groups.borrow().len(); self.per_texture_bind_groups .borrow_mut() .push(per_texture_bind_group); frame.bind_group_id = bind_group_id; frame } pub fn create_brush_texture(&self, state: &GraphicsState, tex: &BspTexture) -> BrushTexture { // TODO: upload mipmaps let (width, height) = tex.dimensions(); match tex.kind() { // sequence animated textures BspTextureKind::Animated { primary, alternate } => {
width, height, tex.name(), ) })
let primary_frames: Vec<_> = primary .iter() .map(|f| { self.create_brush_texture_frame( state, f.mipmap(BspTextureMipmap::Full), width, height, tex.name(), ) }) .collect(); let alternate_frames: Option<Vec<_>> = alternate.as_ref().map(|a| { a.iter() .map(|f| { self.create_brush_texture_frame( state, f.mipmap(BspTextureMipmap::Full),
conditional_block
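The doc comment on `BrushTexture::Animated` above fixes the animation rate at one frame every 200 milliseconds (5 fps). A small self-contained sketch of selecting the current frame from an elapsed `Duration` is shown below; the `current_frame` helper and the plain `&str` frames standing in for `BrushTextureFrame` values are assumptions for illustration only.

use std::time::Duration;

/// Milliseconds each animation frame is shown (5 fps), per the doc comment above.
const FRAME_TIME_MS: u128 = 200;

/// Picks which frame of an animated brush texture to draw, given the time since
/// the level started. `frames` stands in for a `Vec<BrushTextureFrame>`.
fn current_frame<'a>(frames: &[&'a str], elapsed: Duration) -> &'a str {
    let index = (elapsed.as_millis() / FRAME_TIME_MS) as usize % frames.len();
    frames[index]
}

fn main() {
    let frames = ["+0lava", "+1lava", "+2lava"];
    assert_eq!(current_frame(&frames, Duration::from_millis(0)), "+0lava");
    assert_eq!(current_frame(&frames, Duration::from_millis(250)), "+1lava");
    // After 650 ms the index wraps back around to the first frame.
    assert_eq!(current_frame(&frames, Duration::from_millis(650)), "+0lava");
}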
brush.rs
&self) -> &[wgpu::BindGroupLayout] { &self.bind_group_layouts } pub fn bind_group_layout(&self, id: BindGroupLayoutId) -> &wgpu::BindGroupLayout { assert!(id as usize >= BindGroupLayoutId::PerTexture as usize); &self.bind_group_layouts[id as usize - BindGroupLayoutId::PerTexture as usize] } } #[repr(C)] #[derive(Copy, Clone, Debug)] pub struct VertexPushConstants { pub transform: Matrix4<f32>, pub model_view: Matrix4<f32>, } #[repr(C)] #[derive(Copy, Clone, Debug)] pub struct SharedPushConstants { pub texture_kind: u32, } const BIND_GROUP_LAYOUT_ENTRIES: &[&[wgpu::BindGroupLayoutEntry]] = &[ &[ // diffuse texture, updated once per face wgpu::BindGroupLayoutEntry { binding: 0, visibility: wgpu::ShaderStage::FRAGMENT, ty: wgpu::BindingType::Texture { view_dimension: wgpu::TextureViewDimension::D2, sample_type: wgpu::TextureSampleType::Float { filterable: true }, multisampled: false, }, count: None, }, // fullbright texture wgpu::BindGroupLayoutEntry { binding: 1, visibility: wgpu::ShaderStage::FRAGMENT, ty: wgpu::BindingType::Texture { view_dimension: wgpu::TextureViewDimension::D2, sample_type: wgpu::TextureSampleType::Float { filterable: true }, multisampled: false, }, count: None, }, ], &[ // lightmap texture array wgpu::BindGroupLayoutEntry { count: NonZeroU32::new(4), binding: 0, visibility: wgpu::ShaderStage::FRAGMENT, ty: wgpu::BindingType::Texture { view_dimension: wgpu::TextureViewDimension::D2, sample_type: wgpu::TextureSampleType::Float { filterable: true }, multisampled: false, }, }, ], ]; lazy_static! { static ref VERTEX_ATTRIBUTES: [wgpu::VertexAttribute; 5] = wgpu::vertex_attr_array![ // position 0 => Float32x3, // normal 1 => Float32x3, // diffuse texcoord 2 => Float32x2, // lightmap texcoord 3 => Float32x2, // lightmap animation ids 4 => Uint8x4, ]; } impl Pipeline for BrushPipeline { type VertexPushConstants = VertexPushConstants; type SharedPushConstants = SharedPushConstants; type FragmentPushConstants = (); fn name() -> &'static str { "brush" } fn vertex_shader() -> &'static str { include_str!(concat!(env!("CARGO_MANIFEST_DIR"), "/shaders/brush.vert")) } fn fragment_shader() -> &'static str { include_str!(concat!(env!("CARGO_MANIFEST_DIR"), "/shaders/brush.frag")) } // NOTE: if any of the binding indices are changed, they must also be changed in // the corresponding shaders and the BindGroupLayout generation functions. fn bind_group_layout_descriptors() -> Vec<wgpu::BindGroupLayoutDescriptor<'static>> { vec![ // group 2: updated per-texture wgpu::BindGroupLayoutDescriptor { label: Some("brush per-texture bind group"), entries: BIND_GROUP_LAYOUT_ENTRIES[0], }, // group 3: updated per-face wgpu::BindGroupLayoutDescriptor { label: Some("brush per-face bind group"), entries: BIND_GROUP_LAYOUT_ENTRIES[1], }, ] } fn primitive_state() -> wgpu::PrimitiveState { WorldPipelineBase::primitive_state() } fn color_target_states() -> Vec<wgpu::ColorTargetState> { WorldPipelineBase::color_target_states() } fn depth_stencil_state() -> Option<wgpu::DepthStencilState> { WorldPipelineBase::depth_stencil_state() } // NOTE: if the vertex format is changed, this descriptor must also be changed accordingly. 
fn vertex_buffer_layouts() -> Vec<wgpu::VertexBufferLayout<'static>> { vec![wgpu::VertexBufferLayout { array_stride: size_of::<BrushVertex>() as u64, step_mode: wgpu::InputStepMode::Vertex, attributes: &VERTEX_ATTRIBUTES[..], }] } } fn calculate_lightmap_texcoords( position: Vector3<f32>, face: &BspFace, texinfo: &BspTexInfo, ) -> [f32; 2] { let mut s = texinfo.s_vector.dot(position) + texinfo.s_offset; s -= (face.texture_mins[0] as f32 / 16.0).floor() * 16.0; s += 0.5; s /= face.extents[0] as f32; let mut t = texinfo.t_vector.dot(position) + texinfo.t_offset; t -= (face.texture_mins[1] as f32 / 16.0).floor() * 16.0; t += 0.5; t /= face.extents[1] as f32; [s, t] } type Position = [f32; 3]; type Normal = [f32; 3]; type DiffuseTexcoord = [f32; 2]; type LightmapTexcoord = [f32; 2]; type LightmapAnim = [u8; 4]; #[repr(C)] #[derive(Clone, Copy, Debug)] struct BrushVertex { position: Position, normal: Normal, diffuse_texcoord: DiffuseTexcoord, lightmap_texcoord: LightmapTexcoord, lightmap_anim: LightmapAnim, } #[repr(u32)] #[derive(Clone, Copy, Debug)] pub enum TextureKind { Normal = 0, Warp = 1, Sky = 2, } /// A single frame of a brush texture. pub struct BrushTextureFrame { bind_group_id: usize, diffuse: wgpu::Texture, fullbright: wgpu::Texture, diffuse_view: wgpu::TextureView, fullbright_view: wgpu::TextureView, kind: TextureKind, } /// A brush texture. pub enum BrushTexture { /// A brush texture with a single frame. Static(BrushTextureFrame), /// A brush texture with multiple frames. /// /// Animated brush textures advance one frame every 200 milliseconds, i.e., /// they have a framerate of 5 fps. Animated { primary: Vec<BrushTextureFrame>, alternate: Option<Vec<BrushTextureFrame>>, }, } impl BrushTexture { fn kind(&self) -> TextureKind { match self { BrushTexture::Static(ref frame) => frame.kind, BrushTexture::Animated { ref primary,.. } => primary[0].kind, } } } #[derive(Debug)] struct BrushFace { vertices: Range<u32>, min: Vector3<f32>, max: Vector3<f32>, texture_id: usize, lightmap_ids: Vec<usize>, light_styles: [u8; 4], /// Indicates whether the face should be drawn this frame. /// /// This is set to false by default, and will be set to true if the model is /// a worldmodel and the containing leaf is in the PVS. If the model is not /// a worldmodel, this flag is ignored. 
draw_flag: Cell<bool>, } struct BrushLeaf { facelist_ids: Range<usize>, } impl<B> std::convert::From<B> for BrushLeaf where B: std::borrow::Borrow<BspLeaf>, { fn from(bsp_leaf: B) -> Self { let bsp_leaf = bsp_leaf.borrow(); BrushLeaf { facelist_ids: bsp_leaf.facelist_id..bsp_leaf.facelist_id + bsp_leaf.facelist_count, } } } pub struct BrushRendererBuilder { bsp_data: Rc<BspData>, face_range: Range<usize>, leaves: Option<Vec<BrushLeaf>>, per_texture_bind_groups: RefCell<Vec<wgpu::BindGroup>>, per_face_bind_groups: Vec<wgpu::BindGroup>, vertices: Vec<BrushVertex>, faces: Vec<BrushFace>, texture_chains: HashMap<usize, Vec<usize>>, textures: Vec<BrushTexture>, lightmaps: Vec<wgpu::Texture>, //lightmap_views: Vec<wgpu::TextureView>, } impl BrushRendererBuilder { pub fn new(bsp_model: &BspModel, worldmodel: bool) -> BrushRendererBuilder { BrushRendererBuilder { bsp_data: bsp_model.bsp_data().clone(), face_range: bsp_model.face_id..bsp_model.face_id + bsp_model.face_count, leaves: if worldmodel { Some( bsp_model .iter_leaves() .map(|leaf| BrushLeaf::from(leaf)) .collect(), ) } else { None }, per_texture_bind_groups: RefCell::new(Vec::new()), per_face_bind_groups: Vec::new(), vertices: Vec::new(), faces: Vec::new(), texture_chains: HashMap::new(), textures: Vec::new(), lightmaps: Vec::new(), //lightmap_views: Vec::new(), } } fn create_face(&mut self, state: &GraphicsState, face_id: usize) -> BrushFace { let face = &self.bsp_data.faces()[face_id]; let face_vert_id = self.vertices.len(); let texinfo = &self.bsp_data.texinfo()[face.texinfo_id]; let tex = &self.bsp_data.textures()[texinfo.tex_id]; let mut min = Vector3::new(f32::INFINITY, f32::INFINITY, f32::INFINITY); let mut max = Vector3::new(f32::NEG_INFINITY, f32::NEG_INFINITY, f32::NEG_INFINITY); let no_collinear = math::remove_collinear(self.bsp_data.face_iter_vertices(face_id).collect()); for vert in no_collinear.iter() { for component in 0..3 { min[component] = min[component].min(vert[component]); max[component] = max[component].max(vert[component]); } } if tex.name().starts_with("*") { // tessellate the surface so we can do texcoord warping let verts = warp::subdivide(no_collinear); let normal = (verts[0] - verts[1]).cross(verts[2] - verts[1]).normalize(); for vert in verts.into_iter() { self.vertices.push(BrushVertex { position: vert.into(), normal: normal.into(), diffuse_texcoord: [ ((vert.dot(texinfo.s_vector) + texinfo.s_offset) / tex.width() as f32), ((vert.dot(texinfo.t_vector) + texinfo.t_offset) / tex.height() as f32), ], lightmap_texcoord: calculate_lightmap_texcoords(vert.into(), face, texinfo), lightmap_anim: face.light_styles, }) } } else { // expand the vertices into a triangle list. // the vertices are guaranteed to be in valid triangle fan order (that's // how GLQuake renders them) so we expand from triangle fan to triangle // list order. // // v1 is the base vertex, so it remains constant. // v2 takes the previous value of v3. // v3 is the newest vertex. 
let verts = no_collinear; let normal = (verts[0] - verts[1]).cross(verts[2] - verts[1]).normalize(); let mut vert_iter = verts.into_iter(); let v1 = vert_iter.next().unwrap(); let mut v2 = vert_iter.next().unwrap(); for v3 in vert_iter { let tri = &[v1, v2, v3]; // skip collinear points for vert in tri.iter() { self.vertices.push(BrushVertex { position: (*vert).into(), normal: normal.into(), diffuse_texcoord: [ ((vert.dot(texinfo.s_vector) + texinfo.s_offset) / tex.width() as f32), ((vert.dot(texinfo.t_vector) + texinfo.t_offset) / tex.height() as f32), ], lightmap_texcoord: calculate_lightmap_texcoords( (*vert).into(), face, texinfo, ), lightmap_anim: face.light_styles, }); } v2 = v3; } } // build the lightmaps let lightmaps = if!texinfo.special { self.bsp_data.face_lightmaps(face_id) } else { Vec::new() }; let mut lightmap_ids = Vec::new(); for lightmap in lightmaps { let lightmap_data = TextureData::Lightmap(LightmapData { lightmap: Cow::Borrowed(lightmap.data()), }); let texture = state.create_texture(None, lightmap.width(), lightmap.height(), &lightmap_data); let id = self.lightmaps.len(); self.lightmaps.push(texture); //self.lightmap_views //.push(self.lightmaps[id].create_view(&Default::default())); lightmap_ids.push(id); } BrushFace { vertices: face_vert_id as u32..self.vertices.len() as u32, min, max, texture_id: texinfo.tex_id as usize, lightmap_ids, light_styles: face.light_styles, draw_flag: Cell::new(true), } } fn create_per_texture_bind_group( &self, state: &GraphicsState, tex: &BrushTextureFrame, ) -> wgpu::BindGroup { let layout = &state .brush_pipeline() .bind_group_layout(BindGroupLayoutId::PerTexture); let desc = wgpu::BindGroupDescriptor { label: Some("per-texture bind group"), layout, entries: &[ wgpu::BindGroupEntry { binding: 0, resource: wgpu::BindingResource::TextureView(&tex.diffuse_view), }, wgpu::BindGroupEntry { binding: 1, resource: wgpu::BindingResource::TextureView(&tex.fullbright_view), }, ], }; state.device().create_bind_group(&desc) } fn create_per_face_bind_group(&self, state: &GraphicsState, face_id: usize) -> wgpu::BindGroup { let mut lightmap_views: Vec<_> = self.faces[face_id] .lightmap_ids .iter() .map(|id| self.lightmaps[*id].create_view(&Default::default())) .collect(); lightmap_views.resize_with(4, || { state.default_lightmap().create_view(&Default::default()) }); let lightmap_view_refs = lightmap_views.iter().collect::<Vec<_>>(); let layout = &state .brush_pipeline() .bind_group_layout(BindGroupLayoutId::PerFace); let desc = wgpu::BindGroupDescriptor { label: Some("per-face bind group"), layout, entries: &[wgpu::BindGroupEntry { binding: 0, resource: wgpu::BindingResource::TextureViewArray(&lightmap_view_refs[..]), }], }; state.device().create_bind_group(&desc) } fn create_brush_texture_frame<S>( &self, state: &GraphicsState, mipmap: &[u8], width: u32, height: u32, name: S, ) -> BrushTextureFrame where S: AsRef<str>, { let name = name.as_ref(); let (diffuse_data, fullbright_data) = state.palette().translate(mipmap); let diffuse = state.create_texture(None, width, height, &TextureData::Diffuse(diffuse_data)); let fullbright = state.create_texture( None, width, height, &TextureData::Fullbright(fullbright_data), ); let diffuse_view = diffuse.create_view(&Default::default()); let fullbright_view = fullbright.create_view(&Default::default()); let kind = if name.starts_with("sky") { TextureKind::Sky } else if name.starts_with("*") { TextureKind::Warp } else { TextureKind::Normal }; let mut frame = BrushTextureFrame { bind_group_id: 0, diffuse, 
fullbright, diffuse_view, fullbright_view, kind, }; // generate texture bind group let per_texture_bind_group = self.create_per_texture_bind_group(state, &frame); let bind_group_id = self.per_texture_bind_groups.borrow().len(); self.per_texture_bind_groups .borrow_mut() .push(per_texture_bind_group); frame.bind_group_id = bind_group_id; frame } pub fn create_brush_texture(&self, state: &GraphicsState, tex: &BspTexture) -> BrushTexture { // TODO: upload mipmaps let (width, height) = tex.dimensions(); match tex.kind() { // sequence animated textures BspTextureKind::Animated { primary, alternate } => { let primary_frames: Vec<_> = primary .iter() .map(|f| { self.create_brush_texture_frame( state, f.mipmap(BspTextureMipmap::Full), width, height, tex.name(), ) }) .collect(); let alternate_frames: Option<Vec<_>> = alternate.as_ref().map(|a| {
ind_group_layouts(
identifier_name
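The comment block inside `create_face` above describes expanding a triangle fan into a triangle list: v1 stays fixed, v2 takes the previous value of v3, and v3 is the newest vertex. A minimal sketch of that expansion over plain values, independent of the BSP vertex types, could look like this (the `fan_to_triangle_list` name is hypothetical):

/// Expands vertices given in triangle-fan order into triangle-list order,
/// mirroring the loop structure used in `create_face` above.
fn fan_to_triangle_list<T: Copy>(fan: &[T]) -> Vec<[T; 3]> {
    let mut tris = Vec::new();
    let mut iter = fan.iter().copied();
    let v1 = match iter.next() {
        Some(v) => v,
        None => return tris,
    };
    let mut v2 = match iter.next() {
        Some(v) => v,
        None => return tris,
    };
    for v3 in iter {
        // v1 stays fixed, v2 is the previous vertex, v3 is the newest one.
        tris.push([v1, v2, v3]);
        v2 = v3;
    }
    tris
}

fn main() {
    // A quad 0-1-2-3 in fan order becomes two triangles.
    let tris = fan_to_triangle_list(&[0u32, 1, 2, 3]);
    assert_eq!(tris, vec![[0u32, 1, 2], [0, 2, 3]]);
}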
glsl3.rs
use std::mem::size_of; use std::ptr; use crossfont::RasterizedGlyph; use log::info; use alacritty_terminal::term::cell::Flags; use crate::display::content::RenderableCell; use crate::display::SizeInfo; use crate::gl; use crate::gl::types::*; use crate::renderer::shader::{ShaderProgram, ShaderVersion}; use crate::renderer::{cstr, Error}; use super::atlas::{Atlas, ATLAS_SIZE}; use super::{ Glyph, LoadGlyph, LoaderApi, RenderingGlyphFlags, RenderingPass, TextRenderApi, TextRenderBatch, TextRenderer, TextShader, }; // Shader source. pub static TEXT_SHADER_F: &str = include_str!("../../../res/glsl3/text.f.glsl"); static TEXT_SHADER_V: &str = include_str!("../../../res/glsl3/text.v.glsl"); /// Maximum items to be drawn in a batch. const BATCH_MAX: usize = 0x1_0000; #[derive(Debug)] pub struct Glsl3Renderer { program: TextShaderProgram, vao: GLuint, ebo: GLuint, vbo_instance: GLuint, atlas: Vec<Atlas>, current_atlas: usize, active_tex: GLuint, batch: Batch, } impl Glsl3Renderer { pub fn new() -> Result<Self, Error> { info!("Using OpenGL 3.3 renderer"); let program = TextShaderProgram::new(ShaderVersion::Glsl3)?; let mut vao: GLuint = 0; let mut ebo: GLuint = 0; let mut vbo_instance: GLuint = 0; unsafe { gl::Enable(gl::BLEND); gl::BlendFunc(gl::SRC1_COLOR, gl::ONE_MINUS_SRC1_COLOR); // Disable depth mask, as the renderer never uses depth tests. gl::DepthMask(gl::FALSE); gl::GenVertexArrays(1, &mut vao); gl::GenBuffers(1, &mut ebo); gl::GenBuffers(1, &mut vbo_instance); gl::BindVertexArray(vao); // --------------------- // Set up element buffer // --------------------- let indices: [u32; 6] = [0, 1, 3, 1, 2, 3]; gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, ebo); gl::BufferData( gl::ELEMENT_ARRAY_BUFFER, (6 * size_of::<u32>()) as isize, indices.as_ptr() as *const _, gl::STATIC_DRAW, ); // ---------------------------- // Setup vertex instance buffer // ---------------------------- gl::BindBuffer(gl::ARRAY_BUFFER, vbo_instance); gl::BufferData( gl::ARRAY_BUFFER, (BATCH_MAX * size_of::<InstanceData>()) as isize, ptr::null(), gl::STREAM_DRAW, ); let mut index = 0; let mut size = 0; macro_rules! add_attr { ($count:expr, $gl_type:expr, $type:ty) => { gl::VertexAttribPointer( index, $count, $gl_type, gl::FALSE, size_of::<InstanceData>() as i32, size as *const _, ); gl::EnableVertexAttribArray(index); gl::VertexAttribDivisor(index, 1); #[allow(unused_assignments)] { size += $count * size_of::<$type>(); index += 1; } }; } // Coords. add_attr!(2, gl::UNSIGNED_SHORT, u16); // Glyph offset and size. add_attr!(4, gl::SHORT, i16); // UV offset. add_attr!(4, gl::FLOAT, f32); // Color and cell flags. // // These are packed together because of an OpenGL driver issue on macOS, which caused a // `vec3(u8)` text color and a `u8` cell flags to increase the rendering time by a // huge margin. add_attr!(4, gl::UNSIGNED_BYTE, u8); // Background color. add_attr!(4, gl::UNSIGNED_BYTE, u8); // Cleanup. 
gl::BindVertexArray(0); gl::BindBuffer(gl::ARRAY_BUFFER, 0); gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, 0); } Ok(Self { program, vao, ebo, vbo_instance, atlas: vec![Atlas::new(ATLAS_SIZE, false)], current_atlas: 0, active_tex: 0, batch: Batch::new(), }) } } impl<'a> TextRenderer<'a> for Glsl3Renderer { type RenderApi = RenderApi<'a>; type RenderBatch = Batch; type Shader = TextShaderProgram; fn with_api<'b: 'a, F, T>(&'b mut self, size_info: &'b SizeInfo, func: F) -> T where F: FnOnce(Self::RenderApi) -> T, { unsafe { gl::UseProgram(self.program.id()); self.program.set_term_uniforms(size_info); gl::BindVertexArray(self.vao); gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, self.ebo); gl::BindBuffer(gl::ARRAY_BUFFER, self.vbo_instance); gl::ActiveTexture(gl::TEXTURE0); } let res = func(RenderApi { active_tex: &mut self.active_tex, batch: &mut self.batch, atlas: &mut self.atlas, current_atlas: &mut self.current_atlas, program: &mut self.program, }); unsafe { gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, 0); gl::BindBuffer(gl::ARRAY_BUFFER, 0); gl::BindVertexArray(0); gl::UseProgram(0); } res } fn program(&self) -> &Self::Shader { &self.program } fn loader_api(&mut self) -> LoaderApi<'_> { LoaderApi { active_tex: &mut self.active_tex, atlas: &mut self.atlas, current_atlas: &mut self.current_atlas, } } } impl Drop for Glsl3Renderer { fn drop(&mut self) { unsafe { gl::DeleteBuffers(1, &self.vbo_instance); gl::DeleteBuffers(1, &self.ebo); gl::DeleteVertexArrays(1, &self.vao); } } } #[derive(Debug)] pub struct RenderApi<'a> { active_tex: &'a mut GLuint, batch: &'a mut Batch, atlas: &'a mut Vec<Atlas>, current_atlas: &'a mut usize, program: &'a mut TextShaderProgram, } impl<'a> TextRenderApi<Batch> for RenderApi<'a> { fn batch(&mut self) -> &mut Batch { self.batch } fn render_batch(&mut self) { unsafe { gl::BufferSubData( gl::ARRAY_BUFFER, 0, self.batch.size() as isize, self.batch.instances.as_ptr() as *const _, ); } // Bind texture if necessary. if *self.active_tex!= self.batch.tex()
unsafe { self.program.set_rendering_pass(RenderingPass::Background); gl::DrawElementsInstanced( gl::TRIANGLES, 6, gl::UNSIGNED_INT, ptr::null(), self.batch.len() as GLsizei, ); self.program.set_rendering_pass(RenderingPass::SubpixelPass1); gl::DrawElementsInstanced( gl::TRIANGLES, 6, gl::UNSIGNED_INT, ptr::null(), self.batch.len() as GLsizei, ); } self.batch.clear(); } } impl<'a> LoadGlyph for RenderApi<'a> { fn load_glyph(&mut self, rasterized: &RasterizedGlyph) -> Glyph { Atlas::load_glyph(self.active_tex, self.atlas, self.current_atlas, rasterized) } fn clear(&mut self) { Atlas::clear_atlas(self.atlas, self.current_atlas) } } impl<'a> Drop for RenderApi<'a> { fn drop(&mut self) { if!self.batch.is_empty() { self.render_batch(); } } } #[derive(Debug)] #[repr(C)] struct InstanceData { // Coords. col: u16, row: u16, // Glyph offset. left: i16, top: i16, // Glyph size. width: i16, height: i16, // UV offset. uv_left: f32, uv_bot: f32, // uv scale. uv_width: f32, uv_height: f32, // Color. r: u8, g: u8, b: u8, // Cell flags like multicolor or fullwidth character. cell_flags: RenderingGlyphFlags, // Background color. bg_r: u8, bg_g: u8, bg_b: u8, bg_a: u8, } #[derive(Debug, Default)] pub struct Batch { tex: GLuint, instances: Vec<InstanceData>, } impl TextRenderBatch for Batch { #[inline] fn tex(&self) -> GLuint { self.tex } #[inline] fn full(&self) -> bool { self.capacity() == self.len() } #[inline] fn is_empty(&self) -> bool { self.len() == 0 } fn add_item(&mut self, cell: &RenderableCell, glyph: &Glyph, _: &SizeInfo) { if self.is_empty() { self.tex = glyph.tex_id; } let mut cell_flags = RenderingGlyphFlags::empty(); cell_flags.set(RenderingGlyphFlags::COLORED, glyph.multicolor); cell_flags.set(RenderingGlyphFlags::WIDE_CHAR, cell.flags.contains(Flags::WIDE_CHAR)); self.instances.push(InstanceData { col: cell.point.column.0 as u16, row: cell.point.line as u16, top: glyph.top, left: glyph.left, width: glyph.width, height: glyph.height, uv_bot: glyph.uv_bot, uv_left: glyph.uv_left, uv_width: glyph.uv_width, uv_height: glyph.uv_height, r: cell.fg.r, g: cell.fg.g, b: cell.fg.b, cell_flags, bg_r: cell.bg.r, bg_g: cell.bg.g, bg_b: cell.bg.b, bg_a: (cell.bg_alpha * 255.0) as u8, }); } } impl Batch { #[inline] pub fn new() -> Self { Self { tex: 0, instances: Vec::with_capacity(BATCH_MAX) } } #[inline] pub fn len(&self) -> usize { self.instances.len() } #[inline] pub fn capacity(&self) -> usize { BATCH_MAX } #[inline] pub fn size(&self) -> usize { self.len() * size_of::<InstanceData>() } pub fn clear(&mut self) { self.tex = 0; self.instances.clear(); } } /// Text drawing program. /// /// Uniforms are prefixed with "u", and vertex attributes are prefixed with "a". #[derive(Debug)] pub struct TextShaderProgram { /// Shader program. program: ShaderProgram, /// Projection scale and offset uniform. u_projection: GLint, /// Cell dimensions (pixels). u_cell_dim: GLint, /// Background pass flag. /// /// Rendering is split into two passes; one for backgrounds, and one for text. 
u_rendering_pass: GLint, } impl TextShaderProgram { pub fn new(shader_version: ShaderVersion) -> Result<TextShaderProgram, Error> { let program = ShaderProgram::new(shader_version, None, TEXT_SHADER_V, TEXT_SHADER_F)?; Ok(Self { u_projection: program.get_uniform_location(cstr!("projection"))?, u_cell_dim: program.get_uniform_location(cstr!("cellDim"))?, u_rendering_pass: program.get_uniform_location(cstr!("renderingPass"))?, program, }) } fn set_term_uniforms(&self, props: &SizeInfo) { unsafe { gl::Uniform2f(self.u_cell_dim, props.cell_width(), props.cell_height()); } } fn set_rendering_pass(&self, rendering_pass: RenderingPass) { let value = match rendering_pass { RenderingPass::Background | RenderingPass::SubpixelPass1 => rendering_pass as i32, _ => unreachable!("provided pass is not supported in GLSL3 renderer"), }; unsafe { gl::Uniform1i(self.u_rendering_pass, value); } } } impl TextShader for TextShaderProgram { fn id(&self) -> GLuint { self.program.id() } fn projection_uniform(&self) -> GLint { self.u_projection } }
{
    unsafe {
        gl::BindTexture(gl::TEXTURE_2D, self.batch.tex());
    }
    *self.active_tex = self.batch.tex();
}
conditional_block
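The `add_attr!` macro in the GLSL3 renderer above accumulates a running attribute index and byte offset while describing `InstanceData` to OpenGL. The same bookkeeping can be sketched without any GL calls; the `Attribute` record and `instance_layout` helper below are illustrative assumptions, with component sizes chosen to match the attributes listed above (and assuming the cell-flags field occupies a single byte).

use std::mem::size_of;

/// One attribute as it would be passed to gl::VertexAttribPointer:
/// shader location, component count, and byte offset into the instance struct.
#[derive(Debug, PartialEq)]
struct Attribute {
    index: u32,
    count: usize,
    offset: usize,
}

/// Mirrors the running offset/index bookkeeping done by the add_attr! macro.
fn instance_layout(spec: &[(usize, usize)]) -> (Vec<Attribute>, usize) {
    let mut attrs = Vec::new();
    let mut index = 0u32;
    let mut offset = 0usize;
    for &(count, component_size) in spec {
        attrs.push(Attribute { index, count, offset });
        offset += count * component_size;
        index += 1;
    }
    // After the last attribute, the accumulated offset is the instance stride.
    (attrs, offset)
}

fn main() {
    // Coords (2 x u16), glyph offset/size (4 x i16), UV offset/scale (4 x f32),
    // fg color + cell flags (4 x u8), bg color (4 x u8), as listed above.
    let (attrs, stride) = instance_layout(&[
        (2, size_of::<u16>()),
        (4, size_of::<i16>()),
        (4, size_of::<f32>()),
        (4, size_of::<u8>()),
        (4, size_of::<u8>()),
    ]);
    assert_eq!(stride, 36);
    assert_eq!(attrs[2], Attribute { index: 2, count: 4, offset: 12 });
}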
glsl3.rs
use std::mem::size_of; use std::ptr; use crossfont::RasterizedGlyph; use log::info; use alacritty_terminal::term::cell::Flags; use crate::display::content::RenderableCell; use crate::display::SizeInfo; use crate::gl; use crate::gl::types::*; use crate::renderer::shader::{ShaderProgram, ShaderVersion}; use crate::renderer::{cstr, Error}; use super::atlas::{Atlas, ATLAS_SIZE}; use super::{ Glyph, LoadGlyph, LoaderApi, RenderingGlyphFlags, RenderingPass, TextRenderApi, TextRenderBatch, TextRenderer, TextShader, }; // Shader source. pub static TEXT_SHADER_F: &str = include_str!("../../../res/glsl3/text.f.glsl"); static TEXT_SHADER_V: &str = include_str!("../../../res/glsl3/text.v.glsl"); /// Maximum items to be drawn in a batch. const BATCH_MAX: usize = 0x1_0000; #[derive(Debug)] pub struct Glsl3Renderer { program: TextShaderProgram, vao: GLuint, ebo: GLuint, vbo_instance: GLuint, atlas: Vec<Atlas>, current_atlas: usize, active_tex: GLuint, batch: Batch, } impl Glsl3Renderer { pub fn new() -> Result<Self, Error> { info!("Using OpenGL 3.3 renderer"); let program = TextShaderProgram::new(ShaderVersion::Glsl3)?; let mut vao: GLuint = 0; let mut ebo: GLuint = 0; let mut vbo_instance: GLuint = 0; unsafe { gl::Enable(gl::BLEND); gl::BlendFunc(gl::SRC1_COLOR, gl::ONE_MINUS_SRC1_COLOR); // Disable depth mask, as the renderer never uses depth tests. gl::DepthMask(gl::FALSE); gl::GenVertexArrays(1, &mut vao); gl::GenBuffers(1, &mut ebo); gl::GenBuffers(1, &mut vbo_instance); gl::BindVertexArray(vao); // --------------------- // Set up element buffer // --------------------- let indices: [u32; 6] = [0, 1, 3, 1, 2, 3]; gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, ebo); gl::BufferData( gl::ELEMENT_ARRAY_BUFFER, (6 * size_of::<u32>()) as isize, indices.as_ptr() as *const _, gl::STATIC_DRAW, ); // ---------------------------- // Setup vertex instance buffer // ---------------------------- gl::BindBuffer(gl::ARRAY_BUFFER, vbo_instance); gl::BufferData( gl::ARRAY_BUFFER, (BATCH_MAX * size_of::<InstanceData>()) as isize, ptr::null(), gl::STREAM_DRAW, ); let mut index = 0; let mut size = 0; macro_rules! add_attr { ($count:expr, $gl_type:expr, $type:ty) => { gl::VertexAttribPointer( index, $count, $gl_type, gl::FALSE, size_of::<InstanceData>() as i32, size as *const _, ); gl::EnableVertexAttribArray(index); gl::VertexAttribDivisor(index, 1); #[allow(unused_assignments)] { size += $count * size_of::<$type>(); index += 1; } }; } // Coords. add_attr!(2, gl::UNSIGNED_SHORT, u16); // Glyph offset and size. add_attr!(4, gl::SHORT, i16); // UV offset. add_attr!(4, gl::FLOAT, f32); // Color and cell flags. // // These are packed together because of an OpenGL driver issue on macOS, which caused a // `vec3(u8)` text color and a `u8` cell flags to increase the rendering time by a // huge margin. add_attr!(4, gl::UNSIGNED_BYTE, u8); // Background color. add_attr!(4, gl::UNSIGNED_BYTE, u8); // Cleanup. 
gl::BindVertexArray(0); gl::BindBuffer(gl::ARRAY_BUFFER, 0); gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, 0); } Ok(Self { program, vao, ebo, vbo_instance, atlas: vec![Atlas::new(ATLAS_SIZE, false)], current_atlas: 0, active_tex: 0, batch: Batch::new(), }) } } impl<'a> TextRenderer<'a> for Glsl3Renderer { type RenderApi = RenderApi<'a>; type RenderBatch = Batch; type Shader = TextShaderProgram; fn with_api<'b: 'a, F, T>(&'b mut self, size_info: &'b SizeInfo, func: F) -> T where F: FnOnce(Self::RenderApi) -> T, { unsafe { gl::UseProgram(self.program.id()); self.program.set_term_uniforms(size_info); gl::BindVertexArray(self.vao); gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, self.ebo); gl::BindBuffer(gl::ARRAY_BUFFER, self.vbo_instance); gl::ActiveTexture(gl::TEXTURE0); } let res = func(RenderApi { active_tex: &mut self.active_tex, batch: &mut self.batch, atlas: &mut self.atlas, current_atlas: &mut self.current_atlas, program: &mut self.program, }); unsafe { gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, 0); gl::BindBuffer(gl::ARRAY_BUFFER, 0); gl::BindVertexArray(0); gl::UseProgram(0); } res } fn program(&self) -> &Self::Shader { &self.program } fn loader_api(&mut self) -> LoaderApi<'_> { LoaderApi { active_tex: &mut self.active_tex, atlas: &mut self.atlas, current_atlas: &mut self.current_atlas, } } } impl Drop for Glsl3Renderer { fn drop(&mut self) { unsafe { gl::DeleteBuffers(1, &self.vbo_instance); gl::DeleteBuffers(1, &self.ebo); gl::DeleteVertexArrays(1, &self.vao); } } } #[derive(Debug)] pub struct RenderApi<'a> { active_tex: &'a mut GLuint, batch: &'a mut Batch, atlas: &'a mut Vec<Atlas>, current_atlas: &'a mut usize, program: &'a mut TextShaderProgram, } impl<'a> TextRenderApi<Batch> for RenderApi<'a> { fn batch(&mut self) -> &mut Batch { self.batch } fn render_batch(&mut self) { unsafe { gl::BufferSubData( gl::ARRAY_BUFFER, 0, self.batch.size() as isize, self.batch.instances.as_ptr() as *const _, ); } // Bind texture if necessary. if *self.active_tex!= self.batch.tex() { unsafe { gl::BindTexture(gl::TEXTURE_2D, self.batch.tex()); } *self.active_tex = self.batch.tex(); } unsafe { self.program.set_rendering_pass(RenderingPass::Background); gl::DrawElementsInstanced( gl::TRIANGLES, 6, gl::UNSIGNED_INT, ptr::null(), self.batch.len() as GLsizei, ); self.program.set_rendering_pass(RenderingPass::SubpixelPass1); gl::DrawElementsInstanced( gl::TRIANGLES, 6, gl::UNSIGNED_INT, ptr::null(), self.batch.len() as GLsizei, ); } self.batch.clear(); } } impl<'a> LoadGlyph for RenderApi<'a> { fn load_glyph(&mut self, rasterized: &RasterizedGlyph) -> Glyph { Atlas::load_glyph(self.active_tex, self.atlas, self.current_atlas, rasterized) } fn clear(&mut self) { Atlas::clear_atlas(self.atlas, self.current_atlas) } } impl<'a> Drop for RenderApi<'a> { fn drop(&mut self) { if!self.batch.is_empty() { self.render_batch(); } } } #[derive(Debug)] #[repr(C)] struct InstanceData { // Coords. col: u16, row: u16, // Glyph offset. left: i16, top: i16, // Glyph size. width: i16, height: i16, // UV offset. uv_left: f32, uv_bot: f32, // uv scale. uv_width: f32, uv_height: f32, // Color. r: u8, g: u8, b: u8, // Cell flags like multicolor or fullwidth character. cell_flags: RenderingGlyphFlags, // Background color. 
bg_r: u8, bg_g: u8, bg_b: u8, bg_a: u8, } #[derive(Debug, Default)] pub struct Batch { tex: GLuint, instances: Vec<InstanceData>, } impl TextRenderBatch for Batch { #[inline] fn tex(&self) -> GLuint { self.tex } #[inline] fn full(&self) -> bool { self.capacity() == self.len() } #[inline] fn is_empty(&self) -> bool { self.len() == 0 } fn add_item(&mut self, cell: &RenderableCell, glyph: &Glyph, _: &SizeInfo) { if self.is_empty() { self.tex = glyph.tex_id; } let mut cell_flags = RenderingGlyphFlags::empty(); cell_flags.set(RenderingGlyphFlags::COLORED, glyph.multicolor); cell_flags.set(RenderingGlyphFlags::WIDE_CHAR, cell.flags.contains(Flags::WIDE_CHAR)); self.instances.push(InstanceData { col: cell.point.column.0 as u16, row: cell.point.line as u16, top: glyph.top, left: glyph.left, width: glyph.width, height: glyph.height, uv_bot: glyph.uv_bot, uv_left: glyph.uv_left, uv_width: glyph.uv_width, uv_height: glyph.uv_height, r: cell.fg.r, g: cell.fg.g, b: cell.fg.b, cell_flags, bg_r: cell.bg.r, bg_g: cell.bg.g, bg_b: cell.bg.b, bg_a: (cell.bg_alpha * 255.0) as u8, }); } } impl Batch { #[inline] pub fn new() -> Self { Self { tex: 0, instances: Vec::with_capacity(BATCH_MAX) } } #[inline] pub fn len(&self) -> usize { self.instances.len() } #[inline] pub fn capacity(&self) -> usize { BATCH_MAX } #[inline] pub fn size(&self) -> usize { self.len() * size_of::<InstanceData>() } pub fn clear(&mut self) { self.tex = 0; self.instances.clear(); } } /// Text drawing program. /// /// Uniforms are prefixed with "u", and vertex attributes are prefixed with "a". #[derive(Debug)] pub struct TextShaderProgram { /// Shader program. program: ShaderProgram, /// Projection scale and offset uniform. u_projection: GLint, /// Cell dimensions (pixels). u_cell_dim: GLint, /// Background pass flag. ///
impl TextShaderProgram { pub fn new(shader_version: ShaderVersion) -> Result<TextShaderProgram, Error> { let program = ShaderProgram::new(shader_version, None, TEXT_SHADER_V, TEXT_SHADER_F)?; Ok(Self { u_projection: program.get_uniform_location(cstr!("projection"))?, u_cell_dim: program.get_uniform_location(cstr!("cellDim"))?, u_rendering_pass: program.get_uniform_location(cstr!("renderingPass"))?, program, }) } fn set_term_uniforms(&self, props: &SizeInfo) { unsafe { gl::Uniform2f(self.u_cell_dim, props.cell_width(), props.cell_height()); } } fn set_rendering_pass(&self, rendering_pass: RenderingPass) { let value = match rendering_pass { RenderingPass::Background | RenderingPass::SubpixelPass1 => rendering_pass as i32, _ => unreachable!("provided pass is not supported in GLSL3 renderer"), }; unsafe { gl::Uniform1i(self.u_rendering_pass, value); } } } impl TextShader for TextShaderProgram { fn id(&self) -> GLuint { self.program.id() } fn projection_uniform(&self) -> GLint { self.u_projection } }
/// Rendering is split into two passes; one for backgrounds, and one for text. u_rendering_pass: GLint, }
random_line_split
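`set_rendering_pass` above converts a `RenderingPass` into the integer uploaded to the `renderingPass` uniform and hits `unreachable!` for passes the GLSL3 shader does not handle. A stand-alone sketch of that mapping follows; the discriminants and the extra variant are assumptions made only to exercise the rejected branch, and the real enum lives in the renderer module.

/// Assumed discriminants; the real RenderingPass enum lives in the renderer
/// module. The extra variant below only exists to demonstrate rejection.
#[derive(Clone, Copy, Debug)]
#[repr(i32)]
enum RenderingPass {
    Background = 0,
    SubpixelPass1 = 1,
    SubpixelPass2 = 2,
}

/// Value that set_rendering_pass would upload via gl::Uniform1i, or None for a
/// pass that would trip the unreachable! guard in the GLSL3 renderer.
fn rendering_pass_uniform(pass: RenderingPass) -> Option<i32> {
    match pass {
        RenderingPass::Background | RenderingPass::SubpixelPass1 => Some(pass as i32),
        _ => None,
    }
}

fn main() {
    assert_eq!(rendering_pass_uniform(RenderingPass::Background), Some(0));
    assert_eq!(rendering_pass_uniform(RenderingPass::SubpixelPass1), Some(1));
    assert_eq!(rendering_pass_uniform(RenderingPass::SubpixelPass2), None);
}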
glsl3.rs
use std::mem::size_of; use std::ptr; use crossfont::RasterizedGlyph; use log::info; use alacritty_terminal::term::cell::Flags; use crate::display::content::RenderableCell; use crate::display::SizeInfo; use crate::gl; use crate::gl::types::*; use crate::renderer::shader::{ShaderProgram, ShaderVersion}; use crate::renderer::{cstr, Error}; use super::atlas::{Atlas, ATLAS_SIZE}; use super::{ Glyph, LoadGlyph, LoaderApi, RenderingGlyphFlags, RenderingPass, TextRenderApi, TextRenderBatch, TextRenderer, TextShader, }; // Shader source. pub static TEXT_SHADER_F: &str = include_str!("../../../res/glsl3/text.f.glsl"); static TEXT_SHADER_V: &str = include_str!("../../../res/glsl3/text.v.glsl"); /// Maximum items to be drawn in a batch. const BATCH_MAX: usize = 0x1_0000; #[derive(Debug)] pub struct Glsl3Renderer { program: TextShaderProgram, vao: GLuint, ebo: GLuint, vbo_instance: GLuint, atlas: Vec<Atlas>, current_atlas: usize, active_tex: GLuint, batch: Batch, } impl Glsl3Renderer { pub fn new() -> Result<Self, Error> { info!("Using OpenGL 3.3 renderer"); let program = TextShaderProgram::new(ShaderVersion::Glsl3)?; let mut vao: GLuint = 0; let mut ebo: GLuint = 0; let mut vbo_instance: GLuint = 0; unsafe { gl::Enable(gl::BLEND); gl::BlendFunc(gl::SRC1_COLOR, gl::ONE_MINUS_SRC1_COLOR); // Disable depth mask, as the renderer never uses depth tests. gl::DepthMask(gl::FALSE); gl::GenVertexArrays(1, &mut vao); gl::GenBuffers(1, &mut ebo); gl::GenBuffers(1, &mut vbo_instance); gl::BindVertexArray(vao); // --------------------- // Set up element buffer // --------------------- let indices: [u32; 6] = [0, 1, 3, 1, 2, 3]; gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, ebo); gl::BufferData( gl::ELEMENT_ARRAY_BUFFER, (6 * size_of::<u32>()) as isize, indices.as_ptr() as *const _, gl::STATIC_DRAW, ); // ---------------------------- // Setup vertex instance buffer // ---------------------------- gl::BindBuffer(gl::ARRAY_BUFFER, vbo_instance); gl::BufferData( gl::ARRAY_BUFFER, (BATCH_MAX * size_of::<InstanceData>()) as isize, ptr::null(), gl::STREAM_DRAW, ); let mut index = 0; let mut size = 0; macro_rules! add_attr { ($count:expr, $gl_type:expr, $type:ty) => { gl::VertexAttribPointer( index, $count, $gl_type, gl::FALSE, size_of::<InstanceData>() as i32, size as *const _, ); gl::EnableVertexAttribArray(index); gl::VertexAttribDivisor(index, 1); #[allow(unused_assignments)] { size += $count * size_of::<$type>(); index += 1; } }; } // Coords. add_attr!(2, gl::UNSIGNED_SHORT, u16); // Glyph offset and size. add_attr!(4, gl::SHORT, i16); // UV offset. add_attr!(4, gl::FLOAT, f32); // Color and cell flags. // // These are packed together because of an OpenGL driver issue on macOS, which caused a // `vec3(u8)` text color and a `u8` cell flags to increase the rendering time by a // huge margin. add_attr!(4, gl::UNSIGNED_BYTE, u8); // Background color. add_attr!(4, gl::UNSIGNED_BYTE, u8); // Cleanup. 
gl::BindVertexArray(0); gl::BindBuffer(gl::ARRAY_BUFFER, 0); gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, 0); } Ok(Self { program, vao, ebo, vbo_instance, atlas: vec![Atlas::new(ATLAS_SIZE, false)], current_atlas: 0, active_tex: 0, batch: Batch::new(), }) } } impl<'a> TextRenderer<'a> for Glsl3Renderer { type RenderApi = RenderApi<'a>; type RenderBatch = Batch; type Shader = TextShaderProgram; fn with_api<'b: 'a, F, T>(&'b mut self, size_info: &'b SizeInfo, func: F) -> T where F: FnOnce(Self::RenderApi) -> T, { unsafe { gl::UseProgram(self.program.id()); self.program.set_term_uniforms(size_info); gl::BindVertexArray(self.vao); gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, self.ebo); gl::BindBuffer(gl::ARRAY_BUFFER, self.vbo_instance); gl::ActiveTexture(gl::TEXTURE0); } let res = func(RenderApi { active_tex: &mut self.active_tex, batch: &mut self.batch, atlas: &mut self.atlas, current_atlas: &mut self.current_atlas, program: &mut self.program, }); unsafe { gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, 0); gl::BindBuffer(gl::ARRAY_BUFFER, 0); gl::BindVertexArray(0); gl::UseProgram(0); } res } fn program(&self) -> &Self::Shader { &self.program } fn loader_api(&mut self) -> LoaderApi<'_> { LoaderApi { active_tex: &mut self.active_tex, atlas: &mut self.atlas, current_atlas: &mut self.current_atlas, } } } impl Drop for Glsl3Renderer { fn drop(&mut self) { unsafe { gl::DeleteBuffers(1, &self.vbo_instance); gl::DeleteBuffers(1, &self.ebo); gl::DeleteVertexArrays(1, &self.vao); } } } #[derive(Debug)] pub struct
<'a> { active_tex: &'a mut GLuint, batch: &'a mut Batch, atlas: &'a mut Vec<Atlas>, current_atlas: &'a mut usize, program: &'a mut TextShaderProgram, } impl<'a> TextRenderApi<Batch> for RenderApi<'a> { fn batch(&mut self) -> &mut Batch { self.batch } fn render_batch(&mut self) { unsafe { gl::BufferSubData( gl::ARRAY_BUFFER, 0, self.batch.size() as isize, self.batch.instances.as_ptr() as *const _, ); } // Bind texture if necessary. if *self.active_tex!= self.batch.tex() { unsafe { gl::BindTexture(gl::TEXTURE_2D, self.batch.tex()); } *self.active_tex = self.batch.tex(); } unsafe { self.program.set_rendering_pass(RenderingPass::Background); gl::DrawElementsInstanced( gl::TRIANGLES, 6, gl::UNSIGNED_INT, ptr::null(), self.batch.len() as GLsizei, ); self.program.set_rendering_pass(RenderingPass::SubpixelPass1); gl::DrawElementsInstanced( gl::TRIANGLES, 6, gl::UNSIGNED_INT, ptr::null(), self.batch.len() as GLsizei, ); } self.batch.clear(); } } impl<'a> LoadGlyph for RenderApi<'a> { fn load_glyph(&mut self, rasterized: &RasterizedGlyph) -> Glyph { Atlas::load_glyph(self.active_tex, self.atlas, self.current_atlas, rasterized) } fn clear(&mut self) { Atlas::clear_atlas(self.atlas, self.current_atlas) } } impl<'a> Drop for RenderApi<'a> { fn drop(&mut self) { if!self.batch.is_empty() { self.render_batch(); } } } #[derive(Debug)] #[repr(C)] struct InstanceData { // Coords. col: u16, row: u16, // Glyph offset. left: i16, top: i16, // Glyph size. width: i16, height: i16, // UV offset. uv_left: f32, uv_bot: f32, // uv scale. uv_width: f32, uv_height: f32, // Color. r: u8, g: u8, b: u8, // Cell flags like multicolor or fullwidth character. cell_flags: RenderingGlyphFlags, // Background color. bg_r: u8, bg_g: u8, bg_b: u8, bg_a: u8, } #[derive(Debug, Default)] pub struct Batch { tex: GLuint, instances: Vec<InstanceData>, } impl TextRenderBatch for Batch { #[inline] fn tex(&self) -> GLuint { self.tex } #[inline] fn full(&self) -> bool { self.capacity() == self.len() } #[inline] fn is_empty(&self) -> bool { self.len() == 0 } fn add_item(&mut self, cell: &RenderableCell, glyph: &Glyph, _: &SizeInfo) { if self.is_empty() { self.tex = glyph.tex_id; } let mut cell_flags = RenderingGlyphFlags::empty(); cell_flags.set(RenderingGlyphFlags::COLORED, glyph.multicolor); cell_flags.set(RenderingGlyphFlags::WIDE_CHAR, cell.flags.contains(Flags::WIDE_CHAR)); self.instances.push(InstanceData { col: cell.point.column.0 as u16, row: cell.point.line as u16, top: glyph.top, left: glyph.left, width: glyph.width, height: glyph.height, uv_bot: glyph.uv_bot, uv_left: glyph.uv_left, uv_width: glyph.uv_width, uv_height: glyph.uv_height, r: cell.fg.r, g: cell.fg.g, b: cell.fg.b, cell_flags, bg_r: cell.bg.r, bg_g: cell.bg.g, bg_b: cell.bg.b, bg_a: (cell.bg_alpha * 255.0) as u8, }); } } impl Batch { #[inline] pub fn new() -> Self { Self { tex: 0, instances: Vec::with_capacity(BATCH_MAX) } } #[inline] pub fn len(&self) -> usize { self.instances.len() } #[inline] pub fn capacity(&self) -> usize { BATCH_MAX } #[inline] pub fn size(&self) -> usize { self.len() * size_of::<InstanceData>() } pub fn clear(&mut self) { self.tex = 0; self.instances.clear(); } } /// Text drawing program. /// /// Uniforms are prefixed with "u", and vertex attributes are prefixed with "a". #[derive(Debug)] pub struct TextShaderProgram { /// Shader program. program: ShaderProgram, /// Projection scale and offset uniform. u_projection: GLint, /// Cell dimensions (pixels). u_cell_dim: GLint, /// Background pass flag. 
/// /// Rendering is split into two passes; one for backgrounds, and one for text. u_rendering_pass: GLint, } impl TextShaderProgram { pub fn new(shader_version: ShaderVersion) -> Result<TextShaderProgram, Error> { let program = ShaderProgram::new(shader_version, None, TEXT_SHADER_V, TEXT_SHADER_F)?; Ok(Self { u_projection: program.get_uniform_location(cstr!("projection"))?, u_cell_dim: program.get_uniform_location(cstr!("cellDim"))?, u_rendering_pass: program.get_uniform_location(cstr!("renderingPass"))?, program, }) } fn set_term_uniforms(&self, props: &SizeInfo) { unsafe { gl::Uniform2f(self.u_cell_dim, props.cell_width(), props.cell_height()); } } fn set_rendering_pass(&self, rendering_pass: RenderingPass) { let value = match rendering_pass { RenderingPass::Background | RenderingPass::SubpixelPass1 => rendering_pass as i32, _ => unreachable!("provided pass is not supported in GLSL3 renderer"), }; unsafe { gl::Uniform1i(self.u_rendering_pass, value); } } } impl TextShader for TextShaderProgram { fn id(&self) -> GLuint { self.program.id() } fn projection_uniform(&self) -> GLint { self.u_projection } }
RenderApi
identifier_name
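The glsl3.rs row above is an `identifier_name` example: the held-out middle (`RenderApi`) is the single identifier that completes `pub struct` at the end of the prefix, and the suffix resumes at `<'a> {`. As a rough illustration, a row of this shape can be stitched back into the original file by concatenating prefix, middle, and suffix; `FimRow` and `reassemble` below are hypothetical helper names used only for this sketch, not part of the corpus.

// Illustrative sketch only: `FimRow` mirrors the column names of this dataset.
struct FimRow {
    file_name: String,
    prefix: String,
    suffix: String,
    middle: String,
    fim_type: String, // e.g. "identifier_name", "identifier_body", ...
}

// Original file text = prefix ++ middle ++ suffix.
fn reassemble(row: &FimRow) -> String {
    let mut out = String::with_capacity(row.prefix.len() + row.middle.len() + row.suffix.len());
    out.push_str(&row.prefix);
    out.push_str(&row.middle);
    out.push_str(&row.suffix);
    out
}

fn main() {
    let row = FimRow {
        file_name: "glsl3.rs".to_string(),
        prefix: "#[derive(Debug)] pub struct ".to_string(),
        middle: "RenderApi".to_string(),
        suffix: "<'a> { /* fields */ }".to_string(),
        fim_type: "identifier_name".to_string(),
    };
    assert_eq!(row.fim_type, "identifier_name");
    assert!(reassemble(&row).contains("pub struct RenderApi<'a>"));
}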
glsl3.rs
use std::mem::size_of; use std::ptr; use crossfont::RasterizedGlyph; use log::info; use alacritty_terminal::term::cell::Flags; use crate::display::content::RenderableCell; use crate::display::SizeInfo; use crate::gl; use crate::gl::types::*; use crate::renderer::shader::{ShaderProgram, ShaderVersion}; use crate::renderer::{cstr, Error}; use super::atlas::{Atlas, ATLAS_SIZE}; use super::{ Glyph, LoadGlyph, LoaderApi, RenderingGlyphFlags, RenderingPass, TextRenderApi, TextRenderBatch, TextRenderer, TextShader, }; // Shader source. pub static TEXT_SHADER_F: &str = include_str!("../../../res/glsl3/text.f.glsl"); static TEXT_SHADER_V: &str = include_str!("../../../res/glsl3/text.v.glsl"); /// Maximum items to be drawn in a batch. const BATCH_MAX: usize = 0x1_0000; #[derive(Debug)] pub struct Glsl3Renderer { program: TextShaderProgram, vao: GLuint, ebo: GLuint, vbo_instance: GLuint, atlas: Vec<Atlas>, current_atlas: usize, active_tex: GLuint, batch: Batch, } impl Glsl3Renderer { pub fn new() -> Result<Self, Error> { info!("Using OpenGL 3.3 renderer"); let program = TextShaderProgram::new(ShaderVersion::Glsl3)?; let mut vao: GLuint = 0; let mut ebo: GLuint = 0; let mut vbo_instance: GLuint = 0; unsafe { gl::Enable(gl::BLEND); gl::BlendFunc(gl::SRC1_COLOR, gl::ONE_MINUS_SRC1_COLOR); // Disable depth mask, as the renderer never uses depth tests. gl::DepthMask(gl::FALSE); gl::GenVertexArrays(1, &mut vao); gl::GenBuffers(1, &mut ebo); gl::GenBuffers(1, &mut vbo_instance); gl::BindVertexArray(vao); // --------------------- // Set up element buffer // --------------------- let indices: [u32; 6] = [0, 1, 3, 1, 2, 3]; gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, ebo); gl::BufferData( gl::ELEMENT_ARRAY_BUFFER, (6 * size_of::<u32>()) as isize, indices.as_ptr() as *const _, gl::STATIC_DRAW, ); // ---------------------------- // Setup vertex instance buffer // ---------------------------- gl::BindBuffer(gl::ARRAY_BUFFER, vbo_instance); gl::BufferData( gl::ARRAY_BUFFER, (BATCH_MAX * size_of::<InstanceData>()) as isize, ptr::null(), gl::STREAM_DRAW, ); let mut index = 0; let mut size = 0; macro_rules! add_attr { ($count:expr, $gl_type:expr, $type:ty) => { gl::VertexAttribPointer( index, $count, $gl_type, gl::FALSE, size_of::<InstanceData>() as i32, size as *const _, ); gl::EnableVertexAttribArray(index); gl::VertexAttribDivisor(index, 1); #[allow(unused_assignments)] { size += $count * size_of::<$type>(); index += 1; } }; } // Coords. add_attr!(2, gl::UNSIGNED_SHORT, u16); // Glyph offset and size. add_attr!(4, gl::SHORT, i16); // UV offset. add_attr!(4, gl::FLOAT, f32); // Color and cell flags. // // These are packed together because of an OpenGL driver issue on macOS, which caused a // `vec3(u8)` text color and a `u8` cell flags to increase the rendering time by a // huge margin. add_attr!(4, gl::UNSIGNED_BYTE, u8); // Background color. add_attr!(4, gl::UNSIGNED_BYTE, u8); // Cleanup. 
gl::BindVertexArray(0); gl::BindBuffer(gl::ARRAY_BUFFER, 0); gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, 0); } Ok(Self { program, vao, ebo, vbo_instance, atlas: vec![Atlas::new(ATLAS_SIZE, false)], current_atlas: 0, active_tex: 0, batch: Batch::new(), }) } } impl<'a> TextRenderer<'a> for Glsl3Renderer { type RenderApi = RenderApi<'a>; type RenderBatch = Batch; type Shader = TextShaderProgram; fn with_api<'b: 'a, F, T>(&'b mut self, size_info: &'b SizeInfo, func: F) -> T where F: FnOnce(Self::RenderApi) -> T, { unsafe { gl::UseProgram(self.program.id()); self.program.set_term_uniforms(size_info); gl::BindVertexArray(self.vao); gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, self.ebo); gl::BindBuffer(gl::ARRAY_BUFFER, self.vbo_instance); gl::ActiveTexture(gl::TEXTURE0); } let res = func(RenderApi { active_tex: &mut self.active_tex, batch: &mut self.batch, atlas: &mut self.atlas, current_atlas: &mut self.current_atlas, program: &mut self.program, }); unsafe { gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, 0); gl::BindBuffer(gl::ARRAY_BUFFER, 0); gl::BindVertexArray(0); gl::UseProgram(0); } res } fn program(&self) -> &Self::Shader { &self.program } fn loader_api(&mut self) -> LoaderApi<'_> { LoaderApi { active_tex: &mut self.active_tex, atlas: &mut self.atlas, current_atlas: &mut self.current_atlas, } } } impl Drop for Glsl3Renderer { fn drop(&mut self) { unsafe { gl::DeleteBuffers(1, &self.vbo_instance); gl::DeleteBuffers(1, &self.ebo); gl::DeleteVertexArrays(1, &self.vao); } } } #[derive(Debug)] pub struct RenderApi<'a> { active_tex: &'a mut GLuint, batch: &'a mut Batch, atlas: &'a mut Vec<Atlas>, current_atlas: &'a mut usize, program: &'a mut TextShaderProgram, } impl<'a> TextRenderApi<Batch> for RenderApi<'a> { fn batch(&mut self) -> &mut Batch { self.batch } fn render_batch(&mut self) { unsafe { gl::BufferSubData( gl::ARRAY_BUFFER, 0, self.batch.size() as isize, self.batch.instances.as_ptr() as *const _, ); } // Bind texture if necessary. if *self.active_tex!= self.batch.tex() { unsafe { gl::BindTexture(gl::TEXTURE_2D, self.batch.tex()); } *self.active_tex = self.batch.tex(); } unsafe { self.program.set_rendering_pass(RenderingPass::Background); gl::DrawElementsInstanced( gl::TRIANGLES, 6, gl::UNSIGNED_INT, ptr::null(), self.batch.len() as GLsizei, ); self.program.set_rendering_pass(RenderingPass::SubpixelPass1); gl::DrawElementsInstanced( gl::TRIANGLES, 6, gl::UNSIGNED_INT, ptr::null(), self.batch.len() as GLsizei, ); } self.batch.clear(); } } impl<'a> LoadGlyph for RenderApi<'a> { fn load_glyph(&mut self, rasterized: &RasterizedGlyph) -> Glyph { Atlas::load_glyph(self.active_tex, self.atlas, self.current_atlas, rasterized) } fn clear(&mut self) { Atlas::clear_atlas(self.atlas, self.current_atlas) } } impl<'a> Drop for RenderApi<'a> { fn drop(&mut self) { if!self.batch.is_empty() { self.render_batch(); } } } #[derive(Debug)] #[repr(C)] struct InstanceData { // Coords. col: u16, row: u16, // Glyph offset. left: i16, top: i16, // Glyph size. width: i16, height: i16, // UV offset. uv_left: f32, uv_bot: f32, // uv scale. uv_width: f32, uv_height: f32, // Color. r: u8, g: u8, b: u8, // Cell flags like multicolor or fullwidth character. cell_flags: RenderingGlyphFlags, // Background color. 
bg_r: u8, bg_g: u8, bg_b: u8, bg_a: u8, } #[derive(Debug, Default)] pub struct Batch { tex: GLuint, instances: Vec<InstanceData>, } impl TextRenderBatch for Batch { #[inline] fn tex(&self) -> GLuint { self.tex } #[inline] fn full(&self) -> bool { self.capacity() == self.len() } #[inline] fn is_empty(&self) -> bool { self.len() == 0 } fn add_item(&mut self, cell: &RenderableCell, glyph: &Glyph, _: &SizeInfo) { if self.is_empty() { self.tex = glyph.tex_id; } let mut cell_flags = RenderingGlyphFlags::empty(); cell_flags.set(RenderingGlyphFlags::COLORED, glyph.multicolor); cell_flags.set(RenderingGlyphFlags::WIDE_CHAR, cell.flags.contains(Flags::WIDE_CHAR)); self.instances.push(InstanceData { col: cell.point.column.0 as u16, row: cell.point.line as u16, top: glyph.top, left: glyph.left, width: glyph.width, height: glyph.height, uv_bot: glyph.uv_bot, uv_left: glyph.uv_left, uv_width: glyph.uv_width, uv_height: glyph.uv_height, r: cell.fg.r, g: cell.fg.g, b: cell.fg.b, cell_flags, bg_r: cell.bg.r, bg_g: cell.bg.g, bg_b: cell.bg.b, bg_a: (cell.bg_alpha * 255.0) as u8, }); } } impl Batch { #[inline] pub fn new() -> Self { Self { tex: 0, instances: Vec::with_capacity(BATCH_MAX) } } #[inline] pub fn len(&self) -> usize
#[inline] pub fn capacity(&self) -> usize { BATCH_MAX } #[inline] pub fn size(&self) -> usize { self.len() * size_of::<InstanceData>() } pub fn clear(&mut self) { self.tex = 0; self.instances.clear(); } } /// Text drawing program. /// /// Uniforms are prefixed with "u", and vertex attributes are prefixed with "a". #[derive(Debug)] pub struct TextShaderProgram { /// Shader program. program: ShaderProgram, /// Projection scale and offset uniform. u_projection: GLint, /// Cell dimensions (pixels). u_cell_dim: GLint, /// Background pass flag. /// /// Rendering is split into two passes; one for backgrounds, and one for text. u_rendering_pass: GLint, } impl TextShaderProgram { pub fn new(shader_version: ShaderVersion) -> Result<TextShaderProgram, Error> { let program = ShaderProgram::new(shader_version, None, TEXT_SHADER_V, TEXT_SHADER_F)?; Ok(Self { u_projection: program.get_uniform_location(cstr!("projection"))?, u_cell_dim: program.get_uniform_location(cstr!("cellDim"))?, u_rendering_pass: program.get_uniform_location(cstr!("renderingPass"))?, program, }) } fn set_term_uniforms(&self, props: &SizeInfo) { unsafe { gl::Uniform2f(self.u_cell_dim, props.cell_width(), props.cell_height()); } } fn set_rendering_pass(&self, rendering_pass: RenderingPass) { let value = match rendering_pass { RenderingPass::Background | RenderingPass::SubpixelPass1 => rendering_pass as i32, _ => unreachable!("provided pass is not supported in GLSL3 renderer"), }; unsafe { gl::Uniform1i(self.u_rendering_pass, value); } } } impl TextShader for TextShaderProgram { fn id(&self) -> GLuint { self.program.id() } fn projection_uniform(&self) -> GLint { self.u_projection } }
{ self.instances.len() }
identifier_body
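The batching code in the glsl3.rs rows above sizes its GPU upload as `len * size_of::<InstanceData>()` over a `#[repr(C)]` struct and caps a batch at `BATCH_MAX` instances. The stand-alone sketch below reproduces just that arithmetic with a simplified `Instance` struct (an assumption for illustration, not the renderer's real layout), so it runs without an OpenGL context.

// Sketch: byte sizing of an instance batch, as used for glBufferSubData above.
// `Instance` is a simplified stand-in, not the renderer's real `InstanceData`.
use std::mem::size_of;

#[repr(C)]
#[derive(Clone, Copy, Default)]
struct Instance {
    col: u16,
    row: u16,
    r: u8,
    g: u8,
    b: u8,
    a: u8,
}

const BATCH_MAX: usize = 0x1_0000;

struct Batch {
    instances: Vec<Instance>,
}

impl Batch {
    fn new() -> Self {
        Self { instances: Vec::with_capacity(BATCH_MAX) }
    }
    fn full(&self) -> bool {
        self.instances.len() == BATCH_MAX
    }
    /// Byte size that would be handed to the GL buffer upload.
    fn size(&self) -> usize {
        self.instances.len() * size_of::<Instance>()
    }
}

fn main() {
    let mut batch = Batch::new();
    batch.instances.push(Instance::default());
    assert_eq!(batch.size(), size_of::<Instance>());
    assert!(!batch.full());
}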
service.rs
use crate::{ common::client::{ClientId, Credentials, Token}, coordinator, }; use bytes::Bytes; use derive_more::From; use futures::{ready, stream::Stream}; use std::{ collections::HashMap, error::Error, future::Future, pin::Pin, task::{Context, Poll}, }; use tarpc::context::current as rpc_context; use thiserror::Error; use tokio::{ stream::StreamExt, sync::{ mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender}, oneshot, }, }; use tracing_futures::Instrument; use std::env; use std::fs::File; use std::io::prelude::*; use nix::sys::signal; use nix::unistd::Pid; use std::thread; use std::time::Duration;
/// A future that orchestrates the entire aggregator service. // TODO: maybe add a HashSet or HashMap of clients who already // uploaded their weights to prevent a client from uploading weights // multiple times. Or we could just remove that ID from the // `allowed_ids` map. // TODO: maybe add a HashSet for clients that are already // downloading/uploading, to prevent DoS attacks. pub struct Service<A> where A: Aggregator, { /// Clients that the coordinator selected for the current /// round. They can use their unique token to download the global /// weights and upload their own local results once they finished /// training. allowed_ids: HashMap<ClientId, Token>, /// The latest global weights as computed by the aggregator. // NOTE: We could store this directly in the task that handles the // HTTP requests. I initially though that having it here would // make it easier to bypass the HTTP layer, which is convenient // for testing because we can simulate client with just // AggregatorHandles. But maybe that's just another layer of // complexity that is not worth it. global_weights: Bytes, /// The aggregator itself, which handles the weights or performs /// the aggregations. aggregator: A, /// A client for the coordinator RPC service. rpc_client: coordinator::rpc::Client, requests: ServiceRequests<A>, aggregation_future: Option<AggregationFuture<A>>, model_number: usize, } /// This trait defines the methods that an aggregator should /// implement. pub trait Aggregator { type Error: Error + Send +'static + Sync; type AggregateFut: Future<Output = Result<Bytes, Self::Error>> + Unpin; type AddWeightsFut: Future<Output = Result<(), Self::Error>> + Unpin + Send +'static; /// Check the validity of the given weights and if they are valid, /// add them to the set of weights to aggregate. fn add_weights(&mut self, weights: Bytes) -> Self::AddWeightsFut; /// Run the aggregator and return the result. fn aggregate(&mut self) -> Self::AggregateFut; } impl<A> Service<A> where A: Aggregator, { pub fn new( aggregator: A, rpc_client: coordinator::rpc::Client, requests: ServiceRequests<A>, ) -> Self { Self { aggregator, requests, rpc_client, allowed_ids: HashMap::new(), global_weights: Bytes::new(), aggregation_future: None, model_number: 0, } } /// Handle the incoming requests. 
fn poll_requests(&mut self, cx: &mut Context) -> Poll<()> { trace!("polling requests"); loop { match ready!(Pin::new(&mut self.requests).poll_next(cx)) { Some(request) => self.handle_request(request), None => { trace!("no more request to handle"); return Poll::Ready(()); } } } } fn handle_download_request(&mut self, request: DownloadRequest) { debug!("handling download request"); let DownloadRequest { credentials, response_tx, } = request; if self .allowed_ids .get(credentials.id()) .map(|expected_token| credentials.token() == expected_token) .unwrap_or(false) { let _ = response_tx.send(Ok(self.global_weights.clone())); } else { warn!("rejecting download request"); let _ = response_tx.send(Err(DownloadError::Unauthorized)); } } fn handle_upload_request(&mut self, request: UploadRequest) { debug!("handling upload request"); let UploadRequest { credentials, data } = request; let accept_upload = self .allowed_ids .get(credentials.id()) .map(|expected_token| credentials.token() == expected_token) .unwrap_or(false); if!accept_upload { warn!("rejecting upload request"); return; } let mut rpc_client = self.rpc_client.clone(); let fut = self.aggregator.add_weights(data); tokio::spawn( async move { let result = fut.await; debug!("sending end training request to the coordinator"); rpc_client .end_training(rpc_context(), *credentials.id(), result.is_ok()) .await .map_err(|e| { warn!( "failed to send end training request to the coordinator: {}", e ); }) } .instrument(trace_span!("end_training_rpc_request")), ); } fn handle_request(&mut self, request: Request<A>) { match request { Request::Download(req) => self.handle_download_request(req), Request::Upload(req) => self.handle_upload_request(req), Request::Select(req) => self.handle_select_request(req), Request::Aggregate(req) => self.handle_aggregate_request(req), } } fn handle_aggregate_request(&mut self, request: AggregateRequest<A>) { info!("handling aggregate request"); let AggregateRequest { response_tx } = request; self.allowed_ids = HashMap::new(); self.aggregation_future = Some(AggregationFuture { future: self.aggregator.aggregate(), response_tx, }); } fn handle_select_request(&mut self, request: SelectRequest<A>) { info!("handling select request"); let SelectRequest { credentials, response_tx, } = request; let (id, token) = credentials.into_parts(); self.allowed_ids.insert(id, token); if response_tx.send(Ok(())).is_err() { warn!("failed to send reponse: channel closed"); } } #[allow(clippy::cognitive_complexity)] fn poll_aggregation(&mut self, cx: &mut Context) { // Check if we're waiting for an aggregation, ie whether // there's a future to poll. 
let future = if let Some(future) = self.aggregation_future.take() { future } else { trace!("no aggregation future running: skipping polling"); return; }; trace!("polling aggregation future"); let AggregationFuture { mut future, response_tx, } = future; let result = match Pin::new(&mut future).poll(cx) { Poll::Ready(Ok(weights)) => { info!("aggregation succeeded, settings global weights"); self.global_weights = weights; if let Ok(path) = env::var("NEVERMINED_OUTPUTS_PATH") { let file_name = format!("{}/model_{}.npy", path, self.model_number); let mut file = File::create(&file_name).unwrap(); info!("Writing model {}", file_name); file.write_all(&self.global_weights).unwrap(); self.model_number += 1; } Ok(()) } Poll::Ready(Err(e)) => { error!(error = %e, "aggregation failed"); Err(e) } Poll::Pending => { debug!("aggregation future still running"); self.aggregation_future = Some(AggregationFuture { future, response_tx, }); return; } }; if response_tx.send(result).is_err() { error!("failed to send aggregation response to RPC task: receiver dropped"); } if self.model_number == 10 { thread::sleep(Duration::from_millis(10 * 1000)); signal::kill(Pid::this(), signal::Signal::SIGINT).unwrap(); } } } struct AggregationFuture<A> where A: Aggregator, { future: A::AggregateFut, response_tx: oneshot::Sender<Result<(), A::Error>>, } impl<A> Future for Service<A> where A: Aggregator + Unpin, { type Output = (); fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> { trace!("polling Service"); let pin = self.get_mut(); if let Poll::Ready(_) = pin.poll_requests(cx) { return Poll::Ready(()); } pin.poll_aggregation(cx); Poll::Pending } } pub struct ServiceRequests<A>(Pin<Box<dyn Stream<Item = Request<A>> + Send>>) where A: Aggregator; impl<A> Stream for ServiceRequests<A> where A: Aggregator, { type Item = Request<A>; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> { trace!("polling ServiceRequests"); self.0.as_mut().poll_next(cx) } } impl<A> ServiceRequests<A> where A: Aggregator +'static, { fn new( upload: UnboundedReceiver<UploadRequest>, download: UnboundedReceiver<DownloadRequest>, aggregate: UnboundedReceiver<AggregateRequest<A>>, select: UnboundedReceiver<SelectRequest<A>>, ) -> Self { let stream = download .map(Request::from) .merge(upload.map(Request::from)) .merge(aggregate.map(Request::from)) .merge(select.map(Request::from)); Self(Box::pin(stream)) } } #[derive(From)] pub struct UploadRequest { credentials: Credentials, data: Bytes, } #[derive(From)] pub struct DownloadRequest { credentials: Credentials, response_tx: oneshot::Sender<Result<Bytes, DownloadError>>, } #[derive(From)] pub struct AggregateRequest<A> where A: Aggregator, { response_tx: oneshot::Sender<Result<(), A::Error>>, } #[derive(From)] pub struct SelectRequest<A> where A: Aggregator, { credentials: Credentials, response_tx: oneshot::Sender<Result<(), A::Error>>, } #[derive(From)] pub enum Request<A> where A: Aggregator, { Upload(UploadRequest), Download(DownloadRequest), Aggregate(AggregateRequest<A>), Select(SelectRequest<A>), } pub struct ServiceHandle<A> where A: Aggregator, { upload: UnboundedSender<UploadRequest>, download: UnboundedSender<DownloadRequest>, aggregate: UnboundedSender<AggregateRequest<A>>, select: UnboundedSender<SelectRequest<A>>, } // We implement Clone manually because it can only be derived if A: // Clone, which we don't want. 
impl<A> Clone for ServiceHandle<A> where A: Aggregator, { fn clone(&self) -> Self { Self { upload: self.upload.clone(), download: self.download.clone(), aggregate: self.aggregate.clone(), select: self.select.clone(), } } } impl<A> ServiceHandle<A> where A: Aggregator +'static, { pub fn new() -> (Self, ServiceRequests<A>) { let (upload_tx, upload_rx) = unbounded_channel::<UploadRequest>(); let (download_tx, download_rx) = unbounded_channel::<DownloadRequest>(); let (aggregate_tx, aggregate_rx) = unbounded_channel::<AggregateRequest<A>>(); let (select_tx, select_rx) = unbounded_channel::<SelectRequest<A>>(); let handle = Self { upload: upload_tx, download: download_tx, aggregate: aggregate_tx, select: select_tx, }; let service_requests = ServiceRequests::new(upload_rx, download_rx, aggregate_rx, select_rx); (handle, service_requests) } pub async fn download( &self, credentials: Credentials, ) -> Result<Bytes, ServiceError<DownloadError>> { let (tx, rx) = oneshot::channel::<Result<Bytes, DownloadError>>(); let request = DownloadRequest::from((credentials, tx)); Self::send_request(request, &self.download)?; Self::recv_response(rx) .await? .map_err(ServiceError::Request) } pub async fn upload( &self, credentials: Credentials, data: Bytes, ) -> Result<(), ServiceError<UploadError>> { let request = UploadRequest::from((credentials, data)); Self::send_request(request, &self.upload)?; Ok(()) } pub async fn aggregate(&self) -> Result<(), ServiceError<A::Error>> { let (tx, rx) = oneshot::channel::<Result<(), A::Error>>(); Self::send_request(AggregateRequest::from(tx), &self.aggregate)?; Self::recv_response(rx) .await? .map_err(ServiceError::Request) } pub async fn select(&self, credentials: Credentials) -> Result<(), ServiceError<A::Error>> { let (tx, rx) = oneshot::channel::<Result<(), A::Error>>(); Self::send_request(SelectRequest::from((credentials, tx)), &self.select)?; Self::recv_response(rx) .await? .map_err(ServiceError::Request) } fn send_request<P>(payload: P, tx: &UnboundedSender<P>) -> Result<(), ChannelError> { trace!("send request to the service"); if tx.send(payload).is_err() { warn!("failed to send request: channel closed"); Err(ChannelError::Request) } else { trace!("request sent"); Ok(()) } } async fn recv_response<R>(rx: oneshot::Receiver<R>) -> Result<R, ChannelError> { rx.await.map_err(|_| { warn!("could not receive response: channel closed"); ChannelError::Response }) } } #[derive(Error, Debug)] pub enum DownloadError { #[error("the user does not have the proper permissions")] Unauthorized, } #[derive(Error, Debug)] pub enum UploadError { #[error("the user does not have the proper permissions")] Unauthorized, } #[derive(Error, Debug)] pub enum ServiceError<E> where E: Error, { #[error("failed to send the request or receive the response")] Handle(#[from] ChannelError), #[error("request failed: {0}")] Request(E), } #[derive(Error, Debug)] pub enum ChannelError { #[error("failed to send request to Service")] Request, #[error("failed to receive the response from Service")] Response, }
random_line_split
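The service.rs row above declares the `Aggregator` trait with associated `AggregateFut`/`AddWeightsFut` future types that must be `Unpin` (and `Send + 'static` for uploads). The sketch below is one hypothetical way to satisfy those bounds using `futures::future::Ready` and `std::convert::Infallible` as the error type; it assumes the `Aggregator` trait exactly as declared in the row plus the `bytes` and `futures` crates, and `LastWriteWins` is an invented name.

// Hypothetical implementor of the `Aggregator` trait shown above:
// it simply keeps the most recently uploaded weights.
use bytes::Bytes;
use futures::future::{ready, Ready};
use std::convert::Infallible;

struct LastWriteWins {
    latest: Bytes,
}

impl Aggregator for LastWriteWins {
    type Error = Infallible;
    // `Ready` is always `Unpin`, so it satisfies the trait bounds.
    type AggregateFut = Ready<Result<Bytes, Infallible>>;
    type AddWeightsFut = Ready<Result<(), Infallible>>;

    // Accept every upload and remember it.
    fn add_weights(&mut self, weights: Bytes) -> Self::AddWeightsFut {
        self.latest = weights;
        ready(Ok(()))
    }

    // "Aggregation" just hands back the stored weights.
    fn aggregate(&mut self) -> Self::AggregateFut {
        ready(Ok(self.latest.clone()))
    }
}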
service.rs
use crate::{ common::client::{ClientId, Credentials, Token}, coordinator, }; use bytes::Bytes; use derive_more::From; use futures::{ready, stream::Stream}; use std::{ collections::HashMap, error::Error, future::Future, pin::Pin, task::{Context, Poll}, }; use tarpc::context::current as rpc_context; use thiserror::Error; use tokio::{ stream::StreamExt, sync::{ mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender}, oneshot, }, }; use tracing_futures::Instrument; use std::env; use std::fs::File; use std::io::prelude::*; use nix::sys::signal; use nix::unistd::Pid; use std::thread; use std::time::Duration; /// A future that orchestrates the entire aggregator service. // TODO: maybe add a HashSet or HashMap of clients who already // uploaded their weights to prevent a client from uploading weights // multiple times. Or we could just remove that ID from the // `allowed_ids` map. // TODO: maybe add a HashSet for clients that are already // downloading/uploading, to prevent DoS attacks. pub struct Service<A> where A: Aggregator, { /// Clients that the coordinator selected for the current /// round. They can use their unique token to download the global /// weights and upload their own local results once they finished /// training. allowed_ids: HashMap<ClientId, Token>, /// The latest global weights as computed by the aggregator. // NOTE: We could store this directly in the task that handles the // HTTP requests. I initially though that having it here would // make it easier to bypass the HTTP layer, which is convenient // for testing because we can simulate client with just // AggregatorHandles. But maybe that's just another layer of // complexity that is not worth it. global_weights: Bytes, /// The aggregator itself, which handles the weights or performs /// the aggregations. aggregator: A, /// A client for the coordinator RPC service. rpc_client: coordinator::rpc::Client, requests: ServiceRequests<A>, aggregation_future: Option<AggregationFuture<A>>, model_number: usize, } /// This trait defines the methods that an aggregator should /// implement. pub trait Aggregator { type Error: Error + Send +'static + Sync; type AggregateFut: Future<Output = Result<Bytes, Self::Error>> + Unpin; type AddWeightsFut: Future<Output = Result<(), Self::Error>> + Unpin + Send +'static; /// Check the validity of the given weights and if they are valid, /// add them to the set of weights to aggregate. fn add_weights(&mut self, weights: Bytes) -> Self::AddWeightsFut; /// Run the aggregator and return the result. fn aggregate(&mut self) -> Self::AggregateFut; } impl<A> Service<A> where A: Aggregator, { pub fn new( aggregator: A, rpc_client: coordinator::rpc::Client, requests: ServiceRequests<A>, ) -> Self { Self { aggregator, requests, rpc_client, allowed_ids: HashMap::new(), global_weights: Bytes::new(), aggregation_future: None, model_number: 0, } } /// Handle the incoming requests. 
fn poll_requests(&mut self, cx: &mut Context) -> Poll<()> { trace!("polling requests"); loop { match ready!(Pin::new(&mut self.requests).poll_next(cx)) { Some(request) => self.handle_request(request), None => { trace!("no more request to handle"); return Poll::Ready(()); } } } } fn handle_download_request(&mut self, request: DownloadRequest) { debug!("handling download request"); let DownloadRequest { credentials, response_tx, } = request; if self .allowed_ids .get(credentials.id()) .map(|expected_token| credentials.token() == expected_token) .unwrap_or(false) { let _ = response_tx.send(Ok(self.global_weights.clone())); } else { warn!("rejecting download request"); let _ = response_tx.send(Err(DownloadError::Unauthorized)); } } fn handle_upload_request(&mut self, request: UploadRequest) { debug!("handling upload request"); let UploadRequest { credentials, data } = request; let accept_upload = self .allowed_ids .get(credentials.id()) .map(|expected_token| credentials.token() == expected_token) .unwrap_or(false); if!accept_upload { warn!("rejecting upload request"); return; } let mut rpc_client = self.rpc_client.clone(); let fut = self.aggregator.add_weights(data); tokio::spawn( async move { let result = fut.await; debug!("sending end training request to the coordinator"); rpc_client .end_training(rpc_context(), *credentials.id(), result.is_ok()) .await .map_err(|e| { warn!( "failed to send end training request to the coordinator: {}", e ); }) } .instrument(trace_span!("end_training_rpc_request")), ); } fn handle_request(&mut self, request: Request<A>) { match request { Request::Download(req) => self.handle_download_request(req), Request::Upload(req) => self.handle_upload_request(req), Request::Select(req) => self.handle_select_request(req), Request::Aggregate(req) => self.handle_aggregate_request(req), } } fn handle_aggregate_request(&mut self, request: AggregateRequest<A>) { info!("handling aggregate request"); let AggregateRequest { response_tx } = request; self.allowed_ids = HashMap::new(); self.aggregation_future = Some(AggregationFuture { future: self.aggregator.aggregate(), response_tx, }); } fn handle_select_request(&mut self, request: SelectRequest<A>) { info!("handling select request"); let SelectRequest { credentials, response_tx, } = request; let (id, token) = credentials.into_parts(); self.allowed_ids.insert(id, token); if response_tx.send(Ok(())).is_err() { warn!("failed to send reponse: channel closed"); } } #[allow(clippy::cognitive_complexity)] fn poll_aggregation(&mut self, cx: &mut Context) { // Check if we're waiting for an aggregation, ie whether // there's a future to poll. 
let future = if let Some(future) = self.aggregation_future.take() { future } else { trace!("no aggregation future running: skipping polling"); return; }; trace!("polling aggregation future"); let AggregationFuture { mut future, response_tx, } = future; let result = match Pin::new(&mut future).poll(cx) { Poll::Ready(Ok(weights)) => { info!("aggregation succeeded, settings global weights"); self.global_weights = weights; if let Ok(path) = env::var("NEVERMINED_OUTPUTS_PATH") { let file_name = format!("{}/model_{}.npy", path, self.model_number); let mut file = File::create(&file_name).unwrap(); info!("Writing model {}", file_name); file.write_all(&self.global_weights).unwrap(); self.model_number += 1; } Ok(()) } Poll::Ready(Err(e)) => { error!(error = %e, "aggregation failed"); Err(e) } Poll::Pending => { debug!("aggregation future still running"); self.aggregation_future = Some(AggregationFuture { future, response_tx, }); return; } }; if response_tx.send(result).is_err() { error!("failed to send aggregation response to RPC task: receiver dropped"); } if self.model_number == 10 { thread::sleep(Duration::from_millis(10 * 1000)); signal::kill(Pid::this(), signal::Signal::SIGINT).unwrap(); } } } struct AggregationFuture<A> where A: Aggregator, { future: A::AggregateFut, response_tx: oneshot::Sender<Result<(), A::Error>>, } impl<A> Future for Service<A> where A: Aggregator + Unpin, { type Output = (); fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> { trace!("polling Service"); let pin = self.get_mut(); if let Poll::Ready(_) = pin.poll_requests(cx) { return Poll::Ready(()); } pin.poll_aggregation(cx); Poll::Pending } } pub struct ServiceRequests<A>(Pin<Box<dyn Stream<Item = Request<A>> + Send>>) where A: Aggregator; impl<A> Stream for ServiceRequests<A> where A: Aggregator, { type Item = Request<A>; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> { trace!("polling ServiceRequests"); self.0.as_mut().poll_next(cx) } } impl<A> ServiceRequests<A> where A: Aggregator +'static, { fn new( upload: UnboundedReceiver<UploadRequest>, download: UnboundedReceiver<DownloadRequest>, aggregate: UnboundedReceiver<AggregateRequest<A>>, select: UnboundedReceiver<SelectRequest<A>>, ) -> Self { let stream = download .map(Request::from) .merge(upload.map(Request::from)) .merge(aggregate.map(Request::from)) .merge(select.map(Request::from)); Self(Box::pin(stream)) } } #[derive(From)] pub struct UploadRequest { credentials: Credentials, data: Bytes, } #[derive(From)] pub struct DownloadRequest { credentials: Credentials, response_tx: oneshot::Sender<Result<Bytes, DownloadError>>, } #[derive(From)] pub struct AggregateRequest<A> where A: Aggregator, { response_tx: oneshot::Sender<Result<(), A::Error>>, } #[derive(From)] pub struct SelectRequest<A> where A: Aggregator, { credentials: Credentials, response_tx: oneshot::Sender<Result<(), A::Error>>, } #[derive(From)] pub enum Request<A> where A: Aggregator, { Upload(UploadRequest), Download(DownloadRequest), Aggregate(AggregateRequest<A>), Select(SelectRequest<A>), } pub struct ServiceHandle<A> where A: Aggregator, { upload: UnboundedSender<UploadRequest>, download: UnboundedSender<DownloadRequest>, aggregate: UnboundedSender<AggregateRequest<A>>, select: UnboundedSender<SelectRequest<A>>, } // We implement Clone manually because it can only be derived if A: // Clone, which we don't want. impl<A> Clone for ServiceHandle<A> where A: Aggregator, { fn clone(&self) -> Self
} impl<A> ServiceHandle<A> where A: Aggregator +'static, { pub fn new() -> (Self, ServiceRequests<A>) { let (upload_tx, upload_rx) = unbounded_channel::<UploadRequest>(); let (download_tx, download_rx) = unbounded_channel::<DownloadRequest>(); let (aggregate_tx, aggregate_rx) = unbounded_channel::<AggregateRequest<A>>(); let (select_tx, select_rx) = unbounded_channel::<SelectRequest<A>>(); let handle = Self { upload: upload_tx, download: download_tx, aggregate: aggregate_tx, select: select_tx, }; let service_requests = ServiceRequests::new(upload_rx, download_rx, aggregate_rx, select_rx); (handle, service_requests) } pub async fn download( &self, credentials: Credentials, ) -> Result<Bytes, ServiceError<DownloadError>> { let (tx, rx) = oneshot::channel::<Result<Bytes, DownloadError>>(); let request = DownloadRequest::from((credentials, tx)); Self::send_request(request, &self.download)?; Self::recv_response(rx) .await? .map_err(ServiceError::Request) } pub async fn upload( &self, credentials: Credentials, data: Bytes, ) -> Result<(), ServiceError<UploadError>> { let request = UploadRequest::from((credentials, data)); Self::send_request(request, &self.upload)?; Ok(()) } pub async fn aggregate(&self) -> Result<(), ServiceError<A::Error>> { let (tx, rx) = oneshot::channel::<Result<(), A::Error>>(); Self::send_request(AggregateRequest::from(tx), &self.aggregate)?; Self::recv_response(rx) .await? .map_err(ServiceError::Request) } pub async fn select(&self, credentials: Credentials) -> Result<(), ServiceError<A::Error>> { let (tx, rx) = oneshot::channel::<Result<(), A::Error>>(); Self::send_request(SelectRequest::from((credentials, tx)), &self.select)?; Self::recv_response(rx) .await? .map_err(ServiceError::Request) } fn send_request<P>(payload: P, tx: &UnboundedSender<P>) -> Result<(), ChannelError> { trace!("send request to the service"); if tx.send(payload).is_err() { warn!("failed to send request: channel closed"); Err(ChannelError::Request) } else { trace!("request sent"); Ok(()) } } async fn recv_response<R>(rx: oneshot::Receiver<R>) -> Result<R, ChannelError> { rx.await.map_err(|_| { warn!("could not receive response: channel closed"); ChannelError::Response }) } } #[derive(Error, Debug)] pub enum DownloadError { #[error("the user does not have the proper permissions")] Unauthorized, } #[derive(Error, Debug)] pub enum UploadError { #[error("the user does not have the proper permissions")] Unauthorized, } #[derive(Error, Debug)] pub enum ServiceError<E> where E: Error, { #[error("failed to send the request or receive the response")] Handle(#[from] ChannelError), #[error("request failed: {0}")] Request(E), } #[derive(Error, Debug)] pub enum ChannelError { #[error("failed to send request to Service")] Request, #[error("failed to receive the response from Service")] Response, }
{ Self { upload: self.upload.clone(), download: self.download.clone(), aggregate: self.aggregate.clone(), select: self.select.clone(), } }
identifier_body
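The `identifier_body` target in the row above is the hand-written `Clone` impl for `ServiceHandle<A>`, which the source comment explains is needed because `#[derive(Clone)]` would add an unwanted `A: Clone` bound. The reduced sketch below demonstrates that derive limitation with a `PhantomData` stand-in (the real handle stores channel senders, not `PhantomData`); `Handle` and `NotClone` are illustrative names only.

// Sketch of the derive limitation worked around above: deriving Clone would
// conservatively require `A: Clone`, so Clone is implemented by hand.
use std::marker::PhantomData;

struct Handle<A> {
    tag: String,
    _marker: PhantomData<A>,
}

// Manual impl: no `A: Clone` bound needed, because no `A` value is stored or cloned.
impl<A> Clone for Handle<A> {
    fn clone(&self) -> Self {
        Handle { tag: self.tag.clone(), _marker: PhantomData }
    }
}

fn main() {
    struct NotClone; // `A = NotClone` still works with the manual impl.
    let h: Handle<NotClone> = Handle { tag: "demo".into(), _marker: PhantomData };
    let _h2 = h.clone();
}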
service.rs
use crate::{ common::client::{ClientId, Credentials, Token}, coordinator, }; use bytes::Bytes; use derive_more::From; use futures::{ready, stream::Stream}; use std::{ collections::HashMap, error::Error, future::Future, pin::Pin, task::{Context, Poll}, }; use tarpc::context::current as rpc_context; use thiserror::Error; use tokio::{ stream::StreamExt, sync::{ mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender}, oneshot, }, }; use tracing_futures::Instrument; use std::env; use std::fs::File; use std::io::prelude::*; use nix::sys::signal; use nix::unistd::Pid; use std::thread; use std::time::Duration; /// A future that orchestrates the entire aggregator service. // TODO: maybe add a HashSet or HashMap of clients who already // uploaded their weights to prevent a client from uploading weights // multiple times. Or we could just remove that ID from the // `allowed_ids` map. // TODO: maybe add a HashSet for clients that are already // downloading/uploading, to prevent DoS attacks. pub struct Service<A> where A: Aggregator, { /// Clients that the coordinator selected for the current /// round. They can use their unique token to download the global /// weights and upload their own local results once they finished /// training. allowed_ids: HashMap<ClientId, Token>, /// The latest global weights as computed by the aggregator. // NOTE: We could store this directly in the task that handles the // HTTP requests. I initially though that having it here would // make it easier to bypass the HTTP layer, which is convenient // for testing because we can simulate client with just // AggregatorHandles. But maybe that's just another layer of // complexity that is not worth it. global_weights: Bytes, /// The aggregator itself, which handles the weights or performs /// the aggregations. aggregator: A, /// A client for the coordinator RPC service. rpc_client: coordinator::rpc::Client, requests: ServiceRequests<A>, aggregation_future: Option<AggregationFuture<A>>, model_number: usize, } /// This trait defines the methods that an aggregator should /// implement. pub trait Aggregator { type Error: Error + Send +'static + Sync; type AggregateFut: Future<Output = Result<Bytes, Self::Error>> + Unpin; type AddWeightsFut: Future<Output = Result<(), Self::Error>> + Unpin + Send +'static; /// Check the validity of the given weights and if they are valid, /// add them to the set of weights to aggregate. fn add_weights(&mut self, weights: Bytes) -> Self::AddWeightsFut; /// Run the aggregator and return the result. fn aggregate(&mut self) -> Self::AggregateFut; } impl<A> Service<A> where A: Aggregator, { pub fn new( aggregator: A, rpc_client: coordinator::rpc::Client, requests: ServiceRequests<A>, ) -> Self { Self { aggregator, requests, rpc_client, allowed_ids: HashMap::new(), global_weights: Bytes::new(), aggregation_future: None, model_number: 0, } } /// Handle the incoming requests. 
fn poll_requests(&mut self, cx: &mut Context) -> Poll<()> { trace!("polling requests"); loop { match ready!(Pin::new(&mut self.requests).poll_next(cx)) { Some(request) => self.handle_request(request), None => { trace!("no more request to handle"); return Poll::Ready(()); } } } } fn handle_download_request(&mut self, request: DownloadRequest) { debug!("handling download request"); let DownloadRequest { credentials, response_tx, } = request; if self .allowed_ids .get(credentials.id()) .map(|expected_token| credentials.token() == expected_token) .unwrap_or(false) { let _ = response_tx.send(Ok(self.global_weights.clone())); } else { warn!("rejecting download request"); let _ = response_tx.send(Err(DownloadError::Unauthorized)); } } fn handle_upload_request(&mut self, request: UploadRequest) { debug!("handling upload request"); let UploadRequest { credentials, data } = request; let accept_upload = self .allowed_ids .get(credentials.id()) .map(|expected_token| credentials.token() == expected_token) .unwrap_or(false); if!accept_upload { warn!("rejecting upload request"); return; } let mut rpc_client = self.rpc_client.clone(); let fut = self.aggregator.add_weights(data); tokio::spawn( async move { let result = fut.await; debug!("sending end training request to the coordinator"); rpc_client .end_training(rpc_context(), *credentials.id(), result.is_ok()) .await .map_err(|e| { warn!( "failed to send end training request to the coordinator: {}", e ); }) } .instrument(trace_span!("end_training_rpc_request")), ); } fn handle_request(&mut self, request: Request<A>) { match request { Request::Download(req) => self.handle_download_request(req), Request::Upload(req) => self.handle_upload_request(req), Request::Select(req) => self.handle_select_request(req), Request::Aggregate(req) => self.handle_aggregate_request(req), } } fn handle_aggregate_request(&mut self, request: AggregateRequest<A>) { info!("handling aggregate request"); let AggregateRequest { response_tx } = request; self.allowed_ids = HashMap::new(); self.aggregation_future = Some(AggregationFuture { future: self.aggregator.aggregate(), response_tx, }); } fn
(&mut self, request: SelectRequest<A>) { info!("handling select request"); let SelectRequest { credentials, response_tx, } = request; let (id, token) = credentials.into_parts(); self.allowed_ids.insert(id, token); if response_tx.send(Ok(())).is_err() { warn!("failed to send reponse: channel closed"); } } #[allow(clippy::cognitive_complexity)] fn poll_aggregation(&mut self, cx: &mut Context) { // Check if we're waiting for an aggregation, ie whether // there's a future to poll. let future = if let Some(future) = self.aggregation_future.take() { future } else { trace!("no aggregation future running: skipping polling"); return; }; trace!("polling aggregation future"); let AggregationFuture { mut future, response_tx, } = future; let result = match Pin::new(&mut future).poll(cx) { Poll::Ready(Ok(weights)) => { info!("aggregation succeeded, settings global weights"); self.global_weights = weights; if let Ok(path) = env::var("NEVERMINED_OUTPUTS_PATH") { let file_name = format!("{}/model_{}.npy", path, self.model_number); let mut file = File::create(&file_name).unwrap(); info!("Writing model {}", file_name); file.write_all(&self.global_weights).unwrap(); self.model_number += 1; } Ok(()) } Poll::Ready(Err(e)) => { error!(error = %e, "aggregation failed"); Err(e) } Poll::Pending => { debug!("aggregation future still running"); self.aggregation_future = Some(AggregationFuture { future, response_tx, }); return; } }; if response_tx.send(result).is_err() { error!("failed to send aggregation response to RPC task: receiver dropped"); } if self.model_number == 10 { thread::sleep(Duration::from_millis(10 * 1000)); signal::kill(Pid::this(), signal::Signal::SIGINT).unwrap(); } } } struct AggregationFuture<A> where A: Aggregator, { future: A::AggregateFut, response_tx: oneshot::Sender<Result<(), A::Error>>, } impl<A> Future for Service<A> where A: Aggregator + Unpin, { type Output = (); fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> { trace!("polling Service"); let pin = self.get_mut(); if let Poll::Ready(_) = pin.poll_requests(cx) { return Poll::Ready(()); } pin.poll_aggregation(cx); Poll::Pending } } pub struct ServiceRequests<A>(Pin<Box<dyn Stream<Item = Request<A>> + Send>>) where A: Aggregator; impl<A> Stream for ServiceRequests<A> where A: Aggregator, { type Item = Request<A>; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> { trace!("polling ServiceRequests"); self.0.as_mut().poll_next(cx) } } impl<A> ServiceRequests<A> where A: Aggregator +'static, { fn new( upload: UnboundedReceiver<UploadRequest>, download: UnboundedReceiver<DownloadRequest>, aggregate: UnboundedReceiver<AggregateRequest<A>>, select: UnboundedReceiver<SelectRequest<A>>, ) -> Self { let stream = download .map(Request::from) .merge(upload.map(Request::from)) .merge(aggregate.map(Request::from)) .merge(select.map(Request::from)); Self(Box::pin(stream)) } } #[derive(From)] pub struct UploadRequest { credentials: Credentials, data: Bytes, } #[derive(From)] pub struct DownloadRequest { credentials: Credentials, response_tx: oneshot::Sender<Result<Bytes, DownloadError>>, } #[derive(From)] pub struct AggregateRequest<A> where A: Aggregator, { response_tx: oneshot::Sender<Result<(), A::Error>>, } #[derive(From)] pub struct SelectRequest<A> where A: Aggregator, { credentials: Credentials, response_tx: oneshot::Sender<Result<(), A::Error>>, } #[derive(From)] pub enum Request<A> where A: Aggregator, { Upload(UploadRequest), Download(DownloadRequest), Aggregate(AggregateRequest<A>), 
Select(SelectRequest<A>), } pub struct ServiceHandle<A> where A: Aggregator, { upload: UnboundedSender<UploadRequest>, download: UnboundedSender<DownloadRequest>, aggregate: UnboundedSender<AggregateRequest<A>>, select: UnboundedSender<SelectRequest<A>>, } // We implement Clone manually because it can only be derived if A: // Clone, which we don't want. impl<A> Clone for ServiceHandle<A> where A: Aggregator, { fn clone(&self) -> Self { Self { upload: self.upload.clone(), download: self.download.clone(), aggregate: self.aggregate.clone(), select: self.select.clone(), } } } impl<A> ServiceHandle<A> where A: Aggregator +'static, { pub fn new() -> (Self, ServiceRequests<A>) { let (upload_tx, upload_rx) = unbounded_channel::<UploadRequest>(); let (download_tx, download_rx) = unbounded_channel::<DownloadRequest>(); let (aggregate_tx, aggregate_rx) = unbounded_channel::<AggregateRequest<A>>(); let (select_tx, select_rx) = unbounded_channel::<SelectRequest<A>>(); let handle = Self { upload: upload_tx, download: download_tx, aggregate: aggregate_tx, select: select_tx, }; let service_requests = ServiceRequests::new(upload_rx, download_rx, aggregate_rx, select_rx); (handle, service_requests) } pub async fn download( &self, credentials: Credentials, ) -> Result<Bytes, ServiceError<DownloadError>> { let (tx, rx) = oneshot::channel::<Result<Bytes, DownloadError>>(); let request = DownloadRequest::from((credentials, tx)); Self::send_request(request, &self.download)?; Self::recv_response(rx) .await? .map_err(ServiceError::Request) } pub async fn upload( &self, credentials: Credentials, data: Bytes, ) -> Result<(), ServiceError<UploadError>> { let request = UploadRequest::from((credentials, data)); Self::send_request(request, &self.upload)?; Ok(()) } pub async fn aggregate(&self) -> Result<(), ServiceError<A::Error>> { let (tx, rx) = oneshot::channel::<Result<(), A::Error>>(); Self::send_request(AggregateRequest::from(tx), &self.aggregate)?; Self::recv_response(rx) .await? .map_err(ServiceError::Request) } pub async fn select(&self, credentials: Credentials) -> Result<(), ServiceError<A::Error>> { let (tx, rx) = oneshot::channel::<Result<(), A::Error>>(); Self::send_request(SelectRequest::from((credentials, tx)), &self.select)?; Self::recv_response(rx) .await? .map_err(ServiceError::Request) } fn send_request<P>(payload: P, tx: &UnboundedSender<P>) -> Result<(), ChannelError> { trace!("send request to the service"); if tx.send(payload).is_err() { warn!("failed to send request: channel closed"); Err(ChannelError::Request) } else { trace!("request sent"); Ok(()) } } async fn recv_response<R>(rx: oneshot::Receiver<R>) -> Result<R, ChannelError> { rx.await.map_err(|_| { warn!("could not receive response: channel closed"); ChannelError::Response }) } } #[derive(Error, Debug)] pub enum DownloadError { #[error("the user does not have the proper permissions")] Unauthorized, } #[derive(Error, Debug)] pub enum UploadError { #[error("the user does not have the proper permissions")] Unauthorized, } #[derive(Error, Debug)] pub enum ServiceError<E> where E: Error, { #[error("failed to send the request or receive the response")] Handle(#[from] ChannelError), #[error("request failed: {0}")] Request(E), } #[derive(Error, Debug)] pub enum ChannelError { #[error("failed to send request to Service")] Request, #[error("failed to receive the response from Service")] Response, }
handle_select_request
identifier_name
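The `handle_select_request` middle in the row above inserts a `(ClientId, Token)` pair into `allowed_ids`, and the download/upload handlers then accept a client only when the presented token matches the stored one via `get(..).map(..).unwrap_or(false)`. The self-contained sketch below repeats that check with simplified stand-in types (a `u32` id and `String` token), which are assumptions for illustration only.

// Sketch of the credential check used by the download/upload handlers above.
use std::collections::HashMap;

type ClientId = u32;
type Token = String;

fn is_allowed(allowed_ids: &HashMap<ClientId, Token>, id: ClientId, token: &str) -> bool {
    allowed_ids
        .get(&id)
        .map(|expected| expected.as_str() == token)
        .unwrap_or(false)
}

fn main() {
    let mut allowed_ids = HashMap::new();
    allowed_ids.insert(1, "secret".to_string());
    assert!(is_allowed(&allowed_ids, 1, "secret"));
    assert!(!is_allowed(&allowed_ids, 1, "wrong"));
    assert!(!is_allowed(&allowed_ids, 2, "secret"));
}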
service.rs
use crate::{ common::client::{ClientId, Credentials, Token}, coordinator, }; use bytes::Bytes; use derive_more::From; use futures::{ready, stream::Stream}; use std::{ collections::HashMap, error::Error, future::Future, pin::Pin, task::{Context, Poll}, }; use tarpc::context::current as rpc_context; use thiserror::Error; use tokio::{ stream::StreamExt, sync::{ mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender}, oneshot, }, }; use tracing_futures::Instrument; use std::env; use std::fs::File; use std::io::prelude::*; use nix::sys::signal; use nix::unistd::Pid; use std::thread; use std::time::Duration; /// A future that orchestrates the entire aggregator service. // TODO: maybe add a HashSet or HashMap of clients who already // uploaded their weights to prevent a client from uploading weights // multiple times. Or we could just remove that ID from the // `allowed_ids` map. // TODO: maybe add a HashSet for clients that are already // downloading/uploading, to prevent DoS attacks. pub struct Service<A> where A: Aggregator, { /// Clients that the coordinator selected for the current /// round. They can use their unique token to download the global /// weights and upload their own local results once they finished /// training. allowed_ids: HashMap<ClientId, Token>, /// The latest global weights as computed by the aggregator. // NOTE: We could store this directly in the task that handles the // HTTP requests. I initially though that having it here would // make it easier to bypass the HTTP layer, which is convenient // for testing because we can simulate client with just // AggregatorHandles. But maybe that's just another layer of // complexity that is not worth it. global_weights: Bytes, /// The aggregator itself, which handles the weights or performs /// the aggregations. aggregator: A, /// A client for the coordinator RPC service. rpc_client: coordinator::rpc::Client, requests: ServiceRequests<A>, aggregation_future: Option<AggregationFuture<A>>, model_number: usize, } /// This trait defines the methods that an aggregator should /// implement. pub trait Aggregator { type Error: Error + Send +'static + Sync; type AggregateFut: Future<Output = Result<Bytes, Self::Error>> + Unpin; type AddWeightsFut: Future<Output = Result<(), Self::Error>> + Unpin + Send +'static; /// Check the validity of the given weights and if they are valid, /// add them to the set of weights to aggregate. fn add_weights(&mut self, weights: Bytes) -> Self::AddWeightsFut; /// Run the aggregator and return the result. fn aggregate(&mut self) -> Self::AggregateFut; } impl<A> Service<A> where A: Aggregator, { pub fn new( aggregator: A, rpc_client: coordinator::rpc::Client, requests: ServiceRequests<A>, ) -> Self { Self { aggregator, requests, rpc_client, allowed_ids: HashMap::new(), global_weights: Bytes::new(), aggregation_future: None, model_number: 0, } } /// Handle the incoming requests. 
fn poll_requests(&mut self, cx: &mut Context) -> Poll<()> { trace!("polling requests"); loop { match ready!(Pin::new(&mut self.requests).poll_next(cx)) { Some(request) => self.handle_request(request), None => { trace!("no more request to handle"); return Poll::Ready(()); } } } } fn handle_download_request(&mut self, request: DownloadRequest) { debug!("handling download request"); let DownloadRequest { credentials, response_tx, } = request; if self .allowed_ids .get(credentials.id()) .map(|expected_token| credentials.token() == expected_token) .unwrap_or(false) { let _ = response_tx.send(Ok(self.global_weights.clone())); } else { warn!("rejecting download request"); let _ = response_tx.send(Err(DownloadError::Unauthorized)); } } fn handle_upload_request(&mut self, request: UploadRequest) { debug!("handling upload request"); let UploadRequest { credentials, data } = request; let accept_upload = self .allowed_ids .get(credentials.id()) .map(|expected_token| credentials.token() == expected_token) .unwrap_or(false); if!accept_upload { warn!("rejecting upload request"); return; } let mut rpc_client = self.rpc_client.clone(); let fut = self.aggregator.add_weights(data); tokio::spawn( async move { let result = fut.await; debug!("sending end training request to the coordinator"); rpc_client .end_training(rpc_context(), *credentials.id(), result.is_ok()) .await .map_err(|e| { warn!( "failed to send end training request to the coordinator: {}", e ); }) } .instrument(trace_span!("end_training_rpc_request")), ); } fn handle_request(&mut self, request: Request<A>) { match request { Request::Download(req) => self.handle_download_request(req), Request::Upload(req) => self.handle_upload_request(req), Request::Select(req) => self.handle_select_request(req), Request::Aggregate(req) => self.handle_aggregate_request(req), } } fn handle_aggregate_request(&mut self, request: AggregateRequest<A>) { info!("handling aggregate request"); let AggregateRequest { response_tx } = request; self.allowed_ids = HashMap::new(); self.aggregation_future = Some(AggregationFuture { future: self.aggregator.aggregate(), response_tx, }); } fn handle_select_request(&mut self, request: SelectRequest<A>) { info!("handling select request"); let SelectRequest { credentials, response_tx, } = request; let (id, token) = credentials.into_parts(); self.allowed_ids.insert(id, token); if response_tx.send(Ok(())).is_err() { warn!("failed to send reponse: channel closed"); } } #[allow(clippy::cognitive_complexity)] fn poll_aggregation(&mut self, cx: &mut Context) { // Check if we're waiting for an aggregation, ie whether // there's a future to poll. 
let future = if let Some(future) = self.aggregation_future.take() { future } else { trace!("no aggregation future running: skipping polling"); return; }; trace!("polling aggregation future"); let AggregationFuture { mut future, response_tx, } = future; let result = match Pin::new(&mut future).poll(cx) { Poll::Ready(Ok(weights)) => { info!("aggregation succeeded, settings global weights"); self.global_weights = weights; if let Ok(path) = env::var("NEVERMINED_OUTPUTS_PATH") { let file_name = format!("{}/model_{}.npy", path, self.model_number); let mut file = File::create(&file_name).unwrap(); info!("Writing model {}", file_name); file.write_all(&self.global_weights).unwrap(); self.model_number += 1; } Ok(()) } Poll::Ready(Err(e)) => { error!(error = %e, "aggregation failed"); Err(e) } Poll::Pending => { debug!("aggregation future still running"); self.aggregation_future = Some(AggregationFuture { future, response_tx, }); return; } }; if response_tx.send(result).is_err() { error!("failed to send aggregation response to RPC task: receiver dropped"); } if self.model_number == 10 { thread::sleep(Duration::from_millis(10 * 1000)); signal::kill(Pid::this(), signal::Signal::SIGINT).unwrap(); } } } struct AggregationFuture<A> where A: Aggregator, { future: A::AggregateFut, response_tx: oneshot::Sender<Result<(), A::Error>>, } impl<A> Future for Service<A> where A: Aggregator + Unpin, { type Output = (); fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> { trace!("polling Service"); let pin = self.get_mut(); if let Poll::Ready(_) = pin.poll_requests(cx)
pin.poll_aggregation(cx); Poll::Pending } } pub struct ServiceRequests<A>(Pin<Box<dyn Stream<Item = Request<A>> + Send>>) where A: Aggregator; impl<A> Stream for ServiceRequests<A> where A: Aggregator, { type Item = Request<A>; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> { trace!("polling ServiceRequests"); self.0.as_mut().poll_next(cx) } } impl<A> ServiceRequests<A> where A: Aggregator +'static, { fn new( upload: UnboundedReceiver<UploadRequest>, download: UnboundedReceiver<DownloadRequest>, aggregate: UnboundedReceiver<AggregateRequest<A>>, select: UnboundedReceiver<SelectRequest<A>>, ) -> Self { let stream = download .map(Request::from) .merge(upload.map(Request::from)) .merge(aggregate.map(Request::from)) .merge(select.map(Request::from)); Self(Box::pin(stream)) } } #[derive(From)] pub struct UploadRequest { credentials: Credentials, data: Bytes, } #[derive(From)] pub struct DownloadRequest { credentials: Credentials, response_tx: oneshot::Sender<Result<Bytes, DownloadError>>, } #[derive(From)] pub struct AggregateRequest<A> where A: Aggregator, { response_tx: oneshot::Sender<Result<(), A::Error>>, } #[derive(From)] pub struct SelectRequest<A> where A: Aggregator, { credentials: Credentials, response_tx: oneshot::Sender<Result<(), A::Error>>, } #[derive(From)] pub enum Request<A> where A: Aggregator, { Upload(UploadRequest), Download(DownloadRequest), Aggregate(AggregateRequest<A>), Select(SelectRequest<A>), } pub struct ServiceHandle<A> where A: Aggregator, { upload: UnboundedSender<UploadRequest>, download: UnboundedSender<DownloadRequest>, aggregate: UnboundedSender<AggregateRequest<A>>, select: UnboundedSender<SelectRequest<A>>, } // We implement Clone manually because it can only be derived if A: // Clone, which we don't want. impl<A> Clone for ServiceHandle<A> where A: Aggregator, { fn clone(&self) -> Self { Self { upload: self.upload.clone(), download: self.download.clone(), aggregate: self.aggregate.clone(), select: self.select.clone(), } } } impl<A> ServiceHandle<A> where A: Aggregator +'static, { pub fn new() -> (Self, ServiceRequests<A>) { let (upload_tx, upload_rx) = unbounded_channel::<UploadRequest>(); let (download_tx, download_rx) = unbounded_channel::<DownloadRequest>(); let (aggregate_tx, aggregate_rx) = unbounded_channel::<AggregateRequest<A>>(); let (select_tx, select_rx) = unbounded_channel::<SelectRequest<A>>(); let handle = Self { upload: upload_tx, download: download_tx, aggregate: aggregate_tx, select: select_tx, }; let service_requests = ServiceRequests::new(upload_rx, download_rx, aggregate_rx, select_rx); (handle, service_requests) } pub async fn download( &self, credentials: Credentials, ) -> Result<Bytes, ServiceError<DownloadError>> { let (tx, rx) = oneshot::channel::<Result<Bytes, DownloadError>>(); let request = DownloadRequest::from((credentials, tx)); Self::send_request(request, &self.download)?; Self::recv_response(rx) .await? .map_err(ServiceError::Request) } pub async fn upload( &self, credentials: Credentials, data: Bytes, ) -> Result<(), ServiceError<UploadError>> { let request = UploadRequest::from((credentials, data)); Self::send_request(request, &self.upload)?; Ok(()) } pub async fn aggregate(&self) -> Result<(), ServiceError<A::Error>> { let (tx, rx) = oneshot::channel::<Result<(), A::Error>>(); Self::send_request(AggregateRequest::from(tx), &self.aggregate)?; Self::recv_response(rx) .await? 
.map_err(ServiceError::Request) } pub async fn select(&self, credentials: Credentials) -> Result<(), ServiceError<A::Error>> { let (tx, rx) = oneshot::channel::<Result<(), A::Error>>(); Self::send_request(SelectRequest::from((credentials, tx)), &self.select)?; Self::recv_response(rx) .await? .map_err(ServiceError::Request) } fn send_request<P>(payload: P, tx: &UnboundedSender<P>) -> Result<(), ChannelError> { trace!("send request to the service"); if tx.send(payload).is_err() { warn!("failed to send request: channel closed"); Err(ChannelError::Request) } else { trace!("request sent"); Ok(()) } } async fn recv_response<R>(rx: oneshot::Receiver<R>) -> Result<R, ChannelError> { rx.await.map_err(|_| { warn!("could not receive response: channel closed"); ChannelError::Response }) } } #[derive(Error, Debug)] pub enum DownloadError { #[error("the user does not have the proper permissions")] Unauthorized, } #[derive(Error, Debug)] pub enum UploadError { #[error("the user does not have the proper permissions")] Unauthorized, } #[derive(Error, Debug)] pub enum ServiceError<E> where E: Error, { #[error("failed to send the request or receive the response")] Handle(#[from] ChannelError), #[error("request failed: {0}")] Request(E), } #[derive(Error, Debug)] pub enum ChannelError { #[error("failed to send request to Service")] Request, #[error("failed to receive the response from Service")] Response, }
{ return Poll::Ready(()); }
conditional_block
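The `Service`/`ServiceHandle` code above routes every RPC through an unbounded channel and replies via a `oneshot` sender embedded in the request. A minimal standalone sketch of that request/response pattern, assuming a tokio runtime (the service's `Aggregator`, `Credentials`, and error types are left out; `Ping` is an invented request type):

```rust
use tokio::sync::{mpsc, oneshot};

// A request that carries its own reply channel, like `DownloadRequest` above.
struct Ping {
    reply_tx: oneshot::Sender<&'static str>,
}

#[tokio::main]
async fn main() {
    let (req_tx, mut req_rx) = mpsc::unbounded_channel::<Ping>();

    // Service side: pull requests off the channel and answer through the oneshot.
    tokio::spawn(async move {
        while let Some(req) = req_rx.recv().await {
            let _ = req.reply_tx.send("pong");
        }
    });

    // Handle side: build the request, send it, then await the response.
    let (reply_tx, reply_rx) = oneshot::channel();
    req_tx
        .send(Ping { reply_tx })
        .unwrap_or_else(|_| panic!("service task dropped the receiver"));
    assert_eq!(reply_rx.await.unwrap(), "pong");
}
```

If the service drops the `oneshot::Sender` without replying, the caller's `await` fails, which is the case the service's `ChannelError::Response` covers.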
bg.rs
of all background layers. The cache is created lazily /// (when BG layer pixels are looked up), so we will not waste time caching a disabled BG layer. #[derive(Default)] pub struct BgCache { layers: [BgLayerCache; 4], } /// Data that's stored in the BG layer caches for a single pixel #[derive(Copy, Clone, Default)] struct CachedPixel { // These are just copied from `TilemapEntry`. /// Tile priority bit (0-1) priority: u8, /// Precalculated color of the pixel (15-bit RGB). `None` = transparent. color: Option<SnesRgb>, } /// BG cache for a single layer struct BgLayerCache { /// Whether this cache contains valid data. If `false`, the cache will be refreshed on next /// access. valid: bool, /// Stores the prerendered scanline scanline: [CachedPixel; super::SCREEN_WIDTH as usize], } impl Default for BgLayerCache { fn default() -> Self { BgLayerCache { valid: false, scanline: [CachedPixel::default(); super::SCREEN_WIDTH as usize], } } } impl BgLayerCache { /// Invalidates the cache of this layer, causing it to be rebuilt on next access. #[allow(dead_code)] // FIXME Use in the right locations fn
(&mut self) { self.valid = false; } } impl BgCache { /// Invalidates the BG cache of all layers fn invalidate_all(&mut self) { self.layers[0].valid = false; self.layers[1].valid = false; self.layers[2].valid = false; self.layers[3].valid = false; } } /// Collected background settings struct BgSettings { /// Mosaic pixel size (1-16). 1 = Normal pixels. /// FIXME: I think there's a difference between disabled and enabled with 1x1 mosaic size in /// some modes (highres presumably) #[allow(dead_code)] // FIXME NYI mosaic: u8, /// Tilemap word address in VRAM /// "Starting at the tilemap address, the first $800 bytes are for tilemap A. Then come the /// $800 bytes for B, then C then D." tilemap_word_addr: u16, /// When `true`, this BGs tilemaps are repeated sideways tilemap_mirror_h: bool, /// When `true`, this BGs tilemaps are repeated downwards tilemap_mirror_v: bool, /// If `true`, BG tiles are 16x16 pixels. If `false`, they are 8x8 pixels. tile_size_16: bool, /// Character Data start address in VRAM chr_addr: u16, /// Horizontal scroll offset. Moves the BG layer to the left by some number of pixels. hofs: u16, /// Vertical scroll offset. Moves the BG layer up by some number of pixels. vofs: u16, } /// Unpacked tilemap entry for internal (rendering) use. /// /// A tilemap entry is 2 bytes large and contains informations about a single background layer tile. struct TilemapEntry { /// Flip this tile vertically (flips top and down of the tile) vflip: bool, /// Flip horizontally (flips left and right side) hflip: bool, /// Priority bit (0-1) priority: u8, /// Tile palette (0-7) palette: u8, /// Index into the character/tile data, where the actual tile character data is stored in /// bitplanes (10 bits) tile_number: u16, } impl Ppu { /// Determines whether the given BG layer (1-4) is enabled fn bg_enabled(&self, bg: u8, subscreen: bool) -> bool { let reg = if subscreen { self.ts } else { self.tm }; reg & (1 << (bg - 1))!= 0 } /// Reads the tilemap entry at the given VRAM word address. /// vhopppcc cccccccc (high, low) /// v/h = Vertical/Horizontal flip this tile. /// o = Tile priority. /// ppp = Tile palette base. /// cccccccccc = Tile number. fn tilemap_entry(&self, word_address: u16) -> TilemapEntry { let byte_address = word_address << 1; let lo = self.vram[byte_address]; let hi = self.vram[byte_address + 1]; TilemapEntry { vflip: hi & 0x80!= 0, hflip: hi & 0x40!= 0, priority: (hi & 0x20) >> 5, palette: (hi & 0x1c) >> 2, tile_number: ((hi as u16 & 0x03) << 8) | lo as u16, } } /// Collects properties of a background layer fn bg_settings(&self, bg: u8) -> BgSettings { // The BGxSC register for our background layer let bgsc = match bg { 1 => self.bg1sc, 2 => self.bg2sc, 3 => self.bg3sc, 4 => self.bg4sc, _ => unreachable!(), }; // Chr (Tileset, not Tilemap) start (word?) 
address >> 12 let chr = match bg { 1 => self.bg12nba & 0x0f, 2 => (self.bg12nba & 0xf0) >> 4, 3 => self.bg34nba & 0x0f, 4 => (self.bg34nba & 0xf0) >> 4, _ => unreachable!(), }; let (hofs, vofs) = match bg { 1 => (self.bg1hofs, self.bg1vofs), 2 => (self.bg2hofs, self.bg2vofs), 3 => (self.bg3hofs, self.bg3vofs), 4 => (self.bg4hofs, self.bg4vofs), _ => unreachable!(), }; BgSettings { mosaic: if self.mosaic & (1 << (bg-1)) == 0 { 1 } else { ((self.mosaic & 0xf0) >> 4) + 1 }, tilemap_word_addr: ((bgsc as u16 & 0xfc) >> 2) << 10, tilemap_mirror_h: bgsc & 0b01 == 0, // inverted bit value tilemap_mirror_v: bgsc & 0b10 == 0, // inverted bit value tile_size_16: match self.bg_mode() { // "If the BG character size for BG1/BG2/BG3/BG4 bit is set, then the BG is made of // 16x16 tiles. Otherwise, 8x8 tiles are used. However, note that Modes 5 and 6 // always use 16-pixel wide tiles, and Mode 7 always uses 8x8 tiles." 5 | 6 => true, 7 => false, _ => { // BGMODE: `4321----` (`-` = not relevant here) - Use 16x16 tiles? self.bgmode & (1 << (bg + 3))!= 0 } }, chr_addr: (chr as u16) << 12, hofs: hofs, vofs: vofs, } } /// Returns the number of color bits in the given BG layer in the current BG mode (2, 4, 7 or /// 8). To get the number of colors, use `1 << color_bits_for_bg`. /// /// Table of colors for BG layers (not what this function returns!). `X` denotes a BG for /// offset-per-tile data. /// ```text /// Mode # Colors for BG /// 1 2 3 4 /// ======---=---=---=---= /// 0 4 4 4 4 /// 1 16 16 4 - /// 2 16 16 X - /// 3 256 16 - - /// 4 256 4 X - /// 5 16 4 - - /// 6 16 - X - /// 7 256 - - - /// 7EXTBG 256 128 - - /// ``` fn color_bits_for_bg(&self, bg: u8) -> u8 { match (self.bg_mode(), bg) { (0, _) => 2, (1, 1) | (1, 2) => 4, (1, 3) => 2, (2, _) => 4, (3, 1) => 8, (3, 2) => 4, (4, 1) => 8, (4, 2) => 2, (5, 1) => 4, (5, 2) => 2, (6, _) => 4, (7, _) => panic!("unreachable: color_count_for_bg for mode 7"), _ => unreachable!(), } } /// Calculates the palette base index for a tile in the given background layer. `palette_num` /// is the palette number stored in the tilemap entry (the 3 `p` bits). fn palette_base_for_bg_tile(&self, bg: u8, palette_num: u8) -> u8 { debug_assert!(bg >= 1 && bg <= 4); match (self.bg_mode(), bg) { (0, _) => palette_num * 4 + (bg - 1) * 32, (1, _) | (5, _) => palette_num * (1 << self.color_bits_for_bg(bg) as u8), (2, _) => palette_num * 16, (3, 1) => 0, (3, 2) => palette_num * 16, (4, 1) => 0, (4, 2) => palette_num * 4, (6, _) => palette_num * 16, // BG1 has 16 colors (7, _) => panic!("unreachable: palette_base_for_bg_tile for mode 7"), _ => unreachable!(), } } fn render_mode7_scanline(&mut self) { // TODO Figure out how to integrate EXTBG assert!(self.setini & 0x40 == 0, "NYI: Mode 7 EXTBG"); // FIXME consider changing the type of `Ppu.m7a,...` to `i16` let vflip = self.m7sel & 0x02!= 0; let hflip = self.m7sel & 0x01!= 0; // 0/1: Wrap // 2: Transparent // 3: Fill with tile 0 let screen_over = self.m7sel >> 6; let y = self.scanline; for x in self.x..super::SCREEN_WIDTH as u16 { // Code taken from http://problemkaputt.de/fullsnes.htm // FIXME: The above source also has a much faster way to render whole scanlines! 
let screen_x = x ^ if hflip { 0xff } else { 0x00 }; let screen_y = y ^ if vflip { 0xff } else { 0x00 }; let mut org_x = (self.m7hofs as i16 - self.m7x as i16) &!0x1c00; if org_x < 0 { org_x |= 0x1c00; } let mut org_y = (self.m7vofs as i16 - self.m7y as i16) &!0x1c00; if org_y < 0 { org_y |= 0x1c00; } let mut vram_x: i32 = ((self.m7a as i16 as i32 * org_x as i32) &!0x3f) + ((self.m7b as i16 as i32 * org_y as i32) &!0x3f) + self.m7x as i16 as i32 * 0x100; let mut vram_y: i32 = ((self.m7c as i16 as i32 * org_x as i32) &!0x3f) + ((self.m7d as i16 as i32 * org_y as i32) &!0x3f) + self.m7y as i16 as i32 * 0x100; vram_x += ((self.m7b as i16 as i32 * screen_y as i32) &!0x3f) + self.m7a as i16 as i32 * screen_x as i32; vram_y += ((self.m7d as i16 as i32 * screen_y as i32) &!0x3f) + self.m7c as i16 as i32 * screen_x as i32; let out_of_bounds = vram_x & (1 << 18)!= 0 || vram_y & (1 << 18)!= 0; let palette_index = match screen_over { 2 if out_of_bounds => { // transparent 0 }, _ => { let (tile_x, tile_y) = if screen_over == 3 && out_of_bounds { (0, 0) // 3 -> use tile 0 } else { let tile_x: u16 = ((vram_x as u32 >> 11) & 0x7f) as u16; let tile_y: u16 = ((vram_y as u32 >> 11) & 0x7f) as u16; (tile_x, tile_y) }; let off_x: u16 = (vram_x as u16 >> 8) & 0x07; let off_y: u16 = (vram_y as u16 >> 8) & 0x07; // Tilemap address for (7-bit) tile X/Y coordinates (BG1 is 128x128 tiles): // `0yyyyyyy xxxxxxx0` let tilemap_addr: u16 = (tile_y << 8) | (tile_x << 1); // The "tilemap" in mode 7 just consists of "tile numbers" (or pixel addresses) let tile_number = self.vram[tilemap_addr] as u16; // The CHR address is calculated like this (where `t` is `tile_number` and `x` and `y` // are pixel offsets inside the tile): // `tttttttt tyyyxxx1` let chr_addr = (tile_number << 7) | (off_y << 4) | (off_x << 1) | 1; self.vram[chr_addr] }, }; let rgb = match palette_index { 0 => None, _ => Some(self.cgram.get_color(palette_index)), }; self.bg_cache.layers[0].scanline[x as usize] = CachedPixel { priority: 0, // Ignored anyways color: rgb, }; } } /// Render the current scanline of the given BG layer into its cache. /// /// We render starting at `self.x` (the pixel we actually need) until the end of the /// scanline. Note that this means that the `valid` flag is only relevant for the /// leftover part of the scanline, not the entire cached scanline. fn render_bg_scanline(&mut self, bg_num: u8) { // Apply BG scrolling and get the tile coordinates // FIXME Apply mosaic filter // FIXME Fix this: "Note that many games will set their vertical scroll values to -1 rather // than 0. This is because the SNES loads OBJ data for each scanline during the previous // scanline. The very first line, though, wouldn’t have any OBJ data loaded! So the SNES // doesn’t actually output scanline 0, although it does everything to render it. These // games want the first line of their tilemap to be the first line output, so they set // their VOFS registers in this manner. Note that an interlace screen needs -2 rather than // -1 to properly correct for the missing line 0 (and an emulator would need to add 2 // instead of 1 to account for this)." 
// -> I guess we should just decrement the physical screen height by 1 if self.bg_mode() == 7 { self.render_mode7_scanline(); return; } let mut x = self.x; let y = self.scanline; let bg = self.bg_settings(bg_num); let tile_size = if bg.tile_size_16 { 16 } else { 8 }; let (hofs, vofs) = (bg.hofs, bg.vofs); let (sx, sy) = (!bg.tilemap_mirror_h,!bg.tilemap_mirror_v); let color_bits = self.color_bits_for_bg(bg_num); if color_bits == 8 { // can use direct color mode debug_assert!(self.cgwsel & 0x01 == 0, "NYI: direct color mode"); } let mut tile_x = x.wrapping_add(hofs) / tile_size as u16; let tile_y = y.wrapping_add(vofs) / tile_size as u16; let mut off_x = (x.wrapping_add(hofs) % tile_size as u16) as u8; let off_y = (y.wrapping_add(vofs) % tile_size as u16) as u8; while x < super::SCREEN_WIDTH as u16 { // Render current tile (`tile_x`) starting at `off_x` until the end of the tile, // then go to next tile and set `off_x = 0` // Calculate the VRAM word address, where the tilemap entry for our tile is stored let tilemap_entry_word_address = bg.tilemap_word_addr | ((tile_y & 0x1f) << 5) | (tile_x & 0x1f) | if sy {(tile_y & 0x20) << if sx {6} else {5}} else {0} | if sx {(tile_x & 0x20) << 5} else {0}; let tilemap_entry = self.tilemap_entry(tilemap_entry_word_address); let bitplane_start_addr = (bg.chr_addr << 1) + (tilemap_entry.tile_number * 8 * color_bits as u16); // 8 bytes per bitplane let palette_base = self.palette_base_for_bg_tile(bg_num, tilemap_entry.palette); while off_x < tile_size && x < super::SCREEN_WIDTH as u16 { let palette_index = self.read_chr_entry(color_bits, bitplane_start_addr, tile_size, (off_x, off_y), (tilemap_entry.vflip, tilemap_entry.hflip)); let rgb = match palette_index { 0 => None, _ => Some(self.cgram.get_color(palette_base + palette_index)), }; self.bg_cache.layers[bg_num as usize - 1].scanline[x as usize] = CachedPixel { priority: tilemap_entry.priority, color: rgb, }; x += 1; off_x += 1; } tile_x += 1; off_x = 0; } } /// Main entry point into the BG layer renderer. /// /// Lookup the color of the given background layer (1-4) at the current pixel, using the given /// priority (0-1) only. This will also scroll backgrounds accordingly. /// /// This may only be called with BG layer numbers which are actually valid in the current BG /// mode (the renderer code makes sure that this is the case). /// /// Returns `None` if the pixel is transparent, `Some(SnesRgb)` otherwise. pub fn lookup_bg_color(&mut self, bg_num: u8, prio: u8, subscreen: bool) -> Option<SnesRgb> { debug_assert!(bg_num >= 1 && bg_num <= 4); debug_assert!(prio == 0 || prio == 1); if!self.bg_enabled(bg_num, subscreen) { return None; } if self.x == 0 { // Before we draw the first pixel, make sure that we invalidate the cache so it is // rebuilt first. self.bg_cache.invalidate_all(); } if!self.bg_cache.layers[bg_num as usize - 1].valid { // Call actual render code to render the scanline into the cache self.render_bg_scanline(bg_num); self.bg_cache.layers[bg_num as usize - 1].valid = true; } // Cache must be valid now, so we can access the pixel we need: let pixel = &self.bg_cache.layers
invalidate
identifier_name
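`tilemap_entry` above unpacks the documented `vhopppcc cccccccc` layout from two bytes. A self-contained sketch of that bit slicing; the free function and its test values are illustrative and not part of the emulator:

```rust
// Decode a 2-byte SNES BG tilemap entry (low byte, high byte) into
// (vflip, hflip, priority, palette, tile_number), mirroring `TilemapEntry`.
fn decode_tilemap_entry(lo: u8, hi: u8) -> (bool, bool, u8, u8, u16) {
    let vflip = hi & 0x80 != 0;                        // v: vertical flip
    let hflip = hi & 0x40 != 0;                        // h: horizontal flip
    let priority = (hi & 0x20) >> 5;                   // o: priority bit (0-1)
    let palette = (hi & 0x1c) >> 2;                    // ppp: palette 0-7
    let tile = ((hi as u16 & 0x03) << 8) | lo as u16;  // cccccccccc: 10-bit tile number
    (vflip, hflip, priority, palette, tile)
}

fn main() {
    // hi = 0b1010_1101: vflip set, hflip clear, priority 1, palette 3, tile 0x134.
    let (v, h, prio, pal, tile) = decode_tilemap_entry(0x34, 0xAD);
    assert!(v && !h);
    assert_eq!((prio, pal, tile), (1, 3, 0x134));
}
```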
bg.rs
line of all background layers. The cache is created lazily /// (when BG layer pixels are looked up), so we will not waste time caching a disabled BG layer. #[derive(Default)] pub struct BgCache { layers: [BgLayerCache; 4], } /// Data that's stored in the BG layer caches for a single pixel #[derive(Copy, Clone, Default)] struct CachedPixel { // These are just copied from `TilemapEntry`. /// Tile priority bit (0-1) priority: u8, /// Precalculated color of the pixel (15-bit RGB). `None` = transparent. color: Option<SnesRgb>, } /// BG cache for a single layer struct BgLayerCache { /// Whether this cache contains valid data. If `false`, the cache will be refreshed on next /// access. valid: bool, /// Stores the prerendered scanline scanline: [CachedPixel; super::SCREEN_WIDTH as usize], } impl Default for BgLayerCache { fn default() -> Self { BgLayerCache { valid: false, scanline: [CachedPixel::default(); super::SCREEN_WIDTH as usize], } } } impl BgLayerCache { /// Invalidates the cache of this layer, causing it to be rebuilt on next access. #[allow(dead_code)] // FIXME Use in the right locations fn invalidate(&mut self) { self.valid = false; } } impl BgCache { /// Invalidates the BG cache of all layers fn invalidate_all(&mut self) { self.layers[0].valid = false; self.layers[1].valid = false; self.layers[2].valid = false; self.layers[3].valid = false; } } /// Collected background settings struct BgSettings { /// Mosaic pixel size (1-16). 1 = Normal pixels. /// FIXME: I think there's a difference between disabled and enabled with 1x1 mosaic size in /// some modes (highres presumably) #[allow(dead_code)] // FIXME NYI mosaic: u8, /// Tilemap word address in VRAM /// "Starting at the tilemap address, the first $800 bytes are for tilemap A. Then come the /// $800 bytes for B, then C then D." tilemap_word_addr: u16, /// When `true`, this BGs tilemaps are repeated sideways tilemap_mirror_h: bool, /// When `true`, this BGs tilemaps are repeated downwards tilemap_mirror_v: bool, /// If `true`, BG tiles are 16x16 pixels. If `false`, they are 8x8 pixels. tile_size_16: bool, /// Character Data start address in VRAM chr_addr: u16, /// Horizontal scroll offset. Moves the BG layer to the left by some number of pixels. hofs: u16, /// Vertical scroll offset. Moves the BG layer up by some number of pixels. vofs: u16, } /// Unpacked tilemap entry for internal (rendering) use. /// /// A tilemap entry is 2 bytes large and contains informations about a single background layer tile. struct TilemapEntry { /// Flip this tile vertically (flips top and down of the tile) vflip: bool, /// Flip horizontally (flips left and right side) hflip: bool, /// Priority bit (0-1) priority: u8, /// Tile palette (0-7) palette: u8, /// Index into the character/tile data, where the actual tile character data is stored in /// bitplanes (10 bits) tile_number: u16, } impl Ppu { /// Determines whether the given BG layer (1-4) is enabled fn bg_enabled(&self, bg: u8, subscreen: bool) -> bool { let reg = if subscreen { self.ts } else { self.tm }; reg & (1 << (bg - 1))!= 0 } /// Reads the tilemap entry at the given VRAM word address. /// vhopppcc cccccccc (high, low) /// v/h = Vertical/Horizontal flip this tile. /// o = Tile priority. /// ppp = Tile palette base. /// cccccccccc = Tile number. 
fn tilemap_entry(&self, word_address: u16) -> TilemapEntry { let byte_address = word_address << 1; let lo = self.vram[byte_address]; let hi = self.vram[byte_address + 1]; TilemapEntry { vflip: hi & 0x80!= 0, hflip: hi & 0x40!= 0, priority: (hi & 0x20) >> 5, palette: (hi & 0x1c) >> 2, tile_number: ((hi as u16 & 0x03) << 8) | lo as u16, } } /// Collects properties of a background layer fn bg_settings(&self, bg: u8) -> BgSettings { // The BGxSC register for our background layer let bgsc = match bg { 1 => self.bg1sc, 2 => self.bg2sc, 3 => self.bg3sc, 4 => self.bg4sc, _ => unreachable!(), }; // Chr (Tileset, not Tilemap) start (word?) address >> 12 let chr = match bg { 1 => self.bg12nba & 0x0f, 2 => (self.bg12nba & 0xf0) >> 4, 3 => self.bg34nba & 0x0f, 4 => (self.bg34nba & 0xf0) >> 4, _ => unreachable!(), }; let (hofs, vofs) = match bg { 1 => (self.bg1hofs, self.bg1vofs), 2 => (self.bg2hofs, self.bg2vofs), 3 => (self.bg3hofs, self.bg3vofs), 4 => (self.bg4hofs, self.bg4vofs), _ => unreachable!(), }; BgSettings { mosaic: if self.mosaic & (1 << (bg-1)) == 0 { 1 } else { ((self.mosaic & 0xf0) >> 4) + 1 }, tilemap_word_addr: ((bgsc as u16 & 0xfc) >> 2) << 10, tilemap_mirror_h: bgsc & 0b01 == 0, // inverted bit value tilemap_mirror_v: bgsc & 0b10 == 0, // inverted bit value tile_size_16: match self.bg_mode() { // "If the BG character size for BG1/BG2/BG3/BG4 bit is set, then the BG is made of // 16x16 tiles. Otherwise, 8x8 tiles are used. However, note that Modes 5 and 6 // always use 16-pixel wide tiles, and Mode 7 always uses 8x8 tiles." 5 | 6 => true, 7 => false, _ => { // BGMODE: `4321----` (`-` = not relevant here) - Use 16x16 tiles? self.bgmode & (1 << (bg + 3))!= 0 } }, chr_addr: (chr as u16) << 12, hofs: hofs, vofs: vofs, } } /// Returns the number of color bits in the given BG layer in the current BG mode (2, 4, 7 or /// 8). To get the number of colors, use `1 << color_bits_for_bg`. /// /// Table of colors for BG layers (not what this function returns!). `X` denotes a BG for /// offset-per-tile data. /// ```text /// Mode # Colors for BG /// 1 2 3 4 /// ======---=---=---=---= /// 0 4 4 4 4 /// 1 16 16 4 - /// 2 16 16 X - /// 3 256 16 - - /// 4 256 4 X - /// 5 16 4 - - /// 6 16 - X - /// 7 256 - - - /// 7EXTBG 256 128 - - /// ``` fn color_bits_for_bg(&self, bg: u8) -> u8 { match (self.bg_mode(), bg) { (0, _) => 2, (1, 1) | (1, 2) => 4, (1, 3) => 2, (2, _) => 4, (3, 1) => 8, (3, 2) => 4, (4, 1) => 8, (4, 2) => 2, (5, 1) => 4, (5, 2) => 2, (6, _) => 4, (7, _) => panic!("unreachable: color_count_for_bg for mode 7"), _ => unreachable!(), } } /// Calculates the palette base index for a tile in the given background layer. `palette_num` /// is the palette number stored in the tilemap entry (the 3 `p` bits). 
fn palette_base_for_bg_tile(&self, bg: u8, palette_num: u8) -> u8 { debug_assert!(bg >= 1 && bg <= 4); match (self.bg_mode(), bg) { (0, _) => palette_num * 4 + (bg - 1) * 32, (1, _) | (5, _) => palette_num * (1 << self.color_bits_for_bg(bg) as u8), (2, _) => palette_num * 16, (3, 1) => 0, (3, 2) => palette_num * 16, (4, 1) => 0, (4, 2) => palette_num * 4, (6, _) => palette_num * 16, // BG1 has 16 colors (7, _) => panic!("unreachable: palette_base_for_bg_tile for mode 7"), _ => unreachable!(), } } fn render_mode7_scanline(&mut self) { // TODO Figure out how to integrate EXTBG assert!(self.setini & 0x40 == 0, "NYI: Mode 7 EXTBG"); // FIXME consider changing the type of `Ppu.m7a,...` to `i16` let vflip = self.m7sel & 0x02!= 0; let hflip = self.m7sel & 0x01!= 0; // 0/1: Wrap // 2: Transparent // 3: Fill with tile 0 let screen_over = self.m7sel >> 6; let y = self.scanline; for x in self.x..super::SCREEN_WIDTH as u16 { // Code taken from http://problemkaputt.de/fullsnes.htm // FIXME: The above source also has a much faster way to render whole scanlines! let screen_x = x ^ if hflip { 0xff } else { 0x00 }; let screen_y = y ^ if vflip { 0xff } else { 0x00 }; let mut org_x = (self.m7hofs as i16 - self.m7x as i16) &!0x1c00; if org_x < 0 { org_x |= 0x1c00; } let mut org_y = (self.m7vofs as i16 - self.m7y as i16) &!0x1c00; if org_y < 0 { org_y |= 0x1c00; } let mut vram_x: i32 = ((self.m7a as i16 as i32 * org_x as i32) &!0x3f) + ((self.m7b as i16 as i32 * org_y as i32) &!0x3f) + self.m7x as i16 as i32 * 0x100; let mut vram_y: i32 = ((self.m7c as i16 as i32 * org_x as i32) &!0x3f) + ((self.m7d as i16 as i32 * org_y as i32) &!0x3f) + self.m7y as i16 as i32 * 0x100; vram_x += ((self.m7b as i16 as i32 * screen_y as i32) &!0x3f) + self.m7a as i16 as i32 * screen_x as i32; vram_y += ((self.m7d as i16 as i32 * screen_y as i32) &!0x3f) + self.m7c as i16 as i32 * screen_x as i32; let out_of_bounds = vram_x & (1 << 18)!= 0 || vram_y & (1 << 18)!= 0; let palette_index = match screen_over { 2 if out_of_bounds => { // transparent 0 }, _ => { let (tile_x, tile_y) = if screen_over == 3 && out_of_bounds { (0, 0) // 3 -> use tile 0 } else { let tile_x: u16 = ((vram_x as u32 >> 11) & 0x7f) as u16; let tile_y: u16 = ((vram_y as u32 >> 11) & 0x7f) as u16; (tile_x, tile_y) }; let off_x: u16 = (vram_x as u16 >> 8) & 0x07; let off_y: u16 = (vram_y as u16 >> 8) & 0x07; // Tilemap address for (7-bit) tile X/Y coordinates (BG1 is 128x128 tiles): // `0yyyyyyy xxxxxxx0` let tilemap_addr: u16 = (tile_y << 8) | (tile_x << 1); // The "tilemap" in mode 7 just consists of "tile numbers" (or pixel addresses) let tile_number = self.vram[tilemap_addr] as u16; // The CHR address is calculated like this (where `t` is `tile_number` and `x` and `y` // are pixel offsets inside the tile): // `tttttttt tyyyxxx1` let chr_addr = (tile_number << 7) | (off_y << 4) | (off_x << 1) | 1; self.vram[chr_addr] }, }; let rgb = match palette_index { 0 => None, _ => Some(self.cgram.get_color(palette_index)), }; self.bg_cache.layers[0].scanline[x as usize] = CachedPixel { priority: 0, // Ignored anyways color: rgb, }; } } /// Render the current scanline of the given BG layer into its cache. /// /// We render starting at `self.x` (the pixel we actually need) until the end of the /// scanline. Note that this means that the `valid` flag is only relevant for the /// leftover part of the scanline, not the entire cached scanline. 
fn render_bg_scanline(&mut self, bg_num: u8) { // Apply BG scrolling and get the tile coordinates // FIXME Apply mosaic filter // FIXME Fix this: "Note that many games will set their vertical scroll values to -1 rather // than 0. This is because the SNES loads OBJ data for each scanline during the previous // scanline. The very first line, though, wouldn’t have any OBJ data loaded! So the SNES // doesn’t actually output scanline 0, although it does everything to render it. These // games want the first line of their tilemap to be the first line output, so they set // their VOFS registers in this manner. Note that an interlace screen needs -2 rather than // -1 to properly correct for the missing line 0 (and an emulator would need to add 2 // instead of 1 to account for this)." // -> I guess we should just decrement the physical screen height by 1 if self.bg_mode() == 7 { self.render_mode7_scanline(); return; } let mut x = self.x; let y = self.scanline; let bg = self.bg_settings(bg_num); let tile_size = if bg.tile_size_16 { 16 } else { 8 }; let (hofs, vofs) = (bg.hofs, bg.vofs); let (sx, sy) = (!bg.tilemap_mirror_h,!bg.tilemap_mirror_v);
let color_bits = self.color_bits_for_bg(bg_num); if color_bits == 8 { // can use direct color mode debug_assert!(self.cgwsel & 0x01 == 0, "NYI: direct color mode"); } let mut tile_x = x.wrapping_add(hofs) / tile_size as u16; let tile_y = y.wrapping_add(vofs) / tile_size as u16; let mut off_x = (x.wrapping_add(hofs) % tile_size as u16) as u8; let off_y = (y.wrapping_add(vofs) % tile_size as u16) as u8; while x < super::SCREEN_WIDTH as u16 { // Render current tile (`tile_x`) starting at `off_x` until the end of the tile, // then go to next tile and set `off_x = 0` // Calculate the VRAM word address, where the tilemap entry for our tile is stored let tilemap_entry_word_address = bg.tilemap_word_addr | ((tile_y & 0x1f) << 5) | (tile_x & 0x1f) | if sy {(tile_y & 0x20) << if sx {6} else {5}} else {0} | if sx {(tile_x & 0x20) << 5} else {0}; let tilemap_entry = self.tilemap_entry(tilemap_entry_word_address); let bitplane_start_addr = (bg.chr_addr << 1) + (tilemap_entry.tile_number * 8 * color_bits as u16); // 8 bytes per bitplane let palette_base = self.palette_base_for_bg_tile(bg_num, tilemap_entry.palette); while off_x < tile_size && x < super::SCREEN_WIDTH as u16 { let palette_index = self.read_chr_entry(color_bits, bitplane_start_addr, tile_size, (off_x, off_y), (tilemap_entry.vflip, tilemap_entry.hflip)); let rgb = match palette_index { 0 => None, _ => Some(self.cgram.get_color(palette_base + palette_index)), }; self.bg_cache.layers[bg_num as usize - 1].scanline[x as usize] = CachedPixel { priority: tilemap_entry.priority, color: rgb, }; x += 1; off_x += 1; } tile_x += 1; off_x = 0; } } /// Main entry point into the BG layer renderer. /// /// Lookup the color of the given background layer (1-4) at the current pixel, using the given /// priority (0-1) only. This will also scroll backgrounds accordingly. /// /// This may only be called with BG layer numbers which are actually valid in the current BG /// mode (the renderer code makes sure that this is the case). /// /// Returns `None` if the pixel is transparent, `Some(SnesRgb)` otherwise. pub fn lookup_bg_color(&mut self, bg_num: u8, prio: u8, subscreen: bool) -> Option<SnesRgb> { debug_assert!(bg_num >= 1 && bg_num <= 4); debug_assert!(prio == 0 || prio == 1); if!self.bg_enabled(bg_num, subscreen) { return None; } if self.x == 0 { // Before we draw the first pixel, make sure that we invalidate the cache so it is // rebuilt first. self.bg_cache.invalidate_all(); } if!self.bg_cache.layers[bg_num as usize - 1].valid { // Call actual render code to render the scanline into the cache self.render_bg_scanline(bg_num); self.bg_cache.layers[bg_num as usize - 1].valid = true; } // Cache must be valid now, so we can access the pixel we need: let pixel = &self.bg_cache.layers[bg
random_line_split
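`bg_settings` above derives the tilemap base address and the mirroring flags from the BGxSC register. A small sketch of just that decode, using the same shifts the code does (the standalone function name is made up):

```rust
// BGxSC: bits 7-2 select the tilemap base in 1K-word steps, bits 1-0 the screen
// size; a cleared size bit means the map is mirrored in that direction.
fn decode_bgsc(bgsc: u8) -> (u16, bool, bool) {
    let tilemap_word_addr = ((bgsc as u16 & 0xfc) >> 2) << 10;
    let mirror_h = bgsc & 0b01 == 0; // inverted bit value, as noted above
    let mirror_v = bgsc & 0b10 == 0;
    (tilemap_word_addr, mirror_h, mirror_v)
}

fn main() {
    let (addr, mh, mv) = decode_bgsc(0x7c);
    assert_eq!(addr, 0x7c00);
    assert!(mh && mv); // size bits 00: one 32x32 tilemap, repeated both ways
}
```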
topology.rs
// //! Copyright 2020 Alibaba Group Holding Limited. //! //! Licensed under the Apache License, Version 2.0 (the "License"); //! you may not use this file except in compliance with the License. //! You may obtain a copy of the License at //! //! http://www.apache.org/licenses/LICENSE-2.0 //! //! Unless required by applicable law or agreed to in writing, software //! distributed under the License is distributed on an "AS IS" BASIS, //! WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. //! See the License for the specific language governing permissions and //! limitations under the License. use std::path::Path; use std::io::{BufReader, BufRead, ErrorKind, BufWriter, Write, Read}; use std::fs::File; use std::collections::HashMap; use std::cmp::min; use std::time::Instant; use std::sync::Arc; use byteorder::{BigEndian, WriteBytesExt, ByteOrder}; pub struct SegmentList<T> { shift: usize, seg_size: usize, segments: Vec<Vec<T>>, current: Vec<T>, len: usize, } impl<T> SegmentList<T> { pub fn new(shift: usize) -> Self { let seg_size = 1 << shift; SegmentList { shift, seg_size, segments: Vec::new(), current: Vec::with_capacity(seg_size), len: 0, } } pub fn push(&mut self, e: T) { self.current.push(e); if self.current.len() == self.seg_size { self.segments.push(::std::mem::replace(&mut self.current, Vec::with_capacity(self.seg_size))); } self.len += 1; } pub fn get(&self, offset: usize) -> Option<&T> { let seg = offset >> self.shift; let offset = offset - self.seg_size * seg; if seg > self.segments.len() { None } else if seg == self.segments.len() { Some(&self.current[offset]) } else { Some(&self.segments[seg][offset]) } } pub fn get_multi(&self, start: usize, len: usize) -> Result<Vec<&T>, String> { let mut tmp = Vec::with_capacity(len); let mut seg = start >> self.shift; let offset = start - self.seg_size * seg; let mut left = len; let mut start = offset; while left > 0 { let end = min(left, self.seg_size - start); let read = self.get_in_seg(seg, start, end)?; for e in read.iter() { tmp.push(e); } seg += 1; start = 0; left -= read.len(); } Ok(tmp) } #[inline] pub fn len(&self) -> usize { self.len } #[inline] fn get_in_seg(&self, seg: usize, start: usize, len: usize) -> Result<&[T], String> { let end = start + len; if seg > self.segments.len() { Err("Index out of bound".to_owned()) } else if seg == self.segments.len() { if end > self.current.len() { Err("Index out of bound".to_owned()) } else { Ok(&self.current[start..end]) } } else { Ok(&self.segments[seg][start..end]) } } } /// 1 -> (2,3,4), /// 2 -> 3, /// 4 -> 5, /// 5 -> (1, 3), /// 6 -> (7, 8), /// 7 -> 8 const DEFAULT_GRAPH: [(u64, u64); 10] = [(1, 2), (1, 3), (1, 4), (2, 3), (5, 1), (5, 3), (4, 5), (6, 7), (6, 8), (7, 8)]; #[allow(dead_code)] pub struct GraphTopology { partition: u32, peers: u32, count: usize, neighbors: HashMap<u64, Arc<Vec<u64>>>, } #[derive(Clone, Serialize, Deserialize, Debug, Abomonation)] pub struct Vertex { pub id: u64, #[cfg(feature = "padding")] padding_1: [u64; 8], #[cfg(feature = "padding")] padding_2: [u64; 7], } impl Vertex { pub fn new(id: u64) -> Self { Vertex { id, #[cfg(feature = "padding")] padding_1: [0; 8], #[cfg(feature = "padding")] padding_2: [0; 7] } } } pub struct NeighborIter { cursor: usize, len: usize, inner: Arc<Vec<u64>> } impl NeighborIter { pub fn new(neighbors: &Arc<Vec<u64>>) -> Self { NeighborIter { cursor: 0, len: neighbors.len(), inner: neighbors.clone(), } } pub fn empty() -> Self { NeighborIter { cursor: 0, len: 0, inner: Arc::new(vec![]) } } } impl Iterator for 
NeighborIter { type Item = Vertex; fn next(&mut self) -> Option<Self::Item> { if self.cursor == self.len { None } else { self.cursor += 1; Some(Vertex::new(self.inner[self.cursor - 1])) } } } impl GraphTopology { pub fn with_default(partition: u32, peers: u32, directed: bool) -> Self { let mut neighbors = HashMap::new(); let mut count = 0; for (s, d) in DEFAULT_GRAPH.iter() { if peers == 1 || (s % peers as u64) as u32 == partition { let n = neighbors.entry(*s).or_insert(Vec::new()); n.push(*d); count += 1; } if peers == 1 || (d % peers as u64) as u32 == partition { let n = neighbors.entry(*d).or_insert(Vec::new()); if!directed { n.push(*s); count += 1; } } } let mut arc_neighbors = HashMap::new(); for (k, v) in neighbors.drain() { arc_neighbors.insert(k, Arc::new(v)); } GraphTopology { partition, count, peers, neighbors: arc_neighbors } } pub fn load<P: AsRef<Path>>(partition: u32, peers: u32, directed: bool, split: char, path: P) -> Self { let as_bin = path.as_ref().with_extension("bin"); Self::convert_to_bin(path, as_bin.as_path(), split); info!("Convert raw file format to binary {:?}", as_bin.as_os_str()); Self::load_bin(partition, peers, directed, as_bin.as_path()) } /// Load graph from binary file. /// /// The binary file should follow this format: src1 dst1 src2 dst2 src3 dst3... /// Vertex IDs are 32-bit big endian integers. pub fn load_bin<P: AsRef<Path>>(partition: u32, peers: u32, directed: bool, path: P) -> Self { let mut reader = BufReader::new(File::open(path).unwrap()); //let mut reader = File::open(path).unwrap(); let mut neighbors = HashMap::new(); let mut count = 0_usize; let mut start = ::std::time::Instant::now(); let mut buffer = [0u8;1<< 12]; let peers = peers as u64; loop { let read = match reader.read(&mut buffer[0..]) { Ok(n) => n, Err(e) => { if let ErrorKind::UnexpectedEof = e.kind() { break } else { panic!(e); } } }; if read > 0 { assert!(read % 8 == 0, "unexpected: read {} bytes", read); let valid = &mut buffer[0..read]; let mut extract = 0; while extract < read { let src = BigEndian::read_u64(&valid[extract..]); let dst = BigEndian::read_u64(&valid[extract + 8..]); if peers == 1 || (src % peers) as u32 == partition { let n = neighbors.entry(src).or_insert_with(|| Vec::new()); n.push(dst); } if!directed && (peers == 1 || (dst % peers) as u32 == partition) { let n = neighbors.entry(dst).or_insert_with(|| Vec::new()); n.push(src); } count += 1; if log::log_enabled!(log::Level::Debug) { if count % 5000000 == 0 { let duration_ms = (Instant::now() - start).as_millis() as f64; let speed = 5000000.0 / duration_ms * 1000.0; debug!("Scanned edges: {}, speed: {:.2}/s", count, speed); start = ::std::time::Instant::now(); } } extract += 16; } } else { break } } let mut arc_neighbors = HashMap::new(); for (k, v) in neighbors.drain() { arc_neighbors.insert(k, Arc::new(v)); } GraphTopology { partition, count, peers: peers as u32, neighbors: arc_neighbors, } } /// Convert graph file from raw text format to binary format. /// The binary file should follow this format: src1 dst1 src2 dst2 src3 dst3... /// Vertex IDs are 32-bit big endian integers. 
pub fn convert_to_bin<P1: AsRef<Path>, P2: AsRef<Path>>(input: P1, output: P2, split: char) { let reader = BufReader::new(File::open(input).unwrap()); let mut writer = BufWriter::new(File::create(output).unwrap()); let mut count = 0_usize; let mut start = ::std::time::Instant::now(); for edge in reader.lines() { let edge = edge.unwrap(); let edge = edge.split(split).collect::<Vec<_>>(); let src: u64 = edge[0].parse().unwrap(); let dst: u64 = edge[1].parse().unwrap(); writer.write_u64::<BigEndian>(src).unwrap(); writer.write_u64::<BigEndian>(dst).unwrap(); count += 1; if count % 5000000 == 0 { let duration_ms = (Instant::now() - start).as_millis() as f64; let speed = 5000000.0 / duration_ms * 1000.0; debug!("Scanned edges: {}, speed: {:.2}/s", count, speed); start = ::std::time::Instant::now(); } } writer.flush().unwrap(); } pub fn get_neighbors(&self, src: &u64) -> Option<NeighborIter> { self.neighbors.get(src).map(|n| { NeighborIter::new(n) }) } #[inline] pub fn count_nodes(&self) -> usize
#[inline] pub fn count_edges(&self) -> usize { self.count } } #[cfg(test)] mod test { use super::*; use std::path::PathBuf; #[test] fn test_segment_list() { let mut list = SegmentList::new(6); for i in 0..1024 { list.push(i); } for i in 0..1024 { let e = list.get(i as usize).unwrap(); assert_eq!(i, *e); } for i in 0..1014 { let res = list.get_multi(i as usize, 10).unwrap(); //println!("get res {:?}", res); for j in 0..10 { assert_eq!(i + j, *res[j]); } } } #[test] fn test_graph_load() { let mut d = PathBuf::from(env!("CARGO_MANIFEST_DIR")); d.push("data/twitter_rv.net"); { println!("dir is : {}", d.display()); let graph = GraphTopology::load(1, 1, true,'', d.as_path()); println!("finish load"); let n = graph.get_neighbors(&12).unwrap() .fold(0, |count, _| count + 1); assert_eq!(n, 4); } { let graph = GraphTopology::load_bin(1, 1, true, d.as_path().with_extension("bin")); let n = graph.get_neighbors(&12).unwrap() .map(|v| { println!("get v : {}", v.id); v }) .fold(0, |count, _| count + 1); assert_eq!(n, 4); } } #[test] fn test_graph() { let graph = GraphTopology::with_default(3, 1, true); { let mut ns = vec![]; for n in graph.get_neighbors(&1).unwrap() { ns.push(n); } let mut ns = ns.into_iter().map(|v| v.id).collect::<Vec<_>>(); ns.sort(); assert_eq!(ns, vec![2, 3, 4]); } { let mut ns = vec![]; for n in graph.get_neighbors(&6).unwrap() { ns.push(n); } let mut ns = ns.into_iter().map(|v| v.id).collect::<Vec<_>>(); ns.sort(); assert_eq!(ns, vec![7, 8]); } } }
{ self.neighbors.len() }
identifier_body
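`load_bin` above consumes fixed-size records of two 8-byte big-endian endpoints per edge. A std-only sketch of that record layout; the buffer contents here are invented, and the real loader streams through a `BufReader` with `byteorder` instead of parsing an in-memory slice:

```rust
// Read one big-endian u64 from an 8-byte slice.
fn be_u64(bytes: &[u8]) -> u64 {
    let mut buf = [0u8; 8];
    buf.copy_from_slice(bytes);
    u64::from_be_bytes(buf)
}

// Each edge record is 16 bytes: src (8 bytes, big endian) followed by dst.
fn parse_edges(buf: &[u8]) -> Vec<(u64, u64)> {
    assert!(buf.len() % 16 == 0, "each edge is two 8-byte endpoints");
    buf.chunks_exact(16)
        .map(|rec| (be_u64(&rec[0..8]), be_u64(&rec[8..16])))
        .collect()
}

fn main() {
    let mut buf = Vec::new();
    for &(s, d) in [(1u64, 2u64), (5, 3)].iter() {
        buf.extend_from_slice(&s.to_be_bytes());
        buf.extend_from_slice(&d.to_be_bytes());
    }
    assert_eq!(parse_edges(&buf), vec![(1, 2), (5, 3)]);
}
```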
topology.rs
// //! Copyright 2020 Alibaba Group Holding Limited. //! //! Licensed under the Apache License, Version 2.0 (the "License"); //! you may not use this file except in compliance with the License. //! You may obtain a copy of the License at //! //! http://www.apache.org/licenses/LICENSE-2.0 //! //! Unless required by applicable law or agreed to in writing, software //! distributed under the License is distributed on an "AS IS" BASIS, //! WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. //! See the License for the specific language governing permissions and //! limitations under the License. use std::path::Path; use std::io::{BufReader, BufRead, ErrorKind, BufWriter, Write, Read}; use std::fs::File; use std::collections::HashMap; use std::cmp::min; use std::time::Instant; use std::sync::Arc; use byteorder::{BigEndian, WriteBytesExt, ByteOrder}; pub struct SegmentList<T> { shift: usize, seg_size: usize, segments: Vec<Vec<T>>, current: Vec<T>, len: usize, } impl<T> SegmentList<T> { pub fn new(shift: usize) -> Self { let seg_size = 1 << shift; SegmentList { shift, seg_size, segments: Vec::new(), current: Vec::with_capacity(seg_size), len: 0, } } pub fn push(&mut self, e: T) { self.current.push(e); if self.current.len() == self.seg_size { self.segments.push(::std::mem::replace(&mut self.current, Vec::with_capacity(self.seg_size))); } self.len += 1; } pub fn get(&self, offset: usize) -> Option<&T> { let seg = offset >> self.shift; let offset = offset - self.seg_size * seg; if seg > self.segments.len() { None } else if seg == self.segments.len() { Some(&self.current[offset]) } else { Some(&self.segments[seg][offset]) } } pub fn get_multi(&self, start: usize, len: usize) -> Result<Vec<&T>, String> { let mut tmp = Vec::with_capacity(len); let mut seg = start >> self.shift; let offset = start - self.seg_size * seg; let mut left = len; let mut start = offset; while left > 0 { let end = min(left, self.seg_size - start); let read = self.get_in_seg(seg, start, end)?; for e in read.iter() { tmp.push(e); } seg += 1; start = 0; left -= read.len(); } Ok(tmp) } #[inline] pub fn len(&self) -> usize { self.len } #[inline] fn get_in_seg(&self, seg: usize, start: usize, len: usize) -> Result<&[T], String> { let end = start + len; if seg > self.segments.len() { Err("Index out of bound".to_owned()) } else if seg == self.segments.len() { if end > self.current.len() { Err("Index out of bound".to_owned()) } else { Ok(&self.current[start..end]) } } else { Ok(&self.segments[seg][start..end]) } } } /// 1 -> (2,3,4), /// 2 -> 3, /// 4 -> 5, /// 5 -> (1, 3), /// 6 -> (7, 8), /// 7 -> 8 const DEFAULT_GRAPH: [(u64, u64); 10] = [(1, 2), (1, 3), (1, 4), (2, 3), (5, 1), (5, 3), (4, 5), (6, 7), (6, 8), (7, 8)]; #[allow(dead_code)] pub struct GraphTopology { partition: u32, peers: u32, count: usize, neighbors: HashMap<u64, Arc<Vec<u64>>>, } #[derive(Clone, Serialize, Deserialize, Debug, Abomonation)] pub struct Vertex { pub id: u64, #[cfg(feature = "padding")] padding_1: [u64; 8], #[cfg(feature = "padding")] padding_2: [u64; 7], } impl Vertex { pub fn
(id: u64) -> Self { Vertex { id, #[cfg(feature = "padding")] padding_1: [0; 8], #[cfg(feature = "padding")] padding_2: [0; 7] } } } pub struct NeighborIter { cursor: usize, len: usize, inner: Arc<Vec<u64>> } impl NeighborIter { pub fn new(neighbors: &Arc<Vec<u64>>) -> Self { NeighborIter { cursor: 0, len: neighbors.len(), inner: neighbors.clone(), } } pub fn empty() -> Self { NeighborIter { cursor: 0, len: 0, inner: Arc::new(vec![]) } } } impl Iterator for NeighborIter { type Item = Vertex; fn next(&mut self) -> Option<Self::Item> { if self.cursor == self.len { None } else { self.cursor += 1; Some(Vertex::new(self.inner[self.cursor - 1])) } } } impl GraphTopology { pub fn with_default(partition: u32, peers: u32, directed: bool) -> Self { let mut neighbors = HashMap::new(); let mut count = 0; for (s, d) in DEFAULT_GRAPH.iter() { if peers == 1 || (s % peers as u64) as u32 == partition { let n = neighbors.entry(*s).or_insert(Vec::new()); n.push(*d); count += 1; } if peers == 1 || (d % peers as u64) as u32 == partition { let n = neighbors.entry(*d).or_insert(Vec::new()); if!directed { n.push(*s); count += 1; } } } let mut arc_neighbors = HashMap::new(); for (k, v) in neighbors.drain() { arc_neighbors.insert(k, Arc::new(v)); } GraphTopology { partition, count, peers, neighbors: arc_neighbors } } pub fn load<P: AsRef<Path>>(partition: u32, peers: u32, directed: bool, split: char, path: P) -> Self { let as_bin = path.as_ref().with_extension("bin"); Self::convert_to_bin(path, as_bin.as_path(), split); info!("Convert raw file format to binary {:?}", as_bin.as_os_str()); Self::load_bin(partition, peers, directed, as_bin.as_path()) } /// Load graph from binary file. /// /// The binary file should follow this format: src1 dst1 src2 dst2 src3 dst3... /// Vertex IDs are 32-bit big endian integers. pub fn load_bin<P: AsRef<Path>>(partition: u32, peers: u32, directed: bool, path: P) -> Self { let mut reader = BufReader::new(File::open(path).unwrap()); //let mut reader = File::open(path).unwrap(); let mut neighbors = HashMap::new(); let mut count = 0_usize; let mut start = ::std::time::Instant::now(); let mut buffer = [0u8;1<< 12]; let peers = peers as u64; loop { let read = match reader.read(&mut buffer[0..]) { Ok(n) => n, Err(e) => { if let ErrorKind::UnexpectedEof = e.kind() { break } else { panic!(e); } } }; if read > 0 { assert!(read % 8 == 0, "unexpected: read {} bytes", read); let valid = &mut buffer[0..read]; let mut extract = 0; while extract < read { let src = BigEndian::read_u64(&valid[extract..]); let dst = BigEndian::read_u64(&valid[extract + 8..]); if peers == 1 || (src % peers) as u32 == partition { let n = neighbors.entry(src).or_insert_with(|| Vec::new()); n.push(dst); } if!directed && (peers == 1 || (dst % peers) as u32 == partition) { let n = neighbors.entry(dst).or_insert_with(|| Vec::new()); n.push(src); } count += 1; if log::log_enabled!(log::Level::Debug) { if count % 5000000 == 0 { let duration_ms = (Instant::now() - start).as_millis() as f64; let speed = 5000000.0 / duration_ms * 1000.0; debug!("Scanned edges: {}, speed: {:.2}/s", count, speed); start = ::std::time::Instant::now(); } } extract += 16; } } else { break } } let mut arc_neighbors = HashMap::new(); for (k, v) in neighbors.drain() { arc_neighbors.insert(k, Arc::new(v)); } GraphTopology { partition, count, peers: peers as u32, neighbors: arc_neighbors, } } /// Convert graph file from raw text format to binary format. /// The binary file should follow this format: src1 dst1 src2 dst2 src3 dst3... 
/// Vertex IDs are 32-bit big endian integers. pub fn convert_to_bin<P1: AsRef<Path>, P2: AsRef<Path>>(input: P1, output: P2, split: char) { let reader = BufReader::new(File::open(input).unwrap()); let mut writer = BufWriter::new(File::create(output).unwrap()); let mut count = 0_usize; let mut start = ::std::time::Instant::now(); for edge in reader.lines() { let edge = edge.unwrap(); let edge = edge.split(split).collect::<Vec<_>>(); let src: u64 = edge[0].parse().unwrap(); let dst: u64 = edge[1].parse().unwrap(); writer.write_u64::<BigEndian>(src).unwrap(); writer.write_u64::<BigEndian>(dst).unwrap(); count += 1; if count % 5000000 == 0 { let duration_ms = (Instant::now() - start).as_millis() as f64; let speed = 5000000.0 / duration_ms * 1000.0; debug!("Scanned edges: {}, speed: {:.2}/s", count, speed); start = ::std::time::Instant::now(); } } writer.flush().unwrap(); } pub fn get_neighbors(&self, src: &u64) -> Option<NeighborIter> { self.neighbors.get(src).map(|n| { NeighborIter::new(n) }) } #[inline] pub fn count_nodes(&self) -> usize { self.neighbors.len() } #[inline] pub fn count_edges(&self) -> usize { self.count } } #[cfg(test)] mod test { use super::*; use std::path::PathBuf; #[test] fn test_segment_list() { let mut list = SegmentList::new(6); for i in 0..1024 { list.push(i); } for i in 0..1024 { let e = list.get(i as usize).unwrap(); assert_eq!(i, *e); } for i in 0..1014 { let res = list.get_multi(i as usize, 10).unwrap(); //println!("get res {:?}", res); for j in 0..10 { assert_eq!(i + j, *res[j]); } } } #[test] fn test_graph_load() { let mut d = PathBuf::from(env!("CARGO_MANIFEST_DIR")); d.push("data/twitter_rv.net"); { println!("dir is : {}", d.display()); let graph = GraphTopology::load(1, 1, true,'', d.as_path()); println!("finish load"); let n = graph.get_neighbors(&12).unwrap() .fold(0, |count, _| count + 1); assert_eq!(n, 4); } { let graph = GraphTopology::load_bin(1, 1, true, d.as_path().with_extension("bin")); let n = graph.get_neighbors(&12).unwrap() .map(|v| { println!("get v : {}", v.id); v }) .fold(0, |count, _| count + 1); assert_eq!(n, 4); } } #[test] fn test_graph() { let graph = GraphTopology::with_default(3, 1, true); { let mut ns = vec![]; for n in graph.get_neighbors(&1).unwrap() { ns.push(n); } let mut ns = ns.into_iter().map(|v| v.id).collect::<Vec<_>>(); ns.sort(); assert_eq!(ns, vec![2, 3, 4]); } { let mut ns = vec![]; for n in graph.get_neighbors(&6).unwrap() { ns.push(n); } let mut ns = ns.into_iter().map(|v| v.id).collect::<Vec<_>>(); ns.sort(); assert_eq!(ns, vec![7, 8]); } } }
new
identifier_name
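`SegmentList::get` above splits an absolute offset into a segment index and a local offset using the power-of-two segment size. A tiny sketch of that arithmetic with a made-up helper name:

```rust
// With segments of 2^shift elements, an absolute offset maps to
// (segment index, offset inside that segment).
fn split_index(offset: usize, shift: usize) -> (usize, usize) {
    let seg = offset >> shift;            // which segment
    let local = offset - (seg << shift);  // position inside that segment
    (seg, local)
}

fn main() {
    let shift = 6; // 64-element segments, as in the test above
    assert_eq!(split_index(0, shift), (0, 0));
    assert_eq!(split_index(63, shift), (0, 63));
    assert_eq!(split_index(64, shift), (1, 0));
    assert_eq!(split_index(130, shift), (2, 2));
}
```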
topology.rs
// //! Copyright 2020 Alibaba Group Holding Limited. //! //! Licensed under the Apache License, Version 2.0 (the "License"); //! you may not use this file except in compliance with the License. //! You may obtain a copy of the License at //! //! http://www.apache.org/licenses/LICENSE-2.0 //! //! Unless required by applicable law or agreed to in writing, software //! distributed under the License is distributed on an "AS IS" BASIS, //! WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. //! See the License for the specific language governing permissions and //! limitations under the License. use std::path::Path; use std::io::{BufReader, BufRead, ErrorKind, BufWriter, Write, Read}; use std::fs::File; use std::collections::HashMap; use std::cmp::min; use std::time::Instant; use std::sync::Arc; use byteorder::{BigEndian, WriteBytesExt, ByteOrder}; pub struct SegmentList<T> { shift: usize, seg_size: usize, segments: Vec<Vec<T>>, current: Vec<T>, len: usize, } impl<T> SegmentList<T> { pub fn new(shift: usize) -> Self { let seg_size = 1 << shift; SegmentList { shift, seg_size, segments: Vec::new(), current: Vec::with_capacity(seg_size), len: 0, } } pub fn push(&mut self, e: T) { self.current.push(e); if self.current.len() == self.seg_size { self.segments.push(::std::mem::replace(&mut self.current, Vec::with_capacity(self.seg_size))); } self.len += 1; } pub fn get(&self, offset: usize) -> Option<&T> { let seg = offset >> self.shift; let offset = offset - self.seg_size * seg; if seg > self.segments.len() { None } else if seg == self.segments.len() { Some(&self.current[offset]) } else { Some(&self.segments[seg][offset]) } } pub fn get_multi(&self, start: usize, len: usize) -> Result<Vec<&T>, String> { let mut tmp = Vec::with_capacity(len); let mut seg = start >> self.shift; let offset = start - self.seg_size * seg; let mut left = len; let mut start = offset; while left > 0 { let end = min(left, self.seg_size - start); let read = self.get_in_seg(seg, start, end)?; for e in read.iter() { tmp.push(e); } seg += 1; start = 0; left -= read.len(); } Ok(tmp) } #[inline] pub fn len(&self) -> usize { self.len } #[inline] fn get_in_seg(&self, seg: usize, start: usize, len: usize) -> Result<&[T], String> { let end = start + len; if seg > self.segments.len() { Err("Index out of bound".to_owned()) } else if seg == self.segments.len() { if end > self.current.len() { Err("Index out of bound".to_owned()) } else { Ok(&self.current[start..end]) } } else { Ok(&self.segments[seg][start..end]) } } } /// 1 -> (2,3,4), /// 2 -> 3, /// 4 -> 5, /// 5 -> (1, 3), /// 6 -> (7, 8), /// 7 -> 8 const DEFAULT_GRAPH: [(u64, u64); 10] = [(1, 2), (1, 3), (1, 4), (2, 3), (5, 1), (5, 3), (4, 5), (6, 7), (6, 8), (7, 8)]; #[allow(dead_code)] pub struct GraphTopology { partition: u32, peers: u32, count: usize, neighbors: HashMap<u64, Arc<Vec<u64>>>, } #[derive(Clone, Serialize, Deserialize, Debug, Abomonation)] pub struct Vertex { pub id: u64, #[cfg(feature = "padding")] padding_1: [u64; 8], #[cfg(feature = "padding")] padding_2: [u64; 7], } impl Vertex { pub fn new(id: u64) -> Self { Vertex { id, #[cfg(feature = "padding")] padding_1: [0; 8], #[cfg(feature = "padding")] padding_2: [0; 7] } } } pub struct NeighborIter { cursor: usize, len: usize, inner: Arc<Vec<u64>> } impl NeighborIter { pub fn new(neighbors: &Arc<Vec<u64>>) -> Self { NeighborIter { cursor: 0, len: neighbors.len(), inner: neighbors.clone(), } } pub fn empty() -> Self { NeighborIter { cursor: 0, len: 0, inner: Arc::new(vec![]) } } } impl Iterator for 
NeighborIter { type Item = Vertex; fn next(&mut self) -> Option<Self::Item> { if self.cursor == self.len { None } else { self.cursor += 1; Some(Vertex::new(self.inner[self.cursor - 1])) } } } impl GraphTopology { pub fn with_default(partition: u32, peers: u32, directed: bool) -> Self { let mut neighbors = HashMap::new(); let mut count = 0; for (s, d) in DEFAULT_GRAPH.iter() { if peers == 1 || (s % peers as u64) as u32 == partition { let n = neighbors.entry(*s).or_insert(Vec::new()); n.push(*d);
if peers == 1 || (d % peers as u64) as u32 == partition { let n = neighbors.entry(*d).or_insert(Vec::new()); if!directed { n.push(*s); count += 1; } } } let mut arc_neighbors = HashMap::new(); for (k, v) in neighbors.drain() { arc_neighbors.insert(k, Arc::new(v)); } GraphTopology { partition, count, peers, neighbors: arc_neighbors } } pub fn load<P: AsRef<Path>>(partition: u32, peers: u32, directed: bool, split: char, path: P) -> Self { let as_bin = path.as_ref().with_extension("bin"); Self::convert_to_bin(path, as_bin.as_path(), split); info!("Convert raw file format to binary {:?}", as_bin.as_os_str()); Self::load_bin(partition, peers, directed, as_bin.as_path()) } /// Load graph from binary file. /// /// The binary file should follow this format: src1 dst1 src2 dst2 src3 dst3... /// Vertex IDs are 32-bit big endian integers. pub fn load_bin<P: AsRef<Path>>(partition: u32, peers: u32, directed: bool, path: P) -> Self { let mut reader = BufReader::new(File::open(path).unwrap()); //let mut reader = File::open(path).unwrap(); let mut neighbors = HashMap::new(); let mut count = 0_usize; let mut start = ::std::time::Instant::now(); let mut buffer = [0u8;1<< 12]; let peers = peers as u64; loop { let read = match reader.read(&mut buffer[0..]) { Ok(n) => n, Err(e) => { if let ErrorKind::UnexpectedEof = e.kind() { break } else { panic!(e); } } }; if read > 0 { assert!(read % 8 == 0, "unexpected: read {} bytes", read); let valid = &mut buffer[0..read]; let mut extract = 0; while extract < read { let src = BigEndian::read_u64(&valid[extract..]); let dst = BigEndian::read_u64(&valid[extract + 8..]); if peers == 1 || (src % peers) as u32 == partition { let n = neighbors.entry(src).or_insert_with(|| Vec::new()); n.push(dst); } if!directed && (peers == 1 || (dst % peers) as u32 == partition) { let n = neighbors.entry(dst).or_insert_with(|| Vec::new()); n.push(src); } count += 1; if log::log_enabled!(log::Level::Debug) { if count % 5000000 == 0 { let duration_ms = (Instant::now() - start).as_millis() as f64; let speed = 5000000.0 / duration_ms * 1000.0; debug!("Scanned edges: {}, speed: {:.2}/s", count, speed); start = ::std::time::Instant::now(); } } extract += 16; } } else { break } } let mut arc_neighbors = HashMap::new(); for (k, v) in neighbors.drain() { arc_neighbors.insert(k, Arc::new(v)); } GraphTopology { partition, count, peers: peers as u32, neighbors: arc_neighbors, } } /// Convert graph file from raw text format to binary format. /// The binary file should follow this format: src1 dst1 src2 dst2 src3 dst3... /// Vertex IDs are 32-bit big endian integers. 
pub fn convert_to_bin<P1: AsRef<Path>, P2: AsRef<Path>>(input: P1, output: P2, split: char) { let reader = BufReader::new(File::open(input).unwrap()); let mut writer = BufWriter::new(File::create(output).unwrap()); let mut count = 0_usize; let mut start = ::std::time::Instant::now(); for edge in reader.lines() { let edge = edge.unwrap(); let edge = edge.split(split).collect::<Vec<_>>(); let src: u64 = edge[0].parse().unwrap(); let dst: u64 = edge[1].parse().unwrap(); writer.write_u64::<BigEndian>(src).unwrap(); writer.write_u64::<BigEndian>(dst).unwrap(); count += 1; if count % 5000000 == 0 { let duration_ms = (Instant::now() - start).as_millis() as f64; let speed = 5000000.0 / duration_ms * 1000.0; debug!("Scanned edges: {}, speed: {:.2}/s", count, speed); start = ::std::time::Instant::now(); } } writer.flush().unwrap(); } pub fn get_neighbors(&self, src: &u64) -> Option<NeighborIter> { self.neighbors.get(src).map(|n| { NeighborIter::new(n) }) } #[inline] pub fn count_nodes(&self) -> usize { self.neighbors.len() } #[inline] pub fn count_edges(&self) -> usize { self.count } } #[cfg(test)] mod test { use super::*; use std::path::PathBuf; #[test] fn test_segment_list() { let mut list = SegmentList::new(6); for i in 0..1024 { list.push(i); } for i in 0..1024 { let e = list.get(i as usize).unwrap(); assert_eq!(i, *e); } for i in 0..1014 { let res = list.get_multi(i as usize, 10).unwrap(); //println!("get res {:?}", res); for j in 0..10 { assert_eq!(i + j, *res[j]); } } } #[test] fn test_graph_load() { let mut d = PathBuf::from(env!("CARGO_MANIFEST_DIR")); d.push("data/twitter_rv.net"); { println!("dir is : {}", d.display()); let graph = GraphTopology::load(1, 1, true,'', d.as_path()); println!("finish load"); let n = graph.get_neighbors(&12).unwrap() .fold(0, |count, _| count + 1); assert_eq!(n, 4); } { let graph = GraphTopology::load_bin(1, 1, true, d.as_path().with_extension("bin")); let n = graph.get_neighbors(&12).unwrap() .map(|v| { println!("get v : {}", v.id); v }) .fold(0, |count, _| count + 1); assert_eq!(n, 4); } } #[test] fn test_graph() { let graph = GraphTopology::with_default(3, 1, true); { let mut ns = vec![]; for n in graph.get_neighbors(&1).unwrap() { ns.push(n); } let mut ns = ns.into_iter().map(|v| v.id).collect::<Vec<_>>(); ns.sort(); assert_eq!(ns, vec![2, 3, 4]); } { let mut ns = vec![]; for n in graph.get_neighbors(&6).unwrap() { ns.push(n); } let mut ns = ns.into_iter().map(|v| v.id).collect::<Vec<_>>(); ns.sort(); assert_eq!(ns, vec![7, 8]); } } }
count += 1; }
random_line_split
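`with_default` and `load_bin` above assign an edge endpoint to the worker whose id equals `vertex % peers`, with a single worker owning everything regardless of its partition id. A sketch of that ownership rule as a standalone predicate (the function name is invented):

```rust
// True if this partition should store adjacency data for `vertex`.
fn owns(vertex: u64, partition: u32, peers: u32) -> bool {
    peers == 1 || (vertex % peers as u64) as u32 == partition
}

fn main() {
    // Two workers: even vertex ids land on partition 0, odd ones on partition 1.
    assert!(owns(4, 0, 2));
    assert!(!owns(5, 0, 2));
    assert!(owns(5, 1, 2));
    // A single worker keeps every vertex.
    assert!(owns(5, 3, 1));
}
```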
lib.rs
//! Contract module which acts as a timelocked controller. When set as the //! owner of an `Ownable` smart contract, it enforces a timelock on all //! `onlyOwner` maintenance operations. This gives time for users of the //! controlled contract to exit before a potentially dangerous maintenance //! operation is applied. //! //! By default, this contract is self administered, meaning administration tasks //! have to go through the timelock process. The proposer (resp executor) role //! is in charge of proposing (resp executing) operations. A common use case is //! to position this {TimelockController} as the owner of a smart contract, with //! a multisig or a DAO as the sole proposer. #![cfg_attr(not(feature = "std"), no_std)] pub use access_control::{ Error, Result, RoleId, }; use ink_env::hash::Blake2x256; use ink_lang::ForwardCallMut; use ink_prelude::vec::Vec; use metis_access_control as access_control; use metis_lang::{ Env, FromAccountId, Storage, }; use metis_timelock_controller_receiver::Receiver; use scale::Encode; #[cfg(not(feature = "ink-as-dependency"))] use ::ink_storage::{ collections::HashMap as StorageHashMap, lazy::Lazy, traits::SpreadLayout, }; pub const TIMELOCK_ADMIN_ROLE: RoleId = RoleId::new(metis_lang::hash!(TIMELOCK_ADMIN_ROLE)); pub const PROPOSER_ROLE: RoleId = RoleId::new(metis_lang::hash!(PROPOSER_ROLE)); pub const EXECUTOR_ROLE: RoleId = RoleId::new(metis_lang::hash!(EXECUTOR_ROLE)); pub const _DONE_TIMESTAMP: u8 = 1; /// The Data of ERC20 component #[cfg_attr(feature = "std", derive(::ink_storage::traits::StorageLayout))] #[derive(Debug, SpreadLayout)] pub struct Data<E: Env> { /// min delay for controller pub min_delay: Lazy<E::Timestamp>, pub timestamps: StorageHashMap<[u8; 32], E::Timestamp>, } impl<E: Env> Data<E> { /// Sets the value of the `cap`. This value is immutable, it can only be /// set once during construction.
impl<E> Default for Data<E> where E: Env, { fn default() -> Self { Self { min_delay: Lazy::new(E::Timestamp::from(1_u8)), timestamps: StorageHashMap::default(), } } } impl<E: Env> Data<E> {} /// The `EventEmit` impl the event emit api for component. pub trait EventEmit<E: Env> { /// Emitted when a call is scheduled as part of operation `id`. fn emit_event_call_scheduled( &mut self, id: [u8; 32], target: E::AccountId, value: E::Balance, data: Vec<u8>, predecessor: Option<[u8; 32]>, delay: E::Timestamp, ); /// Emitted when a call is performed as part of operation `id`. fn emit_event_call_executed( &mut self, id: [u8; 32], target: E::AccountId, value: E::Balance, data: Vec<u8>, ); /// Emitted when operation `id` is cancelled. fn emit_event_cancelled(&mut self, id: [u8; 32]); /// Emitted when the minimum delay for future operations is modified. fn emit_event_min_delay_change( &mut self, old_duration: E::Timestamp, new_duration: E::Timestamp, ); } pub trait Impl<E>: access_control::Impl<E> + EventEmit<E> + Storage<E, Data<E>> where E: Env, { /// initial the state of contract fn init( &mut self, min_delay: E::Timestamp, proposers: Vec<E::AccountId>, executors: Vec<E::AccountId>, ) { access_control::Impl::_set_role_admin( self, TIMELOCK_ADMIN_ROLE, TIMELOCK_ADMIN_ROLE, ); access_control::Impl::_set_role_admin(self, PROPOSER_ROLE, TIMELOCK_ADMIN_ROLE); access_control::Impl::_set_role_admin(self, EXECUTOR_ROLE, TIMELOCK_ADMIN_ROLE); // deployer + self administration access_control::Impl::_setup_role(self, TIMELOCK_ADMIN_ROLE, Self::caller()); // access_control::Impl::_setup_role(self, TIMELOCK_ADMIN_ROLE, address(this)); // register proposers for proposer in proposers.iter() { access_control::Impl::_setup_role(self, PROPOSER_ROLE, proposer.clone()); } // register executors for executor in executors.iter() { access_control::Impl::_setup_role(self, EXECUTOR_ROLE, executor.clone()); } Lazy::set( &mut Storage::<E, Data<E>>::get_mut(self).min_delay, min_delay, ); self.emit_event_min_delay_change(E::Timestamp::from(0_u8), min_delay); } /// To make a function callable only by a certain role. In /// addition to checking the sender's role, `address(0)`'s role is also /// considered. Granting a role to `address(0)` is equivalent to enabling /// this role for everyone. fn ensure_only_role_or_open_role(&self, role: RoleId) { if!access_control::Impl::has_role(self, role, E::AccountId::default()) { access_control::Impl::ensure_caller_role(self, role); } } /// Returns whether an id correspond to a registered operation. This /// includes both Pending, Ready and Done operations. fn is_operation(&self, id: &[u8; 32]) -> bool { self.get_timestamp(id) > E::Timestamp::from(0_u8) } /// Returns whether an operation is pending or not. fn is_operation_pending(&self, id: &[u8; 32]) -> bool { self.get_timestamp(id) > E::Timestamp::from(_DONE_TIMESTAMP) } /// Returns whether an operation is ready or not. fn is_operation_ready(&self, id: &[u8; 32]) -> bool { let timestamp = self.get_timestamp(id); timestamp > E::Timestamp::from(_DONE_TIMESTAMP) && timestamp <= Self::block_timestamp() } /// Returns whether an operation is done or not. fn is_operation_done(&self, id: &[u8; 32]) -> bool { self.get_timestamp(id) == E::Timestamp::from(_DONE_TIMESTAMP) } /// Returns the timestamp at with an operation becomes ready (0 for /// unset operations, 1 for done operations). 
fn get_timestamp(&self, id: &[u8; 32]) -> E::Timestamp { *Storage::<E, Data<E>>::get(self) .timestamps .get(id) .unwrap_or(&E::Timestamp::from(0_u8)) } /// Returns the minimum delay for an operation to become valid. /// /// This value can be changed by executing an operation that calls `updateDelay`. fn get_min_delay(&self) -> E::Timestamp { *Storage::<E, Data<E>>::get(self).min_delay } /// Returns the identifier of an operation containing a single /// transaction. fn hash_operation( &self, target: &E::AccountId, value: &E::Balance, data: &Vec<u8>, predecessor: &Option<[u8; 32]>, salt: &[u8; 32], ) -> [u8; 32] { // for target + value + data + predecessor + salt let mut hash_data: Vec<u8> = Vec::with_capacity(128 + data.len()); hash_data.append(&mut target.encode()); hash_data.append(&mut value.encode()); hash_data.append(&mut data.clone()); hash_data.append(&mut predecessor.encode()); for s in salt.into_iter() { hash_data.push(s.clone()); } Self::hash_bytes::<Blake2x256>(&hash_data) } /// Schedule an operation containing a single transaction. /// /// Emits a `CallScheduled` event. /// /// Requirements: /// /// - the caller must have the 'proposer' role. fn schedule( &mut self, target: E::AccountId, value: E::Balance, data: Vec<u8>, predecessor: Option<[u8; 32]>, salt: [u8; 32], delay: E::Timestamp, ) { access_control::Impl::ensure_caller_role(self, PROPOSER_ROLE); let id = self.hash_operation(&target, &value, &data, &predecessor, &salt); self._schedule(id, delay); self.emit_event_call_scheduled(id, target, value, data, predecessor, delay); } /// Schedule an operation that is to becomes valid after a given delay. fn _schedule(&mut self, id: [u8; 32], delay: E::Timestamp) { assert!( !self.is_operation(&id), "TimelockController: operation already scheduled" ); assert!( delay >= self.get_min_delay(), "TimelockController: insufficient delay" ); Storage::<E, Data<E>>::get_mut(self) .timestamps .insert(id, Self::block_timestamp() + delay); } /// Cancel an operation. /// /// Requirements: /// /// - the caller must have the 'proposer' role. fn cancel(&mut self, id: [u8; 32]) { access_control::Impl::ensure_caller_role(self, PROPOSER_ROLE); assert!( self.is_operation_pending(&id), "TimelockController: operation cannot be cancelled" ); Storage::<E, Data<E>>::get_mut(self).timestamps.take(&id); self.emit_event_cancelled(id); } /// Execute an (ready) operation containing a single transaction. /// /// Emits a `CallExecuted` event. /// /// Requirements: /// /// - the caller must have the 'executor' role. fn execute( &mut self, target: E::AccountId, value: E::Balance, data: Vec<u8>, predecessor: Option<[u8; 32]>, salt: [u8; 32], ) { self.ensure_only_role_or_open_role(EXECUTOR_ROLE); let id = self.hash_operation(&target, &value, &data, &predecessor, &salt); self._before_call(predecessor); self._call(id, target, value, data); self._after_call(id); } /// Checks before execution of an operation's calls. fn _before_call(&self, predecessor: Option<[u8; 32]>) { match predecessor { Some(predecessor) => { assert!( self.is_operation_done(&predecessor), "TimelockController: missing dependency" ); () } None => (), } } /// Checks after execution of an operation's calls. fn _after_call(&mut self, id: [u8; 32]) { assert!( self.is_operation_ready(&id), "TimelockController: operation is not ready" ); Storage::<E, Data<E>>::get_mut(self) .timestamps .insert(id, E::Timestamp::from(_DONE_TIMESTAMP)); } /// Execute an operation's call. /// /// Emits a `CallExecuted` event. 
fn _call( &mut self, id: [u8; 32], target: E::AccountId, value: E::Balance, data: Vec<u8>, ) { let mut receiver = <Receiver as FromAccountId<E>>::from_account_id(target.clone()); let success = receiver .call_mut() .on_call(Self::caller().into(), data.clone()) .transferred_value(value.into()) .fire(); let success = match success { Ok(success) => success, Err(_) => false, }; assert!( success, "TimelockController: underlying transaction reverted" ); self.emit_event_call_executed(id, target, value, data); } /// Changes the minimum timelock duration for future operations. /// /// Emits a `MinDelayChange` event. /// /// Requirements: /// /// - the caller must be the timelock itself. This can only be achieved by scheduling and later executing /// an operation where the timelock is the target and the data is the ABI-encoded call to this fn. fn _set_update_delay(&mut self, new_delay: E::Timestamp) { let current_min_delay = self.get_min_delay(); self.emit_event_min_delay_change(current_min_delay, new_delay); *Storage::<E, Data<E>>::get_mut(self).min_delay = new_delay; } }
pub fn new() -> Self { Self::default() } }
random_line_split
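The is_operation* predicates above encode an operation's whole lifecycle in a single stored timestamp: 0 means unset, _DONE_TIMESTAMP (1) means done, and anything larger is a scheduled time that becomes ready once the block timestamp reaches it. The following standalone sketch restates that classification with plain u64 values in place of E::Timestamp; the enum and function names are illustrative only.

/// Illustrative classification of an operation's stored timestamp, mirroring
/// the `is_operation*` checks above (plain u64s instead of `E::Timestamp`).
const DONE_TIMESTAMP: u64 = 1;

#[derive(Debug, PartialEq)]
enum OperationState {
    Unset,
    Done,
    Waiting, // scheduled but the delay has not yet elapsed
    Ready,   // scheduled and executable now
}

fn classify(stored_timestamp: u64, now: u64) -> OperationState {
    match stored_timestamp {
        0 => OperationState::Unset,
        DONE_TIMESTAMP => OperationState::Done,
        t if t <= now => OperationState::Ready,
        _ => OperationState::Waiting,
    }
}

For example, classify(0, now) yields Unset, and a freshly scheduled entry stays Waiting until the block timestamp advances past the stored value.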
lib.rs
//! Contract module which acts as a timelocked controller. When set as the //! owner of an `Ownable` smart contract, it enforces a timelock on all //! `onlyOwner` maintenance operations. This gives time for users of the //! controlled contract to exit before a potentially dangerous maintenance //! operation is applied. //! //! By default, this contract is self administered, meaning administration tasks //! have to go through the timelock process. The proposer (resp executor) role //! is in charge of proposing (resp executing) operations. A common use case is //! to position this {TimelockController} as the owner of a smart contract, with //! a multisig or a DAO as the sole proposer. #![cfg_attr(not(feature = "std"), no_std)] pub use access_control::{ Error, Result, RoleId, }; use ink_env::hash::Blake2x256; use ink_lang::ForwardCallMut; use ink_prelude::vec::Vec; use metis_access_control as access_control; use metis_lang::{ Env, FromAccountId, Storage, }; use metis_timelock_controller_receiver::Receiver; use scale::Encode; #[cfg(not(feature = "ink-as-dependency"))] use ::ink_storage::{ collections::HashMap as StorageHashMap, lazy::Lazy, traits::SpreadLayout, }; pub const TIMELOCK_ADMIN_ROLE: RoleId = RoleId::new(metis_lang::hash!(TIMELOCK_ADMIN_ROLE)); pub const PROPOSER_ROLE: RoleId = RoleId::new(metis_lang::hash!(PROPOSER_ROLE)); pub const EXECUTOR_ROLE: RoleId = RoleId::new(metis_lang::hash!(EXECUTOR_ROLE)); pub const _DONE_TIMESTAMP: u8 = 1; /// The Data of ERC20 component #[cfg_attr(feature = "std", derive(::ink_storage::traits::StorageLayout))] #[derive(Debug, SpreadLayout)] pub struct Data<E: Env> { /// min delay for controller pub min_delay: Lazy<E::Timestamp>, pub timestamps: StorageHashMap<[u8; 32], E::Timestamp>, } impl<E: Env> Data<E> { /// Sets the value of the `cap`. This value is immutable, it can only be /// set once during construction. pub fn new() -> Self { Self::default() } } impl<E> Default for Data<E> where E: Env, { fn default() -> Self { Self { min_delay: Lazy::new(E::Timestamp::from(1_u8)), timestamps: StorageHashMap::default(), } } } impl<E: Env> Data<E> {} /// The `EventEmit` impl the event emit api for component. pub trait EventEmit<E: Env> { /// Emitted when a call is scheduled as part of operation `id`. fn emit_event_call_scheduled( &mut self, id: [u8; 32], target: E::AccountId, value: E::Balance, data: Vec<u8>, predecessor: Option<[u8; 32]>, delay: E::Timestamp, ); /// Emitted when a call is performed as part of operation `id`. fn emit_event_call_executed( &mut self, id: [u8; 32], target: E::AccountId, value: E::Balance, data: Vec<u8>, ); /// Emitted when operation `id` is cancelled. fn emit_event_cancelled(&mut self, id: [u8; 32]); /// Emitted when the minimum delay for future operations is modified. 
fn emit_event_min_delay_change( &mut self, old_duration: E::Timestamp, new_duration: E::Timestamp, ); } pub trait Impl<E>: access_control::Impl<E> + EventEmit<E> + Storage<E, Data<E>> where E: Env, { /// initial the state of contract fn init( &mut self, min_delay: E::Timestamp, proposers: Vec<E::AccountId>, executors: Vec<E::AccountId>, ) { access_control::Impl::_set_role_admin( self, TIMELOCK_ADMIN_ROLE, TIMELOCK_ADMIN_ROLE, ); access_control::Impl::_set_role_admin(self, PROPOSER_ROLE, TIMELOCK_ADMIN_ROLE); access_control::Impl::_set_role_admin(self, EXECUTOR_ROLE, TIMELOCK_ADMIN_ROLE); // deployer + self administration access_control::Impl::_setup_role(self, TIMELOCK_ADMIN_ROLE, Self::caller()); // access_control::Impl::_setup_role(self, TIMELOCK_ADMIN_ROLE, address(this)); // register proposers for proposer in proposers.iter() { access_control::Impl::_setup_role(self, PROPOSER_ROLE, proposer.clone()); } // register executors for executor in executors.iter() { access_control::Impl::_setup_role(self, EXECUTOR_ROLE, executor.clone()); } Lazy::set( &mut Storage::<E, Data<E>>::get_mut(self).min_delay, min_delay, ); self.emit_event_min_delay_change(E::Timestamp::from(0_u8), min_delay); } /// To make a function callable only by a certain role. In /// addition to checking the sender's role, `address(0)`'s role is also /// considered. Granting a role to `address(0)` is equivalent to enabling /// this role for everyone. fn ensure_only_role_or_open_role(&self, role: RoleId) { if!access_control::Impl::has_role(self, role, E::AccountId::default()) { access_control::Impl::ensure_caller_role(self, role); } } /// Returns whether an id correspond to a registered operation. This /// includes both Pending, Ready and Done operations. fn is_operation(&self, id: &[u8; 32]) -> bool { self.get_timestamp(id) > E::Timestamp::from(0_u8) } /// Returns whether an operation is pending or not. fn is_operation_pending(&self, id: &[u8; 32]) -> bool { self.get_timestamp(id) > E::Timestamp::from(_DONE_TIMESTAMP) } /// Returns whether an operation is ready or not. fn is_operation_ready(&self, id: &[u8; 32]) -> bool { let timestamp = self.get_timestamp(id); timestamp > E::Timestamp::from(_DONE_TIMESTAMP) && timestamp <= Self::block_timestamp() } /// Returns whether an operation is done or not. fn is_operation_done(&self, id: &[u8; 32]) -> bool { self.get_timestamp(id) == E::Timestamp::from(_DONE_TIMESTAMP) } /// Returns the timestamp at with an operation becomes ready (0 for /// unset operations, 1 for done operations). fn get_timestamp(&self, id: &[u8; 32]) -> E::Timestamp { *Storage::<E, Data<E>>::get(self) .timestamps .get(id) .unwrap_or(&E::Timestamp::from(0_u8)) } /// Returns the minimum delay for an operation to become valid. /// /// This value can be changed by executing an operation that calls `updateDelay`. fn get_min_delay(&self) -> E::Timestamp { *Storage::<E, Data<E>>::get(self).min_delay } /// Returns the identifier of an operation containing a single /// transaction. 
fn hash_operation( &self, target: &E::AccountId, value: &E::Balance, data: &Vec<u8>, predecessor: &Option<[u8; 32]>, salt: &[u8; 32], ) -> [u8; 32] { // for target + value + data + predecessor + salt let mut hash_data: Vec<u8> = Vec::with_capacity(128 + data.len()); hash_data.append(&mut target.encode()); hash_data.append(&mut value.encode()); hash_data.append(&mut data.clone()); hash_data.append(&mut predecessor.encode()); for s in salt.into_iter() { hash_data.push(s.clone()); } Self::hash_bytes::<Blake2x256>(&hash_data) } /// Schedule an operation containing a single transaction. /// /// Emits a `CallScheduled` event. /// /// Requirements: /// /// - the caller must have the 'proposer' role. fn schedule( &mut self, target: E::AccountId, value: E::Balance, data: Vec<u8>, predecessor: Option<[u8; 32]>, salt: [u8; 32], delay: E::Timestamp, ) { access_control::Impl::ensure_caller_role(self, PROPOSER_ROLE); let id = self.hash_operation(&target, &value, &data, &predecessor, &salt); self._schedule(id, delay); self.emit_event_call_scheduled(id, target, value, data, predecessor, delay); } /// Schedule an operation that is to becomes valid after a given delay. fn _schedule(&mut self, id: [u8; 32], delay: E::Timestamp) { assert!( !self.is_operation(&id), "TimelockController: operation already scheduled" ); assert!( delay >= self.get_min_delay(), "TimelockController: insufficient delay" ); Storage::<E, Data<E>>::get_mut(self) .timestamps .insert(id, Self::block_timestamp() + delay); } /// Cancel an operation. /// /// Requirements: /// /// - the caller must have the 'proposer' role. fn cancel(&mut self, id: [u8; 32]) { access_control::Impl::ensure_caller_role(self, PROPOSER_ROLE); assert!( self.is_operation_pending(&id), "TimelockController: operation cannot be cancelled" ); Storage::<E, Data<E>>::get_mut(self).timestamps.take(&id); self.emit_event_cancelled(id); } /// Execute an (ready) operation containing a single transaction. /// /// Emits a `CallExecuted` event. /// /// Requirements: /// /// - the caller must have the 'executor' role. fn execute( &mut self, target: E::AccountId, value: E::Balance, data: Vec<u8>, predecessor: Option<[u8; 32]>, salt: [u8; 32], ) { self.ensure_only_role_or_open_role(EXECUTOR_ROLE); let id = self.hash_operation(&target, &value, &data, &predecessor, &salt); self._before_call(predecessor); self._call(id, target, value, data); self._after_call(id); } /// Checks before execution of an operation's calls. fn _before_call(&self, predecessor: Option<[u8; 32]>) { match predecessor { Some(predecessor) => { assert!( self.is_operation_done(&predecessor), "TimelockController: missing dependency" ); () } None => (), } } /// Checks after execution of an operation's calls. fn _after_call(&mut self, id: [u8; 32])
/// Execute an operation's call. /// /// Emits a `CallExecuted` event. fn _call( &mut self, id: [u8; 32], target: E::AccountId, value: E::Balance, data: Vec<u8>, ) { let mut receiver = <Receiver as FromAccountId<E>>::from_account_id(target.clone()); let success = receiver .call_mut() .on_call(Self::caller().into(), data.clone()) .transferred_value(value.into()) .fire(); let success = match success { Ok(success) => success, Err(_) => false, }; assert!( success, "TimelockController: underlying transaction reverted" ); self.emit_event_call_executed(id, target, value, data); } /// Changes the minimum timelock duration for future operations. /// /// Emits a `MinDelayChange` event. /// /// Requirements: /// /// - the caller must be the timelock itself. This can only be achieved by scheduling and later executing /// an operation where the timelock is the target and the data is the ABI-encoded call to this fn. fn _set_update_delay(&mut self, new_delay: E::Timestamp) { let current_min_delay = self.get_min_delay(); self.emit_event_min_delay_change(current_min_delay, new_delay); *Storage::<E, Data<E>>::get_mut(self).min_delay = new_delay; } }
{ assert!( self.is_operation_ready(&id), "TimelockController: operation is not ready" ); Storage::<E, Data<E>>::get_mut(self) .timestamps .insert(id, E::Timestamp::from(_DONE_TIMESTAMP)); }
identifier_body
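Taken together, _schedule and _after_call above form a small state machine over the timestamps map: scheduling records now + delay, and a successful execution overwrites the entry with the done marker. A minimal sketch of that bookkeeping follows, with plain u64 timestamps and a HashMap standing in for contract storage; the Timelock struct and its method names are illustrative, not the actual component API.

use std::collections::HashMap;

const DONE: u64 = 1;

struct Timelock {
    min_delay: u64,
    timestamps: HashMap<[u8; 32], u64>,
}

impl Timelock {
    fn schedule(&mut self, id: [u8; 32], now: u64, delay: u64) {
        // Mirrors `_schedule`: refuse duplicates and too-short delays.
        assert!(!self.timestamps.contains_key(&id), "operation already scheduled");
        assert!(delay >= self.min_delay, "insufficient delay");
        self.timestamps.insert(id, now + delay);
    }

    fn execute(&mut self, id: [u8; 32], now: u64) {
        // Mirrors `_after_call`: the stored timestamp must be in the ready window,
        // then the entry is overwritten with the done marker.
        let t = *self.timestamps.get(&id).unwrap_or(&0);
        assert!(t > DONE && t <= now, "operation is not ready");
        self.timestamps.insert(id, DONE);
    }
}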
lib.rs
//! Contract module which acts as a timelocked controller. When set as the //! owner of an `Ownable` smart contract, it enforces a timelock on all //! `onlyOwner` maintenance operations. This gives time for users of the //! controlled contract to exit before a potentially dangerous maintenance //! operation is applied. //! //! By default, this contract is self administered, meaning administration tasks //! have to go through the timelock process. The proposer (resp executor) role //! is in charge of proposing (resp executing) operations. A common use case is //! to position this {TimelockController} as the owner of a smart contract, with //! a multisig or a DAO as the sole proposer. #![cfg_attr(not(feature = "std"), no_std)] pub use access_control::{ Error, Result, RoleId, }; use ink_env::hash::Blake2x256; use ink_lang::ForwardCallMut; use ink_prelude::vec::Vec; use metis_access_control as access_control; use metis_lang::{ Env, FromAccountId, Storage, }; use metis_timelock_controller_receiver::Receiver; use scale::Encode; #[cfg(not(feature = "ink-as-dependency"))] use ::ink_storage::{ collections::HashMap as StorageHashMap, lazy::Lazy, traits::SpreadLayout, }; pub const TIMELOCK_ADMIN_ROLE: RoleId = RoleId::new(metis_lang::hash!(TIMELOCK_ADMIN_ROLE)); pub const PROPOSER_ROLE: RoleId = RoleId::new(metis_lang::hash!(PROPOSER_ROLE)); pub const EXECUTOR_ROLE: RoleId = RoleId::new(metis_lang::hash!(EXECUTOR_ROLE)); pub const _DONE_TIMESTAMP: u8 = 1; /// The Data of ERC20 component #[cfg_attr(feature = "std", derive(::ink_storage::traits::StorageLayout))] #[derive(Debug, SpreadLayout)] pub struct Data<E: Env> { /// min delay for controller pub min_delay: Lazy<E::Timestamp>, pub timestamps: StorageHashMap<[u8; 32], E::Timestamp>, } impl<E: Env> Data<E> { /// Sets the value of the `cap`. This value is immutable, it can only be /// set once during construction. pub fn new() -> Self { Self::default() } } impl<E> Default for Data<E> where E: Env, { fn default() -> Self { Self { min_delay: Lazy::new(E::Timestamp::from(1_u8)), timestamps: StorageHashMap::default(), } } } impl<E: Env> Data<E> {} /// The `EventEmit` impl the event emit api for component. pub trait EventEmit<E: Env> { /// Emitted when a call is scheduled as part of operation `id`. fn emit_event_call_scheduled( &mut self, id: [u8; 32], target: E::AccountId, value: E::Balance, data: Vec<u8>, predecessor: Option<[u8; 32]>, delay: E::Timestamp, ); /// Emitted when a call is performed as part of operation `id`. fn emit_event_call_executed( &mut self, id: [u8; 32], target: E::AccountId, value: E::Balance, data: Vec<u8>, ); /// Emitted when operation `id` is cancelled. fn emit_event_cancelled(&mut self, id: [u8; 32]); /// Emitted when the minimum delay for future operations is modified. 
fn emit_event_min_delay_change( &mut self, old_duration: E::Timestamp, new_duration: E::Timestamp, ); } pub trait Impl<E>: access_control::Impl<E> + EventEmit<E> + Storage<E, Data<E>> where E: Env, { /// initial the state of contract fn init( &mut self, min_delay: E::Timestamp, proposers: Vec<E::AccountId>, executors: Vec<E::AccountId>, ) { access_control::Impl::_set_role_admin( self, TIMELOCK_ADMIN_ROLE, TIMELOCK_ADMIN_ROLE, ); access_control::Impl::_set_role_admin(self, PROPOSER_ROLE, TIMELOCK_ADMIN_ROLE); access_control::Impl::_set_role_admin(self, EXECUTOR_ROLE, TIMELOCK_ADMIN_ROLE); // deployer + self administration access_control::Impl::_setup_role(self, TIMELOCK_ADMIN_ROLE, Self::caller()); // access_control::Impl::_setup_role(self, TIMELOCK_ADMIN_ROLE, address(this)); // register proposers for proposer in proposers.iter() { access_control::Impl::_setup_role(self, PROPOSER_ROLE, proposer.clone()); } // register executors for executor in executors.iter() { access_control::Impl::_setup_role(self, EXECUTOR_ROLE, executor.clone()); } Lazy::set( &mut Storage::<E, Data<E>>::get_mut(self).min_delay, min_delay, ); self.emit_event_min_delay_change(E::Timestamp::from(0_u8), min_delay); } /// To make a function callable only by a certain role. In /// addition to checking the sender's role, `address(0)`'s role is also /// considered. Granting a role to `address(0)` is equivalent to enabling /// this role for everyone. fn ensure_only_role_or_open_role(&self, role: RoleId) { if!access_control::Impl::has_role(self, role, E::AccountId::default()) { access_control::Impl::ensure_caller_role(self, role); } } /// Returns whether an id correspond to a registered operation. This /// includes both Pending, Ready and Done operations. fn is_operation(&self, id: &[u8; 32]) -> bool { self.get_timestamp(id) > E::Timestamp::from(0_u8) } /// Returns whether an operation is pending or not. fn
(&self, id: &[u8; 32]) -> bool { self.get_timestamp(id) > E::Timestamp::from(_DONE_TIMESTAMP) } /// Returns whether an operation is ready or not. fn is_operation_ready(&self, id: &[u8; 32]) -> bool { let timestamp = self.get_timestamp(id); timestamp > E::Timestamp::from(_DONE_TIMESTAMP) && timestamp <= Self::block_timestamp() } /// Returns whether an operation is done or not. fn is_operation_done(&self, id: &[u8; 32]) -> bool { self.get_timestamp(id) == E::Timestamp::from(_DONE_TIMESTAMP) } /// Returns the timestamp at with an operation becomes ready (0 for /// unset operations, 1 for done operations). fn get_timestamp(&self, id: &[u8; 32]) -> E::Timestamp { *Storage::<E, Data<E>>::get(self) .timestamps .get(id) .unwrap_or(&E::Timestamp::from(0_u8)) } /// Returns the minimum delay for an operation to become valid. /// /// This value can be changed by executing an operation that calls `updateDelay`. fn get_min_delay(&self) -> E::Timestamp { *Storage::<E, Data<E>>::get(self).min_delay } /// Returns the identifier of an operation containing a single /// transaction. fn hash_operation( &self, target: &E::AccountId, value: &E::Balance, data: &Vec<u8>, predecessor: &Option<[u8; 32]>, salt: &[u8; 32], ) -> [u8; 32] { // for target + value + data + predecessor + salt let mut hash_data: Vec<u8> = Vec::with_capacity(128 + data.len()); hash_data.append(&mut target.encode()); hash_data.append(&mut value.encode()); hash_data.append(&mut data.clone()); hash_data.append(&mut predecessor.encode()); for s in salt.into_iter() { hash_data.push(s.clone()); } Self::hash_bytes::<Blake2x256>(&hash_data) } /// Schedule an operation containing a single transaction. /// /// Emits a `CallScheduled` event. /// /// Requirements: /// /// - the caller must have the 'proposer' role. fn schedule( &mut self, target: E::AccountId, value: E::Balance, data: Vec<u8>, predecessor: Option<[u8; 32]>, salt: [u8; 32], delay: E::Timestamp, ) { access_control::Impl::ensure_caller_role(self, PROPOSER_ROLE); let id = self.hash_operation(&target, &value, &data, &predecessor, &salt); self._schedule(id, delay); self.emit_event_call_scheduled(id, target, value, data, predecessor, delay); } /// Schedule an operation that is to becomes valid after a given delay. fn _schedule(&mut self, id: [u8; 32], delay: E::Timestamp) { assert!( !self.is_operation(&id), "TimelockController: operation already scheduled" ); assert!( delay >= self.get_min_delay(), "TimelockController: insufficient delay" ); Storage::<E, Data<E>>::get_mut(self) .timestamps .insert(id, Self::block_timestamp() + delay); } /// Cancel an operation. /// /// Requirements: /// /// - the caller must have the 'proposer' role. fn cancel(&mut self, id: [u8; 32]) { access_control::Impl::ensure_caller_role(self, PROPOSER_ROLE); assert!( self.is_operation_pending(&id), "TimelockController: operation cannot be cancelled" ); Storage::<E, Data<E>>::get_mut(self).timestamps.take(&id); self.emit_event_cancelled(id); } /// Execute an (ready) operation containing a single transaction. /// /// Emits a `CallExecuted` event. /// /// Requirements: /// /// - the caller must have the 'executor' role. fn execute( &mut self, target: E::AccountId, value: E::Balance, data: Vec<u8>, predecessor: Option<[u8; 32]>, salt: [u8; 32], ) { self.ensure_only_role_or_open_role(EXECUTOR_ROLE); let id = self.hash_operation(&target, &value, &data, &predecessor, &salt); self._before_call(predecessor); self._call(id, target, value, data); self._after_call(id); } /// Checks before execution of an operation's calls. 
fn _before_call(&self, predecessor: Option<[u8; 32]>) { match predecessor { Some(predecessor) => { assert!( self.is_operation_done(&predecessor), "TimelockController: missing dependency" ); () } None => (), } } /// Checks after execution of an operation's calls. fn _after_call(&mut self, id: [u8; 32]) { assert!( self.is_operation_ready(&id), "TimelockController: operation is not ready" ); Storage::<E, Data<E>>::get_mut(self) .timestamps .insert(id, E::Timestamp::from(_DONE_TIMESTAMP)); } /// Execute an operation's call. /// /// Emits a `CallExecuted` event. fn _call( &mut self, id: [u8; 32], target: E::AccountId, value: E::Balance, data: Vec<u8>, ) { let mut receiver = <Receiver as FromAccountId<E>>::from_account_id(target.clone()); let success = receiver .call_mut() .on_call(Self::caller().into(), data.clone()) .transferred_value(value.into()) .fire(); let success = match success { Ok(success) => success, Err(_) => false, }; assert!( success, "TimelockController: underlying transaction reverted" ); self.emit_event_call_executed(id, target, value, data); } /// Changes the minimum timelock duration for future operations. /// /// Emits a `MinDelayChange` event. /// /// Requirements: /// /// - the caller must be the timelock itself. This can only be achieved by scheduling and later executing /// an operation where the timelock is the target and the data is the ABI-encoded call to this fn. fn _set_update_delay(&mut self, new_delay: E::Timestamp) { let current_min_delay = self.get_min_delay(); self.emit_event_min_delay_change(current_min_delay, new_delay); *Storage::<E, Data<E>>::get_mut(self).min_delay = new_delay; } }
is_operation_pending
identifier_name
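ensure_only_role_or_open_role above treats a role granted to the default (zero) account as open to everyone, and only checks the caller otherwise. A small sketch of that rule is shown below with a hypothetical has_role lookup passed in as a closure; the u32 role id and 32-byte account representation are stand-ins for the real RoleId and AccountId types.

/// Sketch of the "open role" convention: if the zero account holds the role,
/// anyone may call; otherwise the caller itself must hold it.
fn only_role_or_open_role(
    has_role: impl Fn(u32, &[u8; 32]) -> bool, // (role, account) -> bool
    role: u32,
    caller: &[u8; 32],
) {
    let open_account = [0u8; 32]; // stands in for `AccountId::default()`
    if !has_role(role, &open_account) {
        assert!(has_role(role, caller), "caller is missing role");
    }
}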
scheduling.rs
use super::*; use crate::domain::scheduling::*; use chrono::{DateTime, SecondsFormat}; use futures::{Async, Future, Stream}; use std::sync::{Arc, Mutex, MutexGuard}; use std::time::Duration; use tokio::timer::{ delay_queue::{Expired, Key as DelayQueueKey}, DelayQueue, }; enum DelayQueueItem<T> { TaskSchedule(T), KeepAlive, } // The valid duration is limited by this upper bound that is // reserved for the keep alive token! // TODO: The maximum acceptable value of 795 days that does // not cause an internal panic has been discovered experimentally. // No references about this limit can be found in the Tokio docs!? const MAX_DELAY_TIMEOUT: Duration = Duration::from_secs(795 * 24 * 60 * 60); struct ScheduledTaskQueue<T> { task_scheduler: Box<dyn TaskScheduler<TaskSchedule = T> + Send>, upcoming_tasks: DelayQueue<DelayQueueItem<T>>, keep_alive_key: DelayQueueKey, } fn format_datetime<Z: chrono::TimeZone>(dt: &DateTime<Z>) -> String where <Z as chrono::TimeZone>::Offset: std::fmt::Display, { dt.to_rfc3339_opts(SecondsFormat::Millis, true) } impl<T> ScheduledTaskQueue<T> where T: TaskSchedule + std::fmt::Debug, <T::TimeZone as chrono::TimeZone>::Offset: std::fmt::Display, { pub fn new(task_scheduler: Box<dyn TaskScheduler<TaskSchedule = T> + Send>) -> Self { let mut upcoming_tasks = DelayQueue::new(); let keep_alive_key = upcoming_tasks.insert(DelayQueueItem::KeepAlive, MAX_DELAY_TIMEOUT); Self { task_scheduler, upcoming_tasks, keep_alive_key, } } fn keep_alive(&mut self) { self.upcoming_tasks .reset(&self.keep_alive_key, MAX_DELAY_TIMEOUT); } pub fn handle_expired(&mut self, expired: Expired<DelayQueueItem<T>>) { match expired.into_inner() { DelayQueueItem::TaskSchedule(task_schedule) => self.reschedule_expired(task_schedule), DelayQueueItem::KeepAlive => self.reschedule_all(Default::default()), } } fn reschedule_expired(&mut self, task_schedule: T) { let now = self.task_scheduler.now(); debug!("{:?} expired at {}", task_schedule, now); let task_reschedule = self .task_scheduler .dispatch_and_reschedule_expired_task(&now, task_schedule); if let Some(task_schedule) = task_reschedule { self.schedule_next(&now, task_schedule); self.keep_alive(); } } fn schedule_next( &mut self, now: &DateTime<T::TimeZone>, task_schedule: T, ) -> Option<DateTime<T::TimeZone>> { if let Some(next_after_now) = task_schedule.schedule_next_after(now) { debug_assert!(next_after_now > *now); debug!( "Rescheduling {:?} at {}", task_schedule, format_datetime(&next_after_now) ); let timeout = (next_after_now.clone() - now.clone()).to_std().unwrap(); if timeout < MAX_DELAY_TIMEOUT { self.upcoming_tasks .insert(DelayQueueItem::TaskSchedule(task_schedule), timeout); Some(next_after_now) } else { error!( "Cannot reschedule {:?} at {}: Maximum timeout duration exceeded: {:?} >= {:?}", task_schedule, format_datetime(&next_after_now), timeout, MAX_DELAY_TIMEOUT ); None } } else { debug!("Finished {:?}", task_schedule); None } } pub fn reschedule_all(&mut self, task_schedules: Vec<T>) { // Clear the delay queue, i.e. 
discard all tasks debug!("Discarding all scheduled tasks"); self.upcoming_tasks.clear(); // Repopulate the delay queue with the given irrigation schedules debug_assert!(self.upcoming_tasks.is_empty()); self.upcoming_tasks.reserve(task_schedules.len() + 1); let now = self.task_scheduler.now(); task_schedules.into_iter().for_each(|task_schedule| { self.schedule_next(&now, task_schedule); }); self.keep_alive_key = self .upcoming_tasks .insert(DelayQueueItem::KeepAlive, MAX_DELAY_TIMEOUT); } } impl<T> Stream for ScheduledTaskQueue<T> { type Item = <DelayQueue<DelayQueueItem<T>> as Stream>::Item; type Error = <DelayQueue<DelayQueueItem<T>> as Stream>::Error; fn poll(&mut self) -> Result<Async<Option<Self::Item>>, Self::Error> { self.upcoming_tasks.poll() } } // This mutex will only be locked within the same executor, // though maybe subsequently by different threads. There // won't be any lock contention, i.e. no thread will block // when locking this mutex! It is only required to satisfy // the Send bound for the enclosing context. struct ScheduledTasks<T>(Arc<Mutex<ScheduledTaskQueue<T>>>); impl<T> ScheduledTasks<T> { pub fn new(inner: ScheduledTaskQueue<T>) -> Self { ScheduledTasks(Arc::new(Mutex::new(inner))) } pub fn lock_inner(&mut self) -> MutexGuard<ScheduledTaskQueue<T>> { // Even a try_lock() should never fail, but we prefer // the blocking variant to be safe! let lock_result = self.0.lock(); debug_assert!(lock_result.is_ok()); match lock_result { Ok(guard) => guard, Err(err) => { error!("Failed to lock mutex of scheduled tasks: {}", err); unreachable!(); } } } } impl<T> Clone for ScheduledTasks<T> { fn clone(&self) -> Self { ScheduledTasks(self.0.clone()) } } impl<T> Stream for ScheduledTasks<T> { type Item = <DelayQueue<DelayQueueItem<T>> as Stream>::Item; type Error = <DelayQueue<DelayQueueItem<T>> as Stream>::Error; fn poll(&mut self) -> Result<Async<Option<Self::Item>>, Self::Error> { self.lock_inner().poll() } } #[derive(Debug, Clone, Copy)] pub enum TaskSchedulingSignal {} #[derive(Debug, Clone, PartialEq)] pub enum TaskSchedulingCommand<T: TaskSchedule> { RescheduleAll(Vec<T>), } #[derive(Debug, Clone, Copy)] pub enum TaskSchedulingQuery {} pub type TaskSchedulingAction<T> = Action<TaskSchedulingSignal, TaskSchedulingCommand<T>, TaskSchedulingQuery>; pub type TaskSchedulingActionSender<T> = ActionSender<TaskSchedulingAction<T>>; type TaskSchedulingActionReceiver<T> = ActionReceiver<TaskSchedulingAction<T>>; #[derive(Debug, Clone, Copy)] pub enum TaskSchedulingNotification {} type TaskSchedulingNotificationSender = NotificationSender<TaskSchedulingNotification>; pub type TaskSchedulingNotificationReceiver = NotificationReceiver<TaskSchedulingNotification>; pub struct TaskSchedulingActor<T: TaskSchedule> { // Currently unused _notification_tx: TaskSchedulingNotificationSender, scheduled_tasks: ScheduledTasks<T>, } impl<T> TaskSchedulingActor<T> where T: TaskSchedule + Send + std::fmt::Debug, { pub fn create( task_scheduler: Box<dyn TaskScheduler<TaskSchedule = T> + Send>, ) -> ( impl Future<Item = (), Error = ()>, TaskSchedulingActionSender<T>, TaskSchedulingNotificationReceiver, ) { let (action_tx, action_rx) = new_action_channel(); let (_notification_tx, notification_rx) = new_notification_channel(); let event_loop = futures::lazy(move || { info!("Starting scheduler"); // Lazy instantiation is essential to implicitly attach the // DelayQueue to the Timer of the corresponding Runtime in // DelayQueue::new()!!! 
Ok(ScheduledTasks::new(ScheduledTaskQueue::new(task_scheduler))) }) .and_then(move |scheduled_tasks| { // Create a handler for expired tasks let mut expired_tasks = scheduled_tasks.clone(); let expired_tasks_handler = scheduled_tasks .clone() .for_each(move |expired| { expired_tasks.lock_inner().handle_expired(expired); Ok(()) }) .map_err(|err| error!("Failed to handle expired tasks: {}", err)); Ok((scheduled_tasks, expired_tasks_handler)) }) .and_then(move |(scheduled_tasks, expired_tasks_handler)| { // Create a handler for actions... let action_handler = Self { _notification_tx, scheduled_tasks, } .handle_actions(action_rx); //...and combine the handlers. // Warning: The order for combining both futures seems to matter!! // Using select() on action_handler followed by expired_tasks_handler // as an argument works as expected. When reversing this order any // previously rescheduled tasks are retained and don't expire until // the next action is received. action_handler .select(expired_tasks_handler) .map(drop) .map_err(drop) }); (event_loop, action_tx, notification_rx) } fn handle_actions( mut self, action_rx: TaskSchedulingActionReceiver<T>, ) -> impl Future<Item = (), Error = ()> { action_rx.for_each(move |action| { self.handle_action(action); Ok(()) }) } fn handle_action(&mut self, action: TaskSchedulingAction<T>) { match action { Action::Signal(signal) => match signal {}, Action::Command(response_tx, command) => self.handle_command(response_tx, command), Action::Query(query) => match query {}, } } fn
( &mut self, response_tx: CommandResponseSender, command: TaskSchedulingCommand<T>, ) { let result = match command { TaskSchedulingCommand::RescheduleAll(task_schedules) => { self.scheduled_tasks .lock_inner() .reschedule_all(task_schedules); Ok(()) } }; reply(response_tx, result); } }
handle_command
identifier_name
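schedule_next above refuses to insert a task whose delay would not fit under MAX_DELAY_TIMEOUT, since the top slot is reserved for the keep-alive token. Here is a sketch of that guard as a standalone helper, assuming chrono's DateTime subtraction and to_std conversion; delay_until is an illustrative name.

use chrono::{DateTime, Utc};
use std::time::Duration;

// Same cap as MAX_DELAY_TIMEOUT above: a timeout at or beyond this would
// collide with the slot reserved for the keep-alive token.
const MAX_DELAY_TIMEOUT: Duration = Duration::from_secs(795 * 24 * 60 * 60);

/// Returns the std Duration until `next` if it is in the future and fits
/// under the cap, otherwise None.
fn delay_until(now: DateTime<Utc>, next: DateTime<Utc>) -> Option<Duration> {
    let timeout = (next - now).to_std().ok()?; // to_std fails for negative spans
    if timeout < MAX_DELAY_TIMEOUT {
        Some(timeout)
    } else {
        None
    }
}

In this sketch None covers both an over-long delay and a next time already in the past; the original instead asserts the next time lies in the future and logs an error when the cap is exceeded.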
scheduling.rs
use super::*; use crate::domain::scheduling::*; use chrono::{DateTime, SecondsFormat}; use futures::{Async, Future, Stream}; use std::sync::{Arc, Mutex, MutexGuard}; use std::time::Duration; use tokio::timer::{ delay_queue::{Expired, Key as DelayQueueKey}, DelayQueue, }; enum DelayQueueItem<T> { TaskSchedule(T), KeepAlive, } // The valid duration is limited by this upper bound that is // reserved for the keep alive token! // TODO: The maximum acceptable value of 795 days that does // not cause an internal panic has been discovered experimentally. // No references about this limit can be found in the Tokio docs!? const MAX_DELAY_TIMEOUT: Duration = Duration::from_secs(795 * 24 * 60 * 60); struct ScheduledTaskQueue<T> { task_scheduler: Box<dyn TaskScheduler<TaskSchedule = T> + Send>, upcoming_tasks: DelayQueue<DelayQueueItem<T>>, keep_alive_key: DelayQueueKey, } fn format_datetime<Z: chrono::TimeZone>(dt: &DateTime<Z>) -> String where <Z as chrono::TimeZone>::Offset: std::fmt::Display, { dt.to_rfc3339_opts(SecondsFormat::Millis, true) } impl<T> ScheduledTaskQueue<T> where T: TaskSchedule + std::fmt::Debug, <T::TimeZone as chrono::TimeZone>::Offset: std::fmt::Display, { pub fn new(task_scheduler: Box<dyn TaskScheduler<TaskSchedule = T> + Send>) -> Self { let mut upcoming_tasks = DelayQueue::new(); let keep_alive_key = upcoming_tasks.insert(DelayQueueItem::KeepAlive, MAX_DELAY_TIMEOUT); Self { task_scheduler, upcoming_tasks, keep_alive_key, } } fn keep_alive(&mut self) { self.upcoming_tasks .reset(&self.keep_alive_key, MAX_DELAY_TIMEOUT); } pub fn handle_expired(&mut self, expired: Expired<DelayQueueItem<T>>) { match expired.into_inner() { DelayQueueItem::TaskSchedule(task_schedule) => self.reschedule_expired(task_schedule), DelayQueueItem::KeepAlive => self.reschedule_all(Default::default()), } } fn reschedule_expired(&mut self, task_schedule: T) { let now = self.task_scheduler.now(); debug!("{:?} expired at {}", task_schedule, now); let task_reschedule = self .task_scheduler .dispatch_and_reschedule_expired_task(&now, task_schedule); if let Some(task_schedule) = task_reschedule { self.schedule_next(&now, task_schedule); self.keep_alive(); } } fn schedule_next( &mut self, now: &DateTime<T::TimeZone>, task_schedule: T, ) -> Option<DateTime<T::TimeZone>> { if let Some(next_after_now) = task_schedule.schedule_next_after(now) { debug_assert!(next_after_now > *now); debug!( "Rescheduling {:?} at {}", task_schedule, format_datetime(&next_after_now) ); let timeout = (next_after_now.clone() - now.clone()).to_std().unwrap(); if timeout < MAX_DELAY_TIMEOUT { self.upcoming_tasks .insert(DelayQueueItem::TaskSchedule(task_schedule), timeout); Some(next_after_now) } else { error!( "Cannot reschedule {:?} at {}: Maximum timeout duration exceeded: {:?} >= {:?}", task_schedule, format_datetime(&next_after_now), timeout, MAX_DELAY_TIMEOUT ); None } } else { debug!("Finished {:?}", task_schedule); None } } pub fn reschedule_all(&mut self, task_schedules: Vec<T>) { // Clear the delay queue, i.e. 
discard all tasks debug!("Discarding all scheduled tasks"); self.upcoming_tasks.clear(); // Repopulate the delay queue with the given irrigation schedules debug_assert!(self.upcoming_tasks.is_empty()); self.upcoming_tasks.reserve(task_schedules.len() + 1); let now = self.task_scheduler.now(); task_schedules.into_iter().for_each(|task_schedule| { self.schedule_next(&now, task_schedule); }); self.keep_alive_key = self .upcoming_tasks .insert(DelayQueueItem::KeepAlive, MAX_DELAY_TIMEOUT); } } impl<T> Stream for ScheduledTaskQueue<T> { type Item = <DelayQueue<DelayQueueItem<T>> as Stream>::Item; type Error = <DelayQueue<DelayQueueItem<T>> as Stream>::Error; fn poll(&mut self) -> Result<Async<Option<Self::Item>>, Self::Error> { self.upcoming_tasks.poll() } } // This mutex will only be locked within the same executor, // though maybe subsequently by different threads. There // won't be any lock contention, i.e. no thread will block // when locking this mutex! It is only required to satisfy // the Send bound for the enclosing context. struct ScheduledTasks<T>(Arc<Mutex<ScheduledTaskQueue<T>>>); impl<T> ScheduledTasks<T> { pub fn new(inner: ScheduledTaskQueue<T>) -> Self { ScheduledTasks(Arc::new(Mutex::new(inner))) } pub fn lock_inner(&mut self) -> MutexGuard<ScheduledTaskQueue<T>> { // Even a try_lock() should never fail, but we prefer // the blocking variant to be safe! let lock_result = self.0.lock(); debug_assert!(lock_result.is_ok()); match lock_result { Ok(guard) => guard, Err(err) => { error!("Failed to lock mutex of scheduled tasks: {}", err); unreachable!(); } } } } impl<T> Clone for ScheduledTasks<T> { fn clone(&self) -> Self { ScheduledTasks(self.0.clone()) } } impl<T> Stream for ScheduledTasks<T> { type Item = <DelayQueue<DelayQueueItem<T>> as Stream>::Item; type Error = <DelayQueue<DelayQueueItem<T>> as Stream>::Error; fn poll(&mut self) -> Result<Async<Option<Self::Item>>, Self::Error> { self.lock_inner().poll() } } #[derive(Debug, Clone, Copy)] pub enum TaskSchedulingSignal {} #[derive(Debug, Clone, PartialEq)] pub enum TaskSchedulingCommand<T: TaskSchedule> { RescheduleAll(Vec<T>), } #[derive(Debug, Clone, Copy)] pub enum TaskSchedulingQuery {} pub type TaskSchedulingAction<T> = Action<TaskSchedulingSignal, TaskSchedulingCommand<T>, TaskSchedulingQuery>; pub type TaskSchedulingActionSender<T> = ActionSender<TaskSchedulingAction<T>>; type TaskSchedulingActionReceiver<T> = ActionReceiver<TaskSchedulingAction<T>>; #[derive(Debug, Clone, Copy)] pub enum TaskSchedulingNotification {} type TaskSchedulingNotificationSender = NotificationSender<TaskSchedulingNotification>; pub type TaskSchedulingNotificationReceiver = NotificationReceiver<TaskSchedulingNotification>; pub struct TaskSchedulingActor<T: TaskSchedule> { // Currently unused _notification_tx: TaskSchedulingNotificationSender,
impl<T> TaskSchedulingActor<T> where T: TaskSchedule + Send + std::fmt::Debug, { pub fn create( task_scheduler: Box<dyn TaskScheduler<TaskSchedule = T> + Send>, ) -> ( impl Future<Item = (), Error = ()>, TaskSchedulingActionSender<T>, TaskSchedulingNotificationReceiver, ) { let (action_tx, action_rx) = new_action_channel(); let (_notification_tx, notification_rx) = new_notification_channel(); let event_loop = futures::lazy(move || { info!("Starting scheduler"); // Lazy instantiation is essential to implicitly attach the // DelayQueue to the Timer of the corresponding Runtime in // DelayQueue::new()!!! Ok(ScheduledTasks::new(ScheduledTaskQueue::new(task_scheduler))) }) .and_then(move |scheduled_tasks| { // Create a handler for expired tasks let mut expired_tasks = scheduled_tasks.clone(); let expired_tasks_handler = scheduled_tasks .clone() .for_each(move |expired| { expired_tasks.lock_inner().handle_expired(expired); Ok(()) }) .map_err(|err| error!("Failed to handle expired tasks: {}", err)); Ok((scheduled_tasks, expired_tasks_handler)) }) .and_then(move |(scheduled_tasks, expired_tasks_handler)| { // Create a handler for actions... let action_handler = Self { _notification_tx, scheduled_tasks, } .handle_actions(action_rx); //...and combine the handlers. // Warning: The order for combining both futures seems to matter!! // Using select() on action_handler followed by expired_tasks_handler // as an argument works as expected. When reversing this order any // previously rescheduled tasks are retained and don't expire until // the next action is received. action_handler .select(expired_tasks_handler) .map(drop) .map_err(drop) }); (event_loop, action_tx, notification_rx) } fn handle_actions( mut self, action_rx: TaskSchedulingActionReceiver<T>, ) -> impl Future<Item = (), Error = ()> { action_rx.for_each(move |action| { self.handle_action(action); Ok(()) }) } fn handle_action(&mut self, action: TaskSchedulingAction<T>) { match action { Action::Signal(signal) => match signal {}, Action::Command(response_tx, command) => self.handle_command(response_tx, command), Action::Query(query) => match query {}, } } fn handle_command( &mut self, response_tx: CommandResponseSender, command: TaskSchedulingCommand<T>, ) { let result = match command { TaskSchedulingCommand::RescheduleAll(task_schedules) => { self.scheduled_tasks .lock_inner() .reschedule_all(task_schedules); Ok(()) } }; reply(response_tx, result); } }
scheduled_tasks: ScheduledTasks<T>, }
random_line_split
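ScheduledTasks above exists only to make the queue cloneable and Send by sharing it behind Arc<Mutex<..>>, with a lock that is never actually contended within a single executor. A generic sketch of that wrapper pattern follows; SharedQueue and its Vec payload are placeholders for the real DelayQueue-backed type.

use std::sync::{Arc, Mutex, MutexGuard};

/// Shared-handle pattern: clones share one queue, and the mutex exists mainly
/// to satisfy the Send bound of the enclosing future.
struct SharedQueue<T>(Arc<Mutex<Vec<T>>>);

impl<T> Clone for SharedQueue<T> {
    fn clone(&self) -> Self {
        SharedQueue(Arc::clone(&self.0))
    }
}

impl<T> SharedQueue<T> {
    fn new() -> Self {
        SharedQueue(Arc::new(Mutex::new(Vec::new())))
    }

    fn lock_inner(&self) -> MutexGuard<'_, Vec<T>> {
        // A poisoned mutex is treated as unrecoverable, as in the original
        // `lock_inner`, which logs and hits unreachable!() in that case.
        self.0.lock().expect("scheduled task queue mutex poisoned")
    }
}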
scheduling.rs
use super::*; use crate::domain::scheduling::*; use chrono::{DateTime, SecondsFormat}; use futures::{Async, Future, Stream}; use std::sync::{Arc, Mutex, MutexGuard}; use std::time::Duration; use tokio::timer::{ delay_queue::{Expired, Key as DelayQueueKey}, DelayQueue, }; enum DelayQueueItem<T> { TaskSchedule(T), KeepAlive, } // The valid duration is limited by this upper bound that is // reserved for the keep alive token! // TODO: The maximum acceptable value of 795 days that does // not cause an internal panic has been discovered experimentally. // No references about this limit can be found in the Tokio docs!? const MAX_DELAY_TIMEOUT: Duration = Duration::from_secs(795 * 24 * 60 * 60); struct ScheduledTaskQueue<T> { task_scheduler: Box<dyn TaskScheduler<TaskSchedule = T> + Send>, upcoming_tasks: DelayQueue<DelayQueueItem<T>>, keep_alive_key: DelayQueueKey, } fn format_datetime<Z: chrono::TimeZone>(dt: &DateTime<Z>) -> String where <Z as chrono::TimeZone>::Offset: std::fmt::Display, { dt.to_rfc3339_opts(SecondsFormat::Millis, true) } impl<T> ScheduledTaskQueue<T> where T: TaskSchedule + std::fmt::Debug, <T::TimeZone as chrono::TimeZone>::Offset: std::fmt::Display, { pub fn new(task_scheduler: Box<dyn TaskScheduler<TaskSchedule = T> + Send>) -> Self { let mut upcoming_tasks = DelayQueue::new(); let keep_alive_key = upcoming_tasks.insert(DelayQueueItem::KeepAlive, MAX_DELAY_TIMEOUT); Self { task_scheduler, upcoming_tasks, keep_alive_key, } } fn keep_alive(&mut self) { self.upcoming_tasks .reset(&self.keep_alive_key, MAX_DELAY_TIMEOUT); } pub fn handle_expired(&mut self, expired: Expired<DelayQueueItem<T>>) { match expired.into_inner() { DelayQueueItem::TaskSchedule(task_schedule) => self.reschedule_expired(task_schedule), DelayQueueItem::KeepAlive => self.reschedule_all(Default::default()), } } fn reschedule_expired(&mut self, task_schedule: T) { let now = self.task_scheduler.now(); debug!("{:?} expired at {}", task_schedule, now); let task_reschedule = self .task_scheduler .dispatch_and_reschedule_expired_task(&now, task_schedule); if let Some(task_schedule) = task_reschedule { self.schedule_next(&now, task_schedule); self.keep_alive(); } } fn schedule_next( &mut self, now: &DateTime<T::TimeZone>, task_schedule: T, ) -> Option<DateTime<T::TimeZone>> { if let Some(next_after_now) = task_schedule.schedule_next_after(now) { debug_assert!(next_after_now > *now); debug!( "Rescheduling {:?} at {}", task_schedule, format_datetime(&next_after_now) ); let timeout = (next_after_now.clone() - now.clone()).to_std().unwrap(); if timeout < MAX_DELAY_TIMEOUT { self.upcoming_tasks .insert(DelayQueueItem::TaskSchedule(task_schedule), timeout); Some(next_after_now) } else { error!( "Cannot reschedule {:?} at {}: Maximum timeout duration exceeded: {:?} >= {:?}", task_schedule, format_datetime(&next_after_now), timeout, MAX_DELAY_TIMEOUT ); None } } else { debug!("Finished {:?}", task_schedule); None } } pub fn reschedule_all(&mut self, task_schedules: Vec<T>) { // Clear the delay queue, i.e. 
discard all tasks debug!("Discarding all scheduled tasks"); self.upcoming_tasks.clear(); // Repopulate the delay queue with the given irrigation schedules debug_assert!(self.upcoming_tasks.is_empty()); self.upcoming_tasks.reserve(task_schedules.len() + 1); let now = self.task_scheduler.now(); task_schedules.into_iter().for_each(|task_schedule| { self.schedule_next(&now, task_schedule); }); self.keep_alive_key = self .upcoming_tasks .insert(DelayQueueItem::KeepAlive, MAX_DELAY_TIMEOUT); } } impl<T> Stream for ScheduledTaskQueue<T> { type Item = <DelayQueue<DelayQueueItem<T>> as Stream>::Item; type Error = <DelayQueue<DelayQueueItem<T>> as Stream>::Error; fn poll(&mut self) -> Result<Async<Option<Self::Item>>, Self::Error> { self.upcoming_tasks.poll() } } // This mutex will only be locked within the same executor, // though maybe subsequently by different threads. There // won't be any lock contention, i.e. no thread will block // when locking this mutex! It is only required to satisfy // the Send bound for the enclosing context. struct ScheduledTasks<T>(Arc<Mutex<ScheduledTaskQueue<T>>>); impl<T> ScheduledTasks<T> { pub fn new(inner: ScheduledTaskQueue<T>) -> Self { ScheduledTasks(Arc::new(Mutex::new(inner))) } pub fn lock_inner(&mut self) -> MutexGuard<ScheduledTaskQueue<T>> { // Even a try_lock() should never fail, but we prefer // the blocking variant to be safe! let lock_result = self.0.lock(); debug_assert!(lock_result.is_ok()); match lock_result { Ok(guard) => guard, Err(err) => { error!("Failed to lock mutex of scheduled tasks: {}", err); unreachable!(); } } } } impl<T> Clone for ScheduledTasks<T> { fn clone(&self) -> Self
} impl<T> Stream for ScheduledTasks<T> { type Item = <DelayQueue<DelayQueueItem<T>> as Stream>::Item; type Error = <DelayQueue<DelayQueueItem<T>> as Stream>::Error; fn poll(&mut self) -> Result<Async<Option<Self::Item>>, Self::Error> { self.lock_inner().poll() } } #[derive(Debug, Clone, Copy)] pub enum TaskSchedulingSignal {} #[derive(Debug, Clone, PartialEq)] pub enum TaskSchedulingCommand<T: TaskSchedule> { RescheduleAll(Vec<T>), } #[derive(Debug, Clone, Copy)] pub enum TaskSchedulingQuery {} pub type TaskSchedulingAction<T> = Action<TaskSchedulingSignal, TaskSchedulingCommand<T>, TaskSchedulingQuery>; pub type TaskSchedulingActionSender<T> = ActionSender<TaskSchedulingAction<T>>; type TaskSchedulingActionReceiver<T> = ActionReceiver<TaskSchedulingAction<T>>; #[derive(Debug, Clone, Copy)] pub enum TaskSchedulingNotification {} type TaskSchedulingNotificationSender = NotificationSender<TaskSchedulingNotification>; pub type TaskSchedulingNotificationReceiver = NotificationReceiver<TaskSchedulingNotification>; pub struct TaskSchedulingActor<T: TaskSchedule> { // Currently unused _notification_tx: TaskSchedulingNotificationSender, scheduled_tasks: ScheduledTasks<T>, } impl<T> TaskSchedulingActor<T> where T: TaskSchedule + Send + std::fmt::Debug, { pub fn create( task_scheduler: Box<dyn TaskScheduler<TaskSchedule = T> + Send>, ) -> ( impl Future<Item = (), Error = ()>, TaskSchedulingActionSender<T>, TaskSchedulingNotificationReceiver, ) { let (action_tx, action_rx) = new_action_channel(); let (_notification_tx, notification_rx) = new_notification_channel(); let event_loop = futures::lazy(move || { info!("Starting scheduler"); // Lazy instantiation is essential to implicitly attach the // DelayQueue to the Timer of the corresponding Runtime in // DelayQueue::new()!!! Ok(ScheduledTasks::new(ScheduledTaskQueue::new(task_scheduler))) }) .and_then(move |scheduled_tasks| { // Create a handler for expired tasks let mut expired_tasks = scheduled_tasks.clone(); let expired_tasks_handler = scheduled_tasks .clone() .for_each(move |expired| { expired_tasks.lock_inner().handle_expired(expired); Ok(()) }) .map_err(|err| error!("Failed to handle expired tasks: {}", err)); Ok((scheduled_tasks, expired_tasks_handler)) }) .and_then(move |(scheduled_tasks, expired_tasks_handler)| { // Create a handler for actions... let action_handler = Self { _notification_tx, scheduled_tasks, } .handle_actions(action_rx); //...and combine the handlers. // Warning: The order for combining both futures seems to matter!! // Using select() on action_handler followed by expired_tasks_handler // as an argument works as expected. When reversing this order any // previously rescheduled tasks are retained and don't expire until // the next action is received. 
action_handler .select(expired_tasks_handler) .map(drop) .map_err(drop) }); (event_loop, action_tx, notification_rx) } fn handle_actions( mut self, action_rx: TaskSchedulingActionReceiver<T>, ) -> impl Future<Item = (), Error = ()> { action_rx.for_each(move |action| { self.handle_action(action); Ok(()) }) } fn handle_action(&mut self, action: TaskSchedulingAction<T>) { match action { Action::Signal(signal) => match signal {}, Action::Command(response_tx, command) => self.handle_command(response_tx, command), Action::Query(query) => match query {}, } } fn handle_command( &mut self, response_tx: CommandResponseSender, command: TaskSchedulingCommand<T>, ) { let result = match command { TaskSchedulingCommand::RescheduleAll(task_schedules) => { self.scheduled_tasks .lock_inner() .reschedule_all(task_schedules); Ok(()) } }; reply(response_tx, result); } }
{ ScheduledTasks(self.0.clone()) }
identifier_body
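handle_action above can match the Signal and Query variants with empty match blocks because those enums have no variants, leaving commands as the only actions that carry work. Below is a compact sketch of that dispatch shape with simplified stand-in types; the channel and reply machinery of the original is omitted.

/// Signal and Query are uninhabited, so matching them needs no arms and the
/// compiler proves those branches can never run.
enum Signal {}
enum Query {}
enum Command { RescheduleAll(Vec<u64>) }

enum Action {
    Signal(Signal),
    Command(Command),
    Query(Query),
}

fn handle_action(action: Action) {
    match action {
        Action::Signal(signal) => match signal {}, // uninhabited: nothing to do
        Action::Query(query) => match query {},    // uninhabited: nothing to do
        Action::Command(Command::RescheduleAll(schedules)) => {
            println!("reschedule {} tasks", schedules.len());
        }
    }
}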
handler.rs
// Copyright 2019 Parity Technologies (UK) Ltd. // // Permission is hereby granted, free of charge, to any person obtaining a // copy of this software and associated documentation files (the "Software"), // to deal in the Software without restriction, including without limitation // the rights to use, copy, modify, merge, publish, distribute, sublicense, // and/or sell copies of the Software, and to permit persons to whom the // Software is furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. use crate::protocol; use futures::prelude::*; use libp2p_core::ProtocolsHandlerEvent; use libp2p_core::protocols_handler::{ KeepAlive, SubstreamProtocol, ProtocolsHandler, ProtocolsHandlerUpgrErr, }; use std::{error::Error, io, fmt, num::NonZeroU32, time::Duration}; use std::collections::VecDeque; use tokio_io::{AsyncRead, AsyncWrite}; use wasm_timer::{Delay, Instant}; use void::Void; /// The configuration for outbound pings. #[derive(Clone, Debug)] pub struct PingConfig { /// The timeout of an outbound ping. timeout: Duration, /// The duration between the last successful outbound or inbound ping /// and the next outbound ping. interval: Duration, /// The maximum number of failed outbound pings before the associated /// connection is deemed unhealthy, indicating to the `Swarm` that it /// should be closed. max_failures: NonZeroU32, /// Whether the connection should generally be kept alive unless /// `max_failures` occur. keep_alive: bool, } impl PingConfig { /// Creates a new `PingConfig` with the following default settings: /// /// * [`PingConfig::with_interval`] 15s /// * [`PingConfig::with_timeout`] 20s /// * [`PingConfig::with_max_failures`] 1 /// * [`PingConfig::with_keep_alive`] false /// /// These settings have the following effect: /// /// * A ping is sent every 15 seconds on a healthy connection. /// * Every ping sent must yield a response within 20 seconds in order to /// be successful. /// * A single ping failure is sufficient for the connection to be subject /// to being closed. /// * The connection may be closed at any time as far as the ping protocol /// is concerned, i.e. the ping protocol itself does not keep the /// connection alive. pub fn new() -> Self { Self { timeout: Duration::from_secs(20), interval: Duration::from_secs(15), max_failures: NonZeroU32::new(1).expect("1!= 0"), keep_alive: false } } /// Sets the ping timeout. pub fn with_timeout(mut self, d: Duration) -> Self { self.timeout = d; self } /// Sets the ping interval. pub fn with_interval(mut self, d: Duration) -> Self { self.interval = d; self } /// Sets the maximum number of consecutive ping failures upon which the remote /// peer is considered unreachable and the connection closed. pub fn with_max_failures(mut self, n: NonZeroU32) -> Self { self.max_failures = n; self } /// Sets whether the ping protocol itself should keep the connection alive, /// apart from the maximum allowed failures. 
/// /// By default, the ping protocol itself allows the connection to be closed /// at any time, i.e. in the absence of ping failures the connection lifetime /// is determined by other protocol handlers. /// /// If the maximum number of allowed ping failures is reached, the /// connection is always terminated as a result of [`PingHandler::poll`] /// returning an error, regardless of the keep-alive setting. pub fn with_keep_alive(mut self, b: bool) -> Self { self.keep_alive = b; self } } /// The result of an inbound or outbound ping. pub type PingResult = Result<PingSuccess, PingFailure>; /// The successful result of processing an inbound or outbound ping. #[derive(Debug)] pub enum PingSuccess { /// Received a ping and sent back a pong. Pong, /// Sent a ping and received back a pong. /// /// Includes the round-trip time. Ping { rtt: Duration }, } /// An outbound ping failure. #[derive(Debug)] pub enum PingFailure { /// The ping timed out, i.e. no response was received within the /// configured ping timeout. Timeout, /// The ping failed for reasons other than a timeout. Other { error: Box<dyn std::error::Error + Send +'static> } } impl fmt::Display for PingFailure { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { PingFailure::Timeout => f.write_str("Ping timeout"), PingFailure::Other { error } => write!(f, "Ping error: {}", error) } } } impl Error for PingFailure { fn source(&self) -> Option<&(dyn Error +'static)> { match self { PingFailure::Timeout => None, PingFailure::Other { error } => Some(&**error) } } } /// Protocol handler that handles pinging the remote at a regular period /// and answering ping queries. /// /// If the remote doesn't respond, produces an error that closes the connection. pub struct PingHandler<TSubstream> { /// Configuration options. config: PingConfig, /// The timer for when to send the next ping. next_ping: Delay, /// The pending results from inbound or outbound pings, ready /// to be `poll()`ed. pending_results: VecDeque<PingResult>, /// The number of consecutive ping failures that occurred. failures: u32, _marker: std::marker::PhantomData<TSubstream> } impl<TSubstream> PingHandler<TSubstream> { /// Builds a new `PingHandler` with the given configuration. pub fn new(config: PingConfig) -> Self { PingHandler { config, next_ping: Delay::new(Instant::now()), pending_results: VecDeque::with_capacity(2), failures: 0, _marker: std::marker::PhantomData } } } impl<TSubstream> ProtocolsHandler for PingHandler<TSubstream> where TSubstream: AsyncRead + AsyncWrite, { type InEvent = Void; type OutEvent = PingResult; type Error = PingFailure; type Substream = TSubstream; type InboundProtocol = protocol::Ping; type OutboundProtocol = protocol::Ping; type OutboundOpenInfo = (); fn listen_protocol(&self) -> SubstreamProtocol<protocol::Ping> { SubstreamProtocol::new(protocol::Ping) } fn inject_fully_negotiated_inbound(&mut self, _: ()) { // A ping from a remote peer has been answered. self.pending_results.push_front(Ok(PingSuccess::Pong)); } fn inject_fully_negotiated_outbound(&mut self, rtt: Duration, _info: ()) { // A ping initiated by the local peer was answered by the remote. 
self.pending_results.push_front(Ok(PingSuccess::Ping { rtt })); } fn inject_event(&mut self, _: Void) {} fn inject_dial_upgrade_error(&mut self, _info: (), error: ProtocolsHandlerUpgrErr<io::Error>) { self.pending_results.push_front( Err(match error { ProtocolsHandlerUpgrErr::Timeout => PingFailure::Timeout, e => PingFailure::Other { error: Box::new(e) } })) } fn connection_keep_alive(&self) -> KeepAlive { if self.config.keep_alive {
KeepAlive::Yes } else { KeepAlive::No } } fn poll(&mut self) -> Poll<ProtocolsHandlerEvent<protocol::Ping, (), PingResult>, Self::Error> { if let Some(result) = self.pending_results.pop_back() { if let Ok(PingSuccess::Ping {.. }) = result { let next_ping = Instant::now() + self.config.interval; self.failures = 0; self.next_ping.reset(next_ping); } if let Err(e) = result { self.failures += 1; if self.failures >= self.config.max_failures.get() { return Err(e) } else { return Ok(Async::Ready(ProtocolsHandlerEvent::Custom(Err(e)))) } } return Ok(Async::Ready(ProtocolsHandlerEvent::Custom(result))) } match self.next_ping.poll() { Ok(Async::Ready(())) => { self.next_ping.reset(Instant::now() + self.config.timeout); let protocol = SubstreamProtocol::new(protocol::Ping) .with_timeout(self.config.timeout); Ok(Async::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol, info: (), })) }, Ok(Async::NotReady) => Ok(Async::NotReady), Err(e) => Err(PingFailure::Other { error: Box::new(e) }) } } } #[cfg(test)] mod tests { use super::*; use futures::future; use quickcheck::*; use rand::Rng; use tokio_tcp::TcpStream; use tokio::runtime::current_thread::Runtime; impl Arbitrary for PingConfig { fn arbitrary<G: Gen>(g: &mut G) -> PingConfig { PingConfig::new() .with_timeout(Duration::from_secs(g.gen_range(0, 3600))) .with_interval(Duration::from_secs(g.gen_range(0, 3600))) .with_max_failures(NonZeroU32::new(g.gen_range(1, 100)).unwrap()) } } fn tick(h: &mut PingHandler<TcpStream>) -> Result< ProtocolsHandlerEvent<protocol::Ping, (), PingResult>, PingFailure > { Runtime::new().unwrap().block_on(future::poll_fn(|| h.poll() )) } #[test] fn ping_interval() { fn prop(cfg: PingConfig, ping_rtt: Duration) -> bool { let mut h = PingHandler::<TcpStream>::new(cfg); // The first ping is scheduled "immediately". let start = h.next_ping.deadline(); assert!(start <= Instant::now()); // Send ping match tick(&mut h) { Ok(ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol, info: _ }) => { // The handler must use the configured timeout. assert_eq!(protocol.timeout(), &h.config.timeout); // The next ping must be scheduled no earlier than the ping timeout. assert!(h.next_ping.deadline() >= start + h.config.timeout); } e => panic!("Unexpected event: {:?}", e) } let now = Instant::now(); // Receive pong h.inject_fully_negotiated_outbound(ping_rtt, ()); match tick(&mut h) { Ok(ProtocolsHandlerEvent::Custom(Ok(PingSuccess::Ping { rtt }))) => { // The handler must report the given RTT. assert_eq!(rtt, ping_rtt); // The next ping must be scheduled no earlier than the ping interval. assert!(now + h.config.interval <= h.next_ping.deadline()); } e => panic!("Unexpected event: {:?}", e) } true } quickcheck(prop as fn(_,_) -> _); } #[test] fn max_failures() { let cfg = PingConfig::arbitrary(&mut StdGen::new(rand::thread_rng(), 100)); let mut h = PingHandler::<TcpStream>::new(cfg); for _ in 0.. h.config.max_failures.get() - 1 { h.inject_dial_upgrade_error((), ProtocolsHandlerUpgrErr::Timeout); match tick(&mut h) { Ok(ProtocolsHandlerEvent::Custom(Err(PingFailure::Timeout))) => {} e => panic!("Unexpected event: {:?}", e) } } h.inject_dial_upgrade_error((), ProtocolsHandlerUpgrErr::Timeout); match tick(&mut h) { Err(PingFailure::Timeout) => { assert_eq!(h.failures, h.config.max_failures.get()); } e => panic!("Unexpected event: {:?}", e) } h.inject_fully_negotiated_outbound(Duration::from_secs(1), ()); match tick(&mut h) { Ok(ProtocolsHandlerEvent::Custom(Ok(PingSuccess::Ping {.. 
}))) => { // A success resets the counter for consecutive failures. assert_eq!(h.failures, 0); } e => panic!("Unexpected event: {:?}", e) } } }
random_line_split
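The handler.rs records above define `PingConfig` as a consuming builder. A minimal, illustrative usage sketch follows; it assumes `PingConfig` and its `with_*` methods are in scope exactly as defined in those records, and the helper name and chosen values are hypothetical:

```rust
use std::{num::NonZeroU32, time::Duration};

// Hypothetical helper built only from the builder methods shown in the records above.
fn strict_ping_config() -> PingConfig {
    PingConfig::new()
        .with_interval(Duration::from_secs(30)) // send a ping every 30 seconds
        .with_timeout(Duration::from_secs(10)) // each ping must be answered within 10 seconds
        .with_max_failures(NonZeroU32::new(3).expect("3 != 0")) // 3 consecutive failures close the connection
        .with_keep_alive(true) // let the ping protocol itself keep the connection alive
}
```

Each `with_*` method takes `self` by value and returns it, so the whole configuration reads as a single chained expression.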
handler.rs
// Copyright 2019 Parity Technologies (UK) Ltd. // // Permission is hereby granted, free of charge, to any person obtaining a // copy of this software and associated documentation files (the "Software"), // to deal in the Software without restriction, including without limitation // the rights to use, copy, modify, merge, publish, distribute, sublicense, // and/or sell copies of the Software, and to permit persons to whom the // Software is furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. use crate::protocol; use futures::prelude::*; use libp2p_core::ProtocolsHandlerEvent; use libp2p_core::protocols_handler::{ KeepAlive, SubstreamProtocol, ProtocolsHandler, ProtocolsHandlerUpgrErr, }; use std::{error::Error, io, fmt, num::NonZeroU32, time::Duration}; use std::collections::VecDeque; use tokio_io::{AsyncRead, AsyncWrite}; use wasm_timer::{Delay, Instant}; use void::Void; /// The configuration for outbound pings. #[derive(Clone, Debug)] pub struct PingConfig { /// The timeout of an outbound ping. timeout: Duration, /// The duration between the last successful outbound or inbound ping /// and the next outbound ping. interval: Duration, /// The maximum number of failed outbound pings before the associated /// connection is deemed unhealthy, indicating to the `Swarm` that it /// should be closed. max_failures: NonZeroU32, /// Whether the connection should generally be kept alive unless /// `max_failures` occur. keep_alive: bool, } impl PingConfig { /// Creates a new `PingConfig` with the following default settings: /// /// * [`PingConfig::with_interval`] 15s /// * [`PingConfig::with_timeout`] 20s /// * [`PingConfig::with_max_failures`] 1 /// * [`PingConfig::with_keep_alive`] false /// /// These settings have the following effect: /// /// * A ping is sent every 15 seconds on a healthy connection. /// * Every ping sent must yield a response within 20 seconds in order to /// be successful. /// * A single ping failure is sufficient for the connection to be subject /// to being closed. /// * The connection may be closed at any time as far as the ping protocol /// is concerned, i.e. the ping protocol itself does not keep the /// connection alive. pub fn new() -> Self { Self { timeout: Duration::from_secs(20), interval: Duration::from_secs(15), max_failures: NonZeroU32::new(1).expect("1!= 0"), keep_alive: false } } /// Sets the ping timeout. pub fn with_timeout(mut self, d: Duration) -> Self { self.timeout = d; self } /// Sets the ping interval. pub fn with_interval(mut self, d: Duration) -> Self { self.interval = d; self } /// Sets the maximum number of consecutive ping failures upon which the remote /// peer is considered unreachable and the connection closed. pub fn with_max_failures(mut self, n: NonZeroU32) -> Self { self.max_failures = n; self } /// Sets whether the ping protocol itself should keep the connection alive, /// apart from the maximum allowed failures. 
/// /// By default, the ping protocol itself allows the connection to be closed /// at any time, i.e. in the absence of ping failures the connection lifetime /// is determined by other protocol handlers. /// /// If the maximum number of allowed ping failures is reached, the /// connection is always terminated as a result of [`PingHandler::poll`] /// returning an error, regardless of the keep-alive setting. pub fn with_keep_alive(mut self, b: bool) -> Self { self.keep_alive = b; self } } /// The result of an inbound or outbound ping. pub type PingResult = Result<PingSuccess, PingFailure>; /// The successful result of processing an inbound or outbound ping. #[derive(Debug)] pub enum PingSuccess { /// Received a ping and sent back a pong. Pong, /// Sent a ping and received back a pong. /// /// Includes the round-trip time. Ping { rtt: Duration }, } /// An outbound ping failure. #[derive(Debug)] pub enum PingFailure { /// The ping timed out, i.e. no response was received within the /// configured ping timeout. Timeout, /// The ping failed for reasons other than a timeout. Other { error: Box<dyn std::error::Error + Send +'static> } } impl fmt::Display for PingFailure { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { PingFailure::Timeout => f.write_str("Ping timeout"), PingFailure::Other { error } => write!(f, "Ping error: {}", error) } } } impl Error for PingFailure { fn source(&self) -> Option<&(dyn Error +'static)> { match self { PingFailure::Timeout => None, PingFailure::Other { error } => Some(&**error) } } } /// Protocol handler that handles pinging the remote at a regular period /// and answering ping queries. /// /// If the remote doesn't respond, produces an error that closes the connection. pub struct PingHandler<TSubstream> { /// Configuration options. config: PingConfig, /// The timer for when to send the next ping. next_ping: Delay, /// The pending results from inbound or outbound pings, ready /// to be `poll()`ed. pending_results: VecDeque<PingResult>, /// The number of consecutive ping failures that occurred. failures: u32, _marker: std::marker::PhantomData<TSubstream> } impl<TSubstream> PingHandler<TSubstream> { /// Builds a new `PingHandler` with the given configuration. pub fn new(config: PingConfig) -> Self { PingHandler { config, next_ping: Delay::new(Instant::now()), pending_results: VecDeque::with_capacity(2), failures: 0, _marker: std::marker::PhantomData } } } impl<TSubstream> ProtocolsHandler for PingHandler<TSubstream> where TSubstream: AsyncRead + AsyncWrite, { type InEvent = Void; type OutEvent = PingResult; type Error = PingFailure; type Substream = TSubstream; type InboundProtocol = protocol::Ping; type OutboundProtocol = protocol::Ping; type OutboundOpenInfo = (); fn listen_protocol(&self) -> SubstreamProtocol<protocol::Ping> { SubstreamProtocol::new(protocol::Ping) } fn inject_fully_negotiated_inbound(&mut self, _: ()) { // A ping from a remote peer has been answered. self.pending_results.push_front(Ok(PingSuccess::Pong)); } fn inject_fully_negotiated_outbound(&mut self, rtt: Duration, _info: ()) { // A ping initiated by the local peer was answered by the remote. 
self.pending_results.push_front(Ok(PingSuccess::Ping { rtt })); } fn inject_event(&mut self, _: Void) {} fn inject_dial_upgrade_error(&mut self, _info: (), error: ProtocolsHandlerUpgrErr<io::Error>) { self.pending_results.push_front( Err(match error { ProtocolsHandlerUpgrErr::Timeout => PingFailure::Timeout, e => PingFailure::Other { error: Box::new(e) } })) } fn connection_keep_alive(&self) -> KeepAlive { if self.config.keep_alive { KeepAlive::Yes } else { KeepAlive::No } } fn poll(&mut self) -> Poll<ProtocolsHandlerEvent<protocol::Ping, (), PingResult>, Self::Error> { if let Some(result) = self.pending_results.pop_back() { if let Ok(PingSuccess::Ping {.. }) = result { let next_ping = Instant::now() + self.config.interval; self.failures = 0; self.next_ping.reset(next_ping); } if let Err(e) = result { self.failures += 1; if self.failures >= self.config.max_failures.get() { return Err(e) } else { return Ok(Async::Ready(ProtocolsHandlerEvent::Custom(Err(e)))) } } return Ok(Async::Ready(ProtocolsHandlerEvent::Custom(result))) } match self.next_ping.poll() { Ok(Async::Ready(())) => { self.next_ping.reset(Instant::now() + self.config.timeout); let protocol = SubstreamProtocol::new(protocol::Ping) .with_timeout(self.config.timeout); Ok(Async::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol, info: (), })) }, Ok(Async::NotReady) => Ok(Async::NotReady), Err(e) => Err(PingFailure::Other { error: Box::new(e) }) } } } #[cfg(test)] mod tests { use super::*; use futures::future; use quickcheck::*; use rand::Rng; use tokio_tcp::TcpStream; use tokio::runtime::current_thread::Runtime; impl Arbitrary for PingConfig { fn arbitrary<G: Gen>(g: &mut G) -> PingConfig { PingConfig::new() .with_timeout(Duration::from_secs(g.gen_range(0, 3600))) .with_interval(Duration::from_secs(g.gen_range(0, 3600))) .with_max_failures(NonZeroU32::new(g.gen_range(1, 100)).unwrap()) } } fn tick(h: &mut PingHandler<TcpStream>) -> Result< ProtocolsHandlerEvent<protocol::Ping, (), PingResult>, PingFailure > { Runtime::new().unwrap().block_on(future::poll_fn(|| h.poll() )) } #[test] fn ping_interval() { fn prop(cfg: PingConfig, ping_rtt: Duration) -> bool { let mut h = PingHandler::<TcpStream>::new(cfg); // The first ping is scheduled "immediately". let start = h.next_ping.deadline(); assert!(start <= Instant::now()); // Send ping match tick(&mut h) { Ok(ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol, info: _ }) => { // The handler must use the configured timeout. assert_eq!(protocol.timeout(), &h.config.timeout); // The next ping must be scheduled no earlier than the ping timeout. assert!(h.next_ping.deadline() >= start + h.config.timeout); } e => panic!("Unexpected event: {:?}", e) } let now = Instant::now(); // Receive pong h.inject_fully_negotiated_outbound(ping_rtt, ()); match tick(&mut h) { Ok(ProtocolsHandlerEvent::Custom(Ok(PingSuccess::Ping { rtt }))) => { // The handler must report the given RTT. assert_eq!(rtt, ping_rtt); // The next ping must be scheduled no earlier than the ping interval. assert!(now + h.config.interval <= h.next_ping.deadline()); } e => panic!("Unexpected event: {:?}", e) } true } quickcheck(prop as fn(_,_) -> _); } #[test] fn max_failures() { let cfg = PingConfig::arbitrary(&mut StdGen::new(rand::thread_rng(), 100)); let mut h = PingHandler::<TcpStream>::new(cfg); for _ in 0.. h.config.max_failures.get() - 1 { h.inject_dial_upgrade_error((), ProtocolsHandlerUpgrErr::Timeout); match tick(&mut h) { Ok(ProtocolsHandlerEvent::Custom(Err(PingFailure::Timeout))) =>
e => panic!("Unexpected event: {:?}", e) } } h.inject_dial_upgrade_error((), ProtocolsHandlerUpgrErr::Timeout); match tick(&mut h) { Err(PingFailure::Timeout) => { assert_eq!(h.failures, h.config.max_failures.get()); } e => panic!("Unexpected event: {:?}", e) } h.inject_fully_negotiated_outbound(Duration::from_secs(1), ()); match tick(&mut h) { Ok(ProtocolsHandlerEvent::Custom(Ok(PingSuccess::Ping {.. }))) => { // A success resets the counter for consecutive failures. assert_eq!(h.failures, 0); } e => panic!("Unexpected event: {:?}", e) } } }
{}
conditional_block
handler.rs
// Copyright 2019 Parity Technologies (UK) Ltd. // // Permission is hereby granted, free of charge, to any person obtaining a // copy of this software and associated documentation files (the "Software"), // to deal in the Software without restriction, including without limitation // the rights to use, copy, modify, merge, publish, distribute, sublicense, // and/or sell copies of the Software, and to permit persons to whom the // Software is furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. use crate::protocol; use futures::prelude::*; use libp2p_core::ProtocolsHandlerEvent; use libp2p_core::protocols_handler::{ KeepAlive, SubstreamProtocol, ProtocolsHandler, ProtocolsHandlerUpgrErr, }; use std::{error::Error, io, fmt, num::NonZeroU32, time::Duration}; use std::collections::VecDeque; use tokio_io::{AsyncRead, AsyncWrite}; use wasm_timer::{Delay, Instant}; use void::Void; /// The configuration for outbound pings. #[derive(Clone, Debug)] pub struct PingConfig { /// The timeout of an outbound ping. timeout: Duration, /// The duration between the last successful outbound or inbound ping /// and the next outbound ping. interval: Duration, /// The maximum number of failed outbound pings before the associated /// connection is deemed unhealthy, indicating to the `Swarm` that it /// should be closed. max_failures: NonZeroU32, /// Whether the connection should generally be kept alive unless /// `max_failures` occur. keep_alive: bool, } impl PingConfig { /// Creates a new `PingConfig` with the following default settings: /// /// * [`PingConfig::with_interval`] 15s /// * [`PingConfig::with_timeout`] 20s /// * [`PingConfig::with_max_failures`] 1 /// * [`PingConfig::with_keep_alive`] false /// /// These settings have the following effect: /// /// * A ping is sent every 15 seconds on a healthy connection. /// * Every ping sent must yield a response within 20 seconds in order to /// be successful. /// * A single ping failure is sufficient for the connection to be subject /// to being closed. /// * The connection may be closed at any time as far as the ping protocol /// is concerned, i.e. the ping protocol itself does not keep the /// connection alive. pub fn new() -> Self { Self { timeout: Duration::from_secs(20), interval: Duration::from_secs(15), max_failures: NonZeroU32::new(1).expect("1!= 0"), keep_alive: false } } /// Sets the ping timeout. pub fn with_timeout(mut self, d: Duration) -> Self { self.timeout = d; self } /// Sets the ping interval. pub fn with_interval(mut self, d: Duration) -> Self { self.interval = d; self } /// Sets the maximum number of consecutive ping failures upon which the remote /// peer is considered unreachable and the connection closed. pub fn with_max_failures(mut self, n: NonZeroU32) -> Self { self.max_failures = n; self } /// Sets whether the ping protocol itself should keep the connection alive, /// apart from the maximum allowed failures. 
/// /// By default, the ping protocol itself allows the connection to be closed /// at any time, i.e. in the absence of ping failures the connection lifetime /// is determined by other protocol handlers. /// /// If the maximum number of allowed ping failures is reached, the /// connection is always terminated as a result of [`PingHandler::poll`] /// returning an error, regardless of the keep-alive setting. pub fn with_keep_alive(mut self, b: bool) -> Self { self.keep_alive = b; self } } /// The result of an inbound or outbound ping. pub type PingResult = Result<PingSuccess, PingFailure>; /// The successful result of processing an inbound or outbound ping. #[derive(Debug)] pub enum PingSuccess { /// Received a ping and sent back a pong. Pong, /// Sent a ping and received back a pong. /// /// Includes the round-trip time. Ping { rtt: Duration }, } /// An outbound ping failure. #[derive(Debug)] pub enum PingFailure { /// The ping timed out, i.e. no response was received within the /// configured ping timeout. Timeout, /// The ping failed for reasons other than a timeout. Other { error: Box<dyn std::error::Error + Send +'static> } } impl fmt::Display for PingFailure { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { PingFailure::Timeout => f.write_str("Ping timeout"), PingFailure::Other { error } => write!(f, "Ping error: {}", error) } } } impl Error for PingFailure { fn source(&self) -> Option<&(dyn Error +'static)> { match self { PingFailure::Timeout => None, PingFailure::Other { error } => Some(&**error) } } } /// Protocol handler that handles pinging the remote at a regular period /// and answering ping queries. /// /// If the remote doesn't respond, produces an error that closes the connection. pub struct PingHandler<TSubstream> { /// Configuration options. config: PingConfig, /// The timer for when to send the next ping. next_ping: Delay, /// The pending results from inbound or outbound pings, ready /// to be `poll()`ed. pending_results: VecDeque<PingResult>, /// The number of consecutive ping failures that occurred. failures: u32, _marker: std::marker::PhantomData<TSubstream> } impl<TSubstream> PingHandler<TSubstream> { /// Builds a new `PingHandler` with the given configuration. pub fn
(config: PingConfig) -> Self { PingHandler { config, next_ping: Delay::new(Instant::now()), pending_results: VecDeque::with_capacity(2), failures: 0, _marker: std::marker::PhantomData } } } impl<TSubstream> ProtocolsHandler for PingHandler<TSubstream> where TSubstream: AsyncRead + AsyncWrite, { type InEvent = Void; type OutEvent = PingResult; type Error = PingFailure; type Substream = TSubstream; type InboundProtocol = protocol::Ping; type OutboundProtocol = protocol::Ping; type OutboundOpenInfo = (); fn listen_protocol(&self) -> SubstreamProtocol<protocol::Ping> { SubstreamProtocol::new(protocol::Ping) } fn inject_fully_negotiated_inbound(&mut self, _: ()) { // A ping from a remote peer has been answered. self.pending_results.push_front(Ok(PingSuccess::Pong)); } fn inject_fully_negotiated_outbound(&mut self, rtt: Duration, _info: ()) { // A ping initiated by the local peer was answered by the remote. self.pending_results.push_front(Ok(PingSuccess::Ping { rtt })); } fn inject_event(&mut self, _: Void) {} fn inject_dial_upgrade_error(&mut self, _info: (), error: ProtocolsHandlerUpgrErr<io::Error>) { self.pending_results.push_front( Err(match error { ProtocolsHandlerUpgrErr::Timeout => PingFailure::Timeout, e => PingFailure::Other { error: Box::new(e) } })) } fn connection_keep_alive(&self) -> KeepAlive { if self.config.keep_alive { KeepAlive::Yes } else { KeepAlive::No } } fn poll(&mut self) -> Poll<ProtocolsHandlerEvent<protocol::Ping, (), PingResult>, Self::Error> { if let Some(result) = self.pending_results.pop_back() { if let Ok(PingSuccess::Ping {.. }) = result { let next_ping = Instant::now() + self.config.interval; self.failures = 0; self.next_ping.reset(next_ping); } if let Err(e) = result { self.failures += 1; if self.failures >= self.config.max_failures.get() { return Err(e) } else { return Ok(Async::Ready(ProtocolsHandlerEvent::Custom(Err(e)))) } } return Ok(Async::Ready(ProtocolsHandlerEvent::Custom(result))) } match self.next_ping.poll() { Ok(Async::Ready(())) => { self.next_ping.reset(Instant::now() + self.config.timeout); let protocol = SubstreamProtocol::new(protocol::Ping) .with_timeout(self.config.timeout); Ok(Async::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol, info: (), })) }, Ok(Async::NotReady) => Ok(Async::NotReady), Err(e) => Err(PingFailure::Other { error: Box::new(e) }) } } } #[cfg(test)] mod tests { use super::*; use futures::future; use quickcheck::*; use rand::Rng; use tokio_tcp::TcpStream; use tokio::runtime::current_thread::Runtime; impl Arbitrary for PingConfig { fn arbitrary<G: Gen>(g: &mut G) -> PingConfig { PingConfig::new() .with_timeout(Duration::from_secs(g.gen_range(0, 3600))) .with_interval(Duration::from_secs(g.gen_range(0, 3600))) .with_max_failures(NonZeroU32::new(g.gen_range(1, 100)).unwrap()) } } fn tick(h: &mut PingHandler<TcpStream>) -> Result< ProtocolsHandlerEvent<protocol::Ping, (), PingResult>, PingFailure > { Runtime::new().unwrap().block_on(future::poll_fn(|| h.poll() )) } #[test] fn ping_interval() { fn prop(cfg: PingConfig, ping_rtt: Duration) -> bool { let mut h = PingHandler::<TcpStream>::new(cfg); // The first ping is scheduled "immediately". let start = h.next_ping.deadline(); assert!(start <= Instant::now()); // Send ping match tick(&mut h) { Ok(ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol, info: _ }) => { // The handler must use the configured timeout. assert_eq!(protocol.timeout(), &h.config.timeout); // The next ping must be scheduled no earlier than the ping timeout. 
assert!(h.next_ping.deadline() >= start + h.config.timeout); } e => panic!("Unexpected event: {:?}", e) } let now = Instant::now(); // Receive pong h.inject_fully_negotiated_outbound(ping_rtt, ()); match tick(&mut h) { Ok(ProtocolsHandlerEvent::Custom(Ok(PingSuccess::Ping { rtt }))) => { // The handler must report the given RTT. assert_eq!(rtt, ping_rtt); // The next ping must be scheduled no earlier than the ping interval. assert!(now + h.config.interval <= h.next_ping.deadline()); } e => panic!("Unexpected event: {:?}", e) } true } quickcheck(prop as fn(_,_) -> _); } #[test] fn max_failures() { let cfg = PingConfig::arbitrary(&mut StdGen::new(rand::thread_rng(), 100)); let mut h = PingHandler::<TcpStream>::new(cfg); for _ in 0.. h.config.max_failures.get() - 1 { h.inject_dial_upgrade_error((), ProtocolsHandlerUpgrErr::Timeout); match tick(&mut h) { Ok(ProtocolsHandlerEvent::Custom(Err(PingFailure::Timeout))) => {} e => panic!("Unexpected event: {:?}", e) } } h.inject_dial_upgrade_error((), ProtocolsHandlerUpgrErr::Timeout); match tick(&mut h) { Err(PingFailure::Timeout) => { assert_eq!(h.failures, h.config.max_failures.get()); } e => panic!("Unexpected event: {:?}", e) } h.inject_fully_negotiated_outbound(Duration::from_secs(1), ()); match tick(&mut h) { Ok(ProtocolsHandlerEvent::Custom(Ok(PingSuccess::Ping {.. }))) => { // A success resets the counter for consecutive failures. assert_eq!(h.failures, 0); } e => panic!("Unexpected event: {:?}", e) } } }
new
identifier_name
handler.rs
// Copyright 2019 Parity Technologies (UK) Ltd. // // Permission is hereby granted, free of charge, to any person obtaining a // copy of this software and associated documentation files (the "Software"), // to deal in the Software without restriction, including without limitation // the rights to use, copy, modify, merge, publish, distribute, sublicense, // and/or sell copies of the Software, and to permit persons to whom the // Software is furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. use crate::protocol; use futures::prelude::*; use libp2p_core::ProtocolsHandlerEvent; use libp2p_core::protocols_handler::{ KeepAlive, SubstreamProtocol, ProtocolsHandler, ProtocolsHandlerUpgrErr, }; use std::{error::Error, io, fmt, num::NonZeroU32, time::Duration}; use std::collections::VecDeque; use tokio_io::{AsyncRead, AsyncWrite}; use wasm_timer::{Delay, Instant}; use void::Void; /// The configuration for outbound pings. #[derive(Clone, Debug)] pub struct PingConfig { /// The timeout of an outbound ping. timeout: Duration, /// The duration between the last successful outbound or inbound ping /// and the next outbound ping. interval: Duration, /// The maximum number of failed outbound pings before the associated /// connection is deemed unhealthy, indicating to the `Swarm` that it /// should be closed. max_failures: NonZeroU32, /// Whether the connection should generally be kept alive unless /// `max_failures` occur. keep_alive: bool, } impl PingConfig { /// Creates a new `PingConfig` with the following default settings: /// /// * [`PingConfig::with_interval`] 15s /// * [`PingConfig::with_timeout`] 20s /// * [`PingConfig::with_max_failures`] 1 /// * [`PingConfig::with_keep_alive`] false /// /// These settings have the following effect: /// /// * A ping is sent every 15 seconds on a healthy connection. /// * Every ping sent must yield a response within 20 seconds in order to /// be successful. /// * A single ping failure is sufficient for the connection to be subject /// to being closed. /// * The connection may be closed at any time as far as the ping protocol /// is concerned, i.e. the ping protocol itself does not keep the /// connection alive. pub fn new() -> Self { Self { timeout: Duration::from_secs(20), interval: Duration::from_secs(15), max_failures: NonZeroU32::new(1).expect("1!= 0"), keep_alive: false } } /// Sets the ping timeout. pub fn with_timeout(mut self, d: Duration) -> Self { self.timeout = d; self } /// Sets the ping interval. pub fn with_interval(mut self, d: Duration) -> Self { self.interval = d; self } /// Sets the maximum number of consecutive ping failures upon which the remote /// peer is considered unreachable and the connection closed. pub fn with_max_failures(mut self, n: NonZeroU32) -> Self { self.max_failures = n; self } /// Sets whether the ping protocol itself should keep the connection alive, /// apart from the maximum allowed failures. 
/// /// By default, the ping protocol itself allows the connection to be closed /// at any time, i.e. in the absence of ping failures the connection lifetime /// is determined by other protocol handlers. /// /// If the maximum number of allowed ping failures is reached, the /// connection is always terminated as a result of [`PingHandler::poll`] /// returning an error, regardless of the keep-alive setting. pub fn with_keep_alive(mut self, b: bool) -> Self { self.keep_alive = b; self } } /// The result of an inbound or outbound ping. pub type PingResult = Result<PingSuccess, PingFailure>; /// The successful result of processing an inbound or outbound ping. #[derive(Debug)] pub enum PingSuccess { /// Received a ping and sent back a pong. Pong, /// Sent a ping and received back a pong. /// /// Includes the round-trip time. Ping { rtt: Duration }, } /// An outbound ping failure. #[derive(Debug)] pub enum PingFailure { /// The ping timed out, i.e. no response was received within the /// configured ping timeout. Timeout, /// The ping failed for reasons other than a timeout. Other { error: Box<dyn std::error::Error + Send +'static> } } impl fmt::Display for PingFailure { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { PingFailure::Timeout => f.write_str("Ping timeout"), PingFailure::Other { error } => write!(f, "Ping error: {}", error) } } } impl Error for PingFailure { fn source(&self) -> Option<&(dyn Error +'static)> { match self { PingFailure::Timeout => None, PingFailure::Other { error } => Some(&**error) } } } /// Protocol handler that handles pinging the remote at a regular period /// and answering ping queries. /// /// If the remote doesn't respond, produces an error that closes the connection. pub struct PingHandler<TSubstream> { /// Configuration options. config: PingConfig, /// The timer for when to send the next ping. next_ping: Delay, /// The pending results from inbound or outbound pings, ready /// to be `poll()`ed. pending_results: VecDeque<PingResult>, /// The number of consecutive ping failures that occurred. failures: u32, _marker: std::marker::PhantomData<TSubstream> } impl<TSubstream> PingHandler<TSubstream> { /// Builds a new `PingHandler` with the given configuration. pub fn new(config: PingConfig) -> Self { PingHandler { config, next_ping: Delay::new(Instant::now()), pending_results: VecDeque::with_capacity(2), failures: 0, _marker: std::marker::PhantomData } } } impl<TSubstream> ProtocolsHandler for PingHandler<TSubstream> where TSubstream: AsyncRead + AsyncWrite, { type InEvent = Void; type OutEvent = PingResult; type Error = PingFailure; type Substream = TSubstream; type InboundProtocol = protocol::Ping; type OutboundProtocol = protocol::Ping; type OutboundOpenInfo = (); fn listen_protocol(&self) -> SubstreamProtocol<protocol::Ping> { SubstreamProtocol::new(protocol::Ping) } fn inject_fully_negotiated_inbound(&mut self, _: ())
fn inject_fully_negotiated_outbound(&mut self, rtt: Duration, _info: ()) { // A ping initiated by the local peer was answered by the remote. self.pending_results.push_front(Ok(PingSuccess::Ping { rtt })); } fn inject_event(&mut self, _: Void) {} fn inject_dial_upgrade_error(&mut self, _info: (), error: ProtocolsHandlerUpgrErr<io::Error>) { self.pending_results.push_front( Err(match error { ProtocolsHandlerUpgrErr::Timeout => PingFailure::Timeout, e => PingFailure::Other { error: Box::new(e) } })) } fn connection_keep_alive(&self) -> KeepAlive { if self.config.keep_alive { KeepAlive::Yes } else { KeepAlive::No } } fn poll(&mut self) -> Poll<ProtocolsHandlerEvent<protocol::Ping, (), PingResult>, Self::Error> { if let Some(result) = self.pending_results.pop_back() { if let Ok(PingSuccess::Ping {.. }) = result { let next_ping = Instant::now() + self.config.interval; self.failures = 0; self.next_ping.reset(next_ping); } if let Err(e) = result { self.failures += 1; if self.failures >= self.config.max_failures.get() { return Err(e) } else { return Ok(Async::Ready(ProtocolsHandlerEvent::Custom(Err(e)))) } } return Ok(Async::Ready(ProtocolsHandlerEvent::Custom(result))) } match self.next_ping.poll() { Ok(Async::Ready(())) => { self.next_ping.reset(Instant::now() + self.config.timeout); let protocol = SubstreamProtocol::new(protocol::Ping) .with_timeout(self.config.timeout); Ok(Async::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol, info: (), })) }, Ok(Async::NotReady) => Ok(Async::NotReady), Err(e) => Err(PingFailure::Other { error: Box::new(e) }) } } } #[cfg(test)] mod tests { use super::*; use futures::future; use quickcheck::*; use rand::Rng; use tokio_tcp::TcpStream; use tokio::runtime::current_thread::Runtime; impl Arbitrary for PingConfig { fn arbitrary<G: Gen>(g: &mut G) -> PingConfig { PingConfig::new() .with_timeout(Duration::from_secs(g.gen_range(0, 3600))) .with_interval(Duration::from_secs(g.gen_range(0, 3600))) .with_max_failures(NonZeroU32::new(g.gen_range(1, 100)).unwrap()) } } fn tick(h: &mut PingHandler<TcpStream>) -> Result< ProtocolsHandlerEvent<protocol::Ping, (), PingResult>, PingFailure > { Runtime::new().unwrap().block_on(future::poll_fn(|| h.poll() )) } #[test] fn ping_interval() { fn prop(cfg: PingConfig, ping_rtt: Duration) -> bool { let mut h = PingHandler::<TcpStream>::new(cfg); // The first ping is scheduled "immediately". let start = h.next_ping.deadline(); assert!(start <= Instant::now()); // Send ping match tick(&mut h) { Ok(ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol, info: _ }) => { // The handler must use the configured timeout. assert_eq!(protocol.timeout(), &h.config.timeout); // The next ping must be scheduled no earlier than the ping timeout. assert!(h.next_ping.deadline() >= start + h.config.timeout); } e => panic!("Unexpected event: {:?}", e) } let now = Instant::now(); // Receive pong h.inject_fully_negotiated_outbound(ping_rtt, ()); match tick(&mut h) { Ok(ProtocolsHandlerEvent::Custom(Ok(PingSuccess::Ping { rtt }))) => { // The handler must report the given RTT. assert_eq!(rtt, ping_rtt); // The next ping must be scheduled no earlier than the ping interval. assert!(now + h.config.interval <= h.next_ping.deadline()); } e => panic!("Unexpected event: {:?}", e) } true } quickcheck(prop as fn(_,_) -> _); } #[test] fn max_failures() { let cfg = PingConfig::arbitrary(&mut StdGen::new(rand::thread_rng(), 100)); let mut h = PingHandler::<TcpStream>::new(cfg); for _ in 0.. 
h.config.max_failures.get() - 1 { h.inject_dial_upgrade_error((), ProtocolsHandlerUpgrErr::Timeout); match tick(&mut h) { Ok(ProtocolsHandlerEvent::Custom(Err(PingFailure::Timeout))) => {} e => panic!("Unexpected event: {:?}", e) } } h.inject_dial_upgrade_error((), ProtocolsHandlerUpgrErr::Timeout); match tick(&mut h) { Err(PingFailure::Timeout) => { assert_eq!(h.failures, h.config.max_failures.get()); } e => panic!("Unexpected event: {:?}", e) } h.inject_fully_negotiated_outbound(Duration::from_secs(1), ()); match tick(&mut h) { Ok(ProtocolsHandlerEvent::Custom(Ok(PingSuccess::Ping {.. }))) => { // A success resets the counter for consecutive failures. assert_eq!(h.failures, 0); } e => panic!("Unexpected event: {:?}", e) } } }
{ // A ping from a remote peer has been answered. self.pending_results.push_front(Ok(PingSuccess::Pong)); }
identifier_body
dict.rs
use bitflags::bitflags; use std::{ffi::CStr, fmt, marker::PhantomData}; pub trait ReadableDict { /// Obtain the pointer to the raw `spa_dict` struct. fn get_dict_ptr(&self) -> *const spa_sys::spa_dict; /// An iterator over all raw key-value pairs. /// The iterator element type is `(&CStr, &CStr)`. fn iter_cstr(&self) -> CIter { let first_elem_ptr = unsafe { (*self.get_dict_ptr()).items }; CIter { next: first_elem_ptr, end: unsafe { first_elem_ptr.offset((*self.get_dict_ptr()).n_items as isize) }, _phantom: PhantomData, } } /// An iterator over all key-value pairs that are valid utf-8. /// The iterator element type is `(&str, &str)`. fn iter(&self) -> Iter { Iter { inner: self.iter_cstr(), } } /// An iterator over all keys that are valid utf-8. /// The iterator element type is &str. fn keys(&self) -> Keys { Keys { inner: self.iter_cstr(), } } /// An iterator over all values that are valid utf-8. /// The iterator element type is &str. fn values(&self) -> Values { Values { inner: self.iter_cstr(), } } /// Returns the number of key-value-pairs in the dict. /// This is the number of all pairs, not only pairs that are valid-utf8. fn len(&self) -> usize { unsafe { (*self.get_dict_ptr()).n_items as usize } } /// Returns `true` if the dict is empty, `false` if it is not. fn is_empty(&self) -> bool { self.len() == 0 } /// Returns the bitflags that are set for the dict. fn flags(&self) -> Flags { Flags::from_bits_truncate(unsafe { (*self.get_dict_ptr()).flags }) } /// Get the value associated with the provided key. /// /// If the dict does not contain the key or the value is non-utf8, `None` is returned. /// Use [`iter_cstr`] if you need a non-utf8 key or value. /// /// [`iter_cstr`]: #method.iter_cstr // FIXME: Some items might be integers, booleans, floats, doubles or pointers instead of strings. // Perhaps we should return an enum that can be any of these values. // See https://gitlab.freedesktop.org/pipewire/pipewire-rs/-/merge_requests/12#note_695914. fn get(&self, key: &str) -> Option<&str> { self.iter().find(|(k, _)| *k == key).map(|(_, v)| v) } } pub trait WritableDict { /// Insert the key-value pair, overwriting any old value. fn insert<T: Into<Vec<u8>>>(&mut self, key: T, value: T); /// Remove the key-value pair if it exists. fn remove<T: Into<Vec<u8>>>(&mut self, key: T); /// Clear the object, removing all key-value pairs. fn clear(&mut self); } /// A wrapper for a `*const spa_dict` struct that does not take ownership of the data, /// useful for dicts shared to us via FFI. pub struct ForeignDict(*const spa_sys::spa_dict); impl ForeignDict { /// Wraps the provided pointer in a read-only `ForeignDict` struct without taking ownership of the struct pointed to. /// /// # Safety /// /// - The provided pointer must point to a valid, well-aligned `spa_dict` struct, and must not be `NULL`. /// - The struct pointed to must be kept valid for the entire lifetime of the created `Dict`. /// /// Violating any of these rules will result in undefined behaviour. pub unsafe fn from_ptr(dict: *const spa_sys::spa_dict) -> Self { debug_assert!( !dict.is_null(), "Dict must not be created from a pointer that is NULL" ); Self(dict) } } impl ReadableDict for ForeignDict { fn get_dict_ptr(&self) -> *const spa_sys::spa_dict { self.0 } } impl fmt::Debug for ForeignDict { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { // FIXME: Find a way to display flags too. f.debug_map().entries(self.iter_cstr()).finish() } } bitflags! 
{ pub struct Flags: u32 { // These flags are redefinitions from // https://gitlab.freedesktop.org/pipewire/pipewire/-/blob/master/spa/include/spa/utils/dict.h const SORTED = spa_sys::SPA_DICT_FLAG_SORTED; } } pub struct CIter<'a> { next: *const spa_sys::spa_dict_item, /// Points to the first element outside of the allocated area. end: *const spa_sys::spa_dict_item, _phantom: PhantomData<&'a str>, } impl<'a> Iterator for CIter<'a> { type Item = (&'a CStr, &'a CStr); fn next(&mut self) -> Option<Self::Item> { if!self.next.is_null() && self.next < self.end { let k = unsafe { CStr::from_ptr((*self.next).key) }; let v = unsafe { CStr::from_ptr((*self.next).value) }; self.next = unsafe { self.next.add(1) }; Some((k, v)) } else { None } } fn size_hint(&self) -> (usize, Option<usize>) { let bound: usize = unsafe { self.next.offset_from(self.end) as usize }; // We know the exact value, so lower bound and upper bound are the same. (bound, Some(bound)) } } pub struct Iter<'a> { inner: CIter<'a>, } impl<'a> Iterator for Iter<'a> { type Item = (&'a str, &'a str); fn next(&mut self) -> Option<Self::Item> { self.inner .find_map(|(k, v)| k.to_str().ok().zip(v.to_str().ok())) } fn size_hint(&self) -> (usize, Option<usize>) { // Lower bound is 0, as all keys left might not be valid UTF-8. (0, self.inner.size_hint().1) } } pub struct Keys<'a> { inner: CIter<'a>, } impl<'a> Iterator for Keys<'a> { type Item = &'a str; fn next(&mut self) -> Option<Self::Item> { self.inner.find_map(|(k, _)| k.to_str().ok()) } fn size_hint(&self) -> (usize, Option<usize>) { self.inner.size_hint() } } pub struct Values<'a> { inner: CIter<'a>, } impl<'a> Iterator for Values<'a> { type Item = &'a str; fn next(&mut self) -> Option<Self::Item> { self.inner.find_map(|(_, v)| v.to_str().ok()) } fn size_hint(&self) -> (usize, Option<usize>) { self.inner.size_hint() } } #[cfg(test)] mod tests { use super::{Flags, ForeignDict, ReadableDict}; use spa_sys::{spa_dict, spa_dict_item}; use std::{ffi::CString, ptr}; /// Create a raw dict with the specified number of key-value pairs. /// /// `num_items` must not be zero, or this function will panic. /// /// Each key value pair is `("K<n>", "V<n>")`, with *\<n\>* being an element of the range `0..num_items`. /// /// The function returns a tuple consisting of: /// 1. An allocation (`Vec`) containing the raw Key and Value Strings. /// 2. An allocation (`Vec`) containing all the items. /// 3. The created `spa_dict` struct. /// /// The first two items must be kept alive for the entire lifetime of the returned `spa_dict` struct. fn make_raw_dict( num_items: u32, ) -> ( Vec<(CString, CString)>, Vec<spa_dict_item>, spa_sys::spa_dict, ) { assert!(num_items!= 0, "num_items must not be zero"); let mut strings: Vec<(CString, CString)> = Vec::with_capacity(num_items as usize); let mut items: Vec<spa_dict_item> = Vec::with_capacity(num_items as usize); for i in 0..num_items { let k = CString::new(format!("K{}", i)).unwrap(); let v = CString::new(format!("V{}", i)).unwrap(); let item = spa_dict_item { key: k.as_ptr(), value: v.as_ptr(), }; strings.push((k, v)); items.push(item); } let raw = spa_dict { flags: Flags::empty().bits, n_items: num_items, items: items.as_ptr(), }; (strings, items, raw) } #[test] fn test_empty_dict() { let raw = spa_dict { flags: Flags::empty().bits, n_items: 0, items: ptr::null(), }; let dict = unsafe { ForeignDict::from_ptr(&raw) }; let iter = dict.iter_cstr(); assert_eq!(0, dict.len()); iter.for_each(|_| panic!("Iterated over non-existing item")); } #[test] fn
() { let (_strings, _items, raw) = make_raw_dict(2); let dict = unsafe { ForeignDict::from_ptr(&raw) }; let mut iter = dict.iter_cstr(); assert_eq!( ( CString::new("K0").unwrap().as_c_str(), CString::new("V0").unwrap().as_c_str() ), iter.next().unwrap() ); assert_eq!( ( CString::new("K1").unwrap().as_c_str(), CString::new("V1").unwrap().as_c_str() ), iter.next().unwrap() ); assert_eq!(None, iter.next()); } #[test] fn test_iterators() { let (_strings, _items, raw) = make_raw_dict(2); let dict = unsafe { ForeignDict::from_ptr(&raw) }; let mut iter = dict.iter(); assert_eq!(("K0", "V0"), iter.next().unwrap()); assert_eq!(("K1", "V1"), iter.next().unwrap()); assert_eq!(None, iter.next()); let mut key_iter = dict.keys(); assert_eq!("K0", key_iter.next().unwrap()); assert_eq!("K1", key_iter.next().unwrap()); assert_eq!(None, key_iter.next()); let mut val_iter = dict.values(); assert_eq!("V0", val_iter.next().unwrap()); assert_eq!("V1", val_iter.next().unwrap()); assert_eq!(None, val_iter.next()); } #[test] fn test_get() { let (_strings, _items, raw) = make_raw_dict(1); let dict = unsafe { ForeignDict::from_ptr(&raw) }; assert_eq!(Some("V0"), dict.get("K0")); } #[test] fn test_debug() { let (_strings, _items, raw) = make_raw_dict(1); let dict = unsafe { ForeignDict::from_ptr(&raw) }; assert_eq!(r#"{"K0": "V0"}"#, &format!("{:?}", dict)) } }
test_iter_cstr
identifier_name
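The dict.rs records above expose dictionary access through the `ReadableDict` trait. A minimal, illustrative consumer sketch follows; it assumes `ReadableDict` is in scope as defined in those records and uses only methods the trait declares (the function name and the "K0" key are hypothetical):

```rust
// Hypothetical helper that works for any ReadableDict implementor (e.g. ForeignDict).
fn dump_dict(dict: &impl ReadableDict) {
    // len() counts every pair, including ones that are not valid UTF-8.
    println!("{} entries", dict.len());
    // iter() silently skips pairs whose key or value is not valid UTF-8.
    for (key, value) in dict.iter() {
        println!("{} = {}", key, value);
    }
    // get() returns None for missing keys and for non-UTF-8 entries.
    if let Some(value) = dict.get("K0") {
        println!("lookup K0 -> {}", value);
    }
}
```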
dict.rs
use bitflags::bitflags; use std::{ffi::CStr, fmt, marker::PhantomData}; pub trait ReadableDict { /// Obtain the pointer to the raw `spa_dict` struct. fn get_dict_ptr(&self) -> *const spa_sys::spa_dict; /// An iterator over all raw key-value pairs. /// The iterator element type is `(&CStr, &CStr)`. fn iter_cstr(&self) -> CIter { let first_elem_ptr = unsafe { (*self.get_dict_ptr()).items }; CIter { next: first_elem_ptr, end: unsafe { first_elem_ptr.offset((*self.get_dict_ptr()).n_items as isize) }, _phantom: PhantomData, } } /// An iterator over all key-value pairs that are valid utf-8. /// The iterator element type is `(&str, &str)`. fn iter(&self) -> Iter { Iter { inner: self.iter_cstr(), } } /// An iterator over all keys that are valid utf-8. /// The iterator element type is &str. fn keys(&self) -> Keys { Keys { inner: self.iter_cstr(), } } /// An iterator over all values that are valid utf-8. /// The iterator element type is &str. fn values(&self) -> Values { Values { inner: self.iter_cstr(), } } /// Returns the number of key-value-pairs in the dict. /// This is the number of all pairs, not only pairs that are valid-utf8. fn len(&self) -> usize { unsafe { (*self.get_dict_ptr()).n_items as usize } } /// Returns `true` if the dict is empty, `false` if it is not. fn is_empty(&self) -> bool { self.len() == 0 } /// Returns the bitflags that are set for the dict. fn flags(&self) -> Flags { Flags::from_bits_truncate(unsafe { (*self.get_dict_ptr()).flags }) } /// Get the value associated with the provided key. /// /// If the dict does not contain the key or the value is non-utf8, `None` is returned. /// Use [`iter_cstr`] if you need a non-utf8 key or value. /// /// [`iter_cstr`]: #method.iter_cstr // FIXME: Some items might be integers, booleans, floats, doubles or pointers instead of strings. // Perhaps we should return an enum that can be any of these values. // See https://gitlab.freedesktop.org/pipewire/pipewire-rs/-/merge_requests/12#note_695914. fn get(&self, key: &str) -> Option<&str> { self.iter().find(|(k, _)| *k == key).map(|(_, v)| v) } } pub trait WritableDict { /// Insert the key-value pair, overwriting any old value. fn insert<T: Into<Vec<u8>>>(&mut self, key: T, value: T); /// Remove the key-value pair if it exists. fn remove<T: Into<Vec<u8>>>(&mut self, key: T); /// Clear the object, removing all key-value pairs. fn clear(&mut self); } /// A wrapper for a `*const spa_dict` struct that does not take ownership of the data, /// useful for dicts shared to us via FFI. pub struct ForeignDict(*const spa_sys::spa_dict); impl ForeignDict { /// Wraps the provided pointer in a read-only `ForeignDict` struct without taking ownership of the struct pointed to. /// /// # Safety /// /// - The provided pointer must point to a valid, well-aligned `spa_dict` struct, and must not be `NULL`. /// - The struct pointed to must be kept valid for the entire lifetime of the created `Dict`. /// /// Violating any of these rules will result in undefined behaviour. pub unsafe fn from_ptr(dict: *const spa_sys::spa_dict) -> Self { debug_assert!( !dict.is_null(), "Dict must not be created from a pointer that is NULL" ); Self(dict) } } impl ReadableDict for ForeignDict { fn get_dict_ptr(&self) -> *const spa_sys::spa_dict { self.0 } } impl fmt::Debug for ForeignDict { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { // FIXME: Find a way to display flags too. f.debug_map().entries(self.iter_cstr()).finish() } } bitflags! 
{ pub struct Flags: u32 { // These flags are redefinitions from // https://gitlab.freedesktop.org/pipewire/pipewire/-/blob/master/spa/include/spa/utils/dict.h const SORTED = spa_sys::SPA_DICT_FLAG_SORTED; } } pub struct CIter<'a> { next: *const spa_sys::spa_dict_item, /// Points to the first element outside of the allocated area. end: *const spa_sys::spa_dict_item, _phantom: PhantomData<&'a str>, } impl<'a> Iterator for CIter<'a> { type Item = (&'a CStr, &'a CStr); fn next(&mut self) -> Option<Self::Item> { if!self.next.is_null() && self.next < self.end { let k = unsafe { CStr::from_ptr((*self.next).key) }; let v = unsafe { CStr::from_ptr((*self.next).value) }; self.next = unsafe { self.next.add(1) }; Some((k, v)) } else
} fn size_hint(&self) -> (usize, Option<usize>) { let bound: usize = unsafe { self.next.offset_from(self.end) as usize }; // We know the exact value, so lower bound and upper bound are the same. (bound, Some(bound)) } } pub struct Iter<'a> { inner: CIter<'a>, } impl<'a> Iterator for Iter<'a> { type Item = (&'a str, &'a str); fn next(&mut self) -> Option<Self::Item> { self.inner .find_map(|(k, v)| k.to_str().ok().zip(v.to_str().ok())) } fn size_hint(&self) -> (usize, Option<usize>) { // Lower bound is 0, as all keys left might not be valid UTF-8. (0, self.inner.size_hint().1) } } pub struct Keys<'a> { inner: CIter<'a>, } impl<'a> Iterator for Keys<'a> { type Item = &'a str; fn next(&mut self) -> Option<Self::Item> { self.inner.find_map(|(k, _)| k.to_str().ok()) } fn size_hint(&self) -> (usize, Option<usize>) { self.inner.size_hint() } } pub struct Values<'a> { inner: CIter<'a>, } impl<'a> Iterator for Values<'a> { type Item = &'a str; fn next(&mut self) -> Option<Self::Item> { self.inner.find_map(|(_, v)| v.to_str().ok()) } fn size_hint(&self) -> (usize, Option<usize>) { self.inner.size_hint() } } #[cfg(test)] mod tests { use super::{Flags, ForeignDict, ReadableDict}; use spa_sys::{spa_dict, spa_dict_item}; use std::{ffi::CString, ptr}; /// Create a raw dict with the specified number of key-value pairs. /// /// `num_items` must not be zero, or this function will panic. /// /// Each key value pair is `("K<n>", "V<n>")`, with *\<n\>* being an element of the range `0..num_items`. /// /// The function returns a tuple consisting of: /// 1. An allocation (`Vec`) containing the raw Key and Value Strings. /// 2. An allocation (`Vec`) containing all the items. /// 3. The created `spa_dict` struct. /// /// The first two items must be kept alive for the entire lifetime of the returned `spa_dict` struct. 
fn make_raw_dict( num_items: u32, ) -> ( Vec<(CString, CString)>, Vec<spa_dict_item>, spa_sys::spa_dict, ) { assert!(num_items!= 0, "num_items must not be zero"); let mut strings: Vec<(CString, CString)> = Vec::with_capacity(num_items as usize); let mut items: Vec<spa_dict_item> = Vec::with_capacity(num_items as usize); for i in 0..num_items { let k = CString::new(format!("K{}", i)).unwrap(); let v = CString::new(format!("V{}", i)).unwrap(); let item = spa_dict_item { key: k.as_ptr(), value: v.as_ptr(), }; strings.push((k, v)); items.push(item); } let raw = spa_dict { flags: Flags::empty().bits, n_items: num_items, items: items.as_ptr(), }; (strings, items, raw) } #[test] fn test_empty_dict() { let raw = spa_dict { flags: Flags::empty().bits, n_items: 0, items: ptr::null(), }; let dict = unsafe { ForeignDict::from_ptr(&raw) }; let iter = dict.iter_cstr(); assert_eq!(0, dict.len()); iter.for_each(|_| panic!("Iterated over non-existing item")); } #[test] fn test_iter_cstr() { let (_strings, _items, raw) = make_raw_dict(2); let dict = unsafe { ForeignDict::from_ptr(&raw) }; let mut iter = dict.iter_cstr(); assert_eq!( ( CString::new("K0").unwrap().as_c_str(), CString::new("V0").unwrap().as_c_str() ), iter.next().unwrap() ); assert_eq!( ( CString::new("K1").unwrap().as_c_str(), CString::new("V1").unwrap().as_c_str() ), iter.next().unwrap() ); assert_eq!(None, iter.next()); } #[test] fn test_iterators() { let (_strings, _items, raw) = make_raw_dict(2); let dict = unsafe { ForeignDict::from_ptr(&raw) }; let mut iter = dict.iter(); assert_eq!(("K0", "V0"), iter.next().unwrap()); assert_eq!(("K1", "V1"), iter.next().unwrap()); assert_eq!(None, iter.next()); let mut key_iter = dict.keys(); assert_eq!("K0", key_iter.next().unwrap()); assert_eq!("K1", key_iter.next().unwrap()); assert_eq!(None, key_iter.next()); let mut val_iter = dict.values(); assert_eq!("V0", val_iter.next().unwrap()); assert_eq!("V1", val_iter.next().unwrap()); assert_eq!(None, val_iter.next()); } #[test] fn test_get() { let (_strings, _items, raw) = make_raw_dict(1); let dict = unsafe { ForeignDict::from_ptr(&raw) }; assert_eq!(Some("V0"), dict.get("K0")); } #[test] fn test_debug() { let (_strings, _items, raw) = make_raw_dict(1); let dict = unsafe { ForeignDict::from_ptr(&raw) }; assert_eq!(r#"{"K0": "V0"}"#, &format!("{:?}", dict)) } }
{ None }
conditional_block
dict.rs
use bitflags::bitflags; use std::{ffi::CStr, fmt, marker::PhantomData}; pub trait ReadableDict { /// Obtain the pointer to the raw `spa_dict` struct. fn get_dict_ptr(&self) -> *const spa_sys::spa_dict; /// An iterator over all raw key-value pairs. /// The iterator element type is `(&CStr, &CStr)`. fn iter_cstr(&self) -> CIter { let first_elem_ptr = unsafe { (*self.get_dict_ptr()).items }; CIter { next: first_elem_ptr, end: unsafe { first_elem_ptr.offset((*self.get_dict_ptr()).n_items as isize) }, _phantom: PhantomData, } } /// An iterator over all key-value pairs that are valid utf-8. /// The iterator element type is `(&str, &str)`. fn iter(&self) -> Iter { Iter { inner: self.iter_cstr(), } } /// An iterator over all keys that are valid utf-8. /// The iterator element type is &str. fn keys(&self) -> Keys { Keys { inner: self.iter_cstr(), } } /// An iterator over all values that are valid utf-8. /// The iterator element type is &str. fn values(&self) -> Values { Values { inner: self.iter_cstr(), } } /// Returns the number of key-value-pairs in the dict. /// This is the number of all pairs, not only pairs that are valid-utf8. fn len(&self) -> usize { unsafe { (*self.get_dict_ptr()).n_items as usize } } /// Returns `true` if the dict is empty, `false` if it is not. fn is_empty(&self) -> bool { self.len() == 0 } /// Returns the bitflags that are set for the dict. fn flags(&self) -> Flags { Flags::from_bits_truncate(unsafe { (*self.get_dict_ptr()).flags }) } /// Get the value associated with the provided key. /// /// If the dict does not contain the key or the value is non-utf8, `None` is returned. /// Use [`iter_cstr`] if you need a non-utf8 key or value. /// /// [`iter_cstr`]: #method.iter_cstr // FIXME: Some items might be integers, booleans, floats, doubles or pointers instead of strings. // Perhaps we should return an enum that can be any of these values. // See https://gitlab.freedesktop.org/pipewire/pipewire-rs/-/merge_requests/12#note_695914. fn get(&self, key: &str) -> Option<&str> { self.iter().find(|(k, _)| *k == key).map(|(_, v)| v) } } pub trait WritableDict { /// Insert the key-value pair, overwriting any old value. fn insert<T: Into<Vec<u8>>>(&mut self, key: T, value: T); /// Remove the key-value pair if it exists. fn remove<T: Into<Vec<u8>>>(&mut self, key: T); /// Clear the object, removing all key-value pairs. fn clear(&mut self); } /// A wrapper for a `*const spa_dict` struct that does not take ownership of the data, /// useful for dicts shared to us via FFI. pub struct ForeignDict(*const spa_sys::spa_dict); impl ForeignDict { /// Wraps the provided pointer in a read-only `ForeignDict` struct without taking ownership of the struct pointed to. /// /// # Safety /// /// - The provided pointer must point to a valid, well-aligned `spa_dict` struct, and must not be `NULL`. /// - The struct pointed to must be kept valid for the entire lifetime of the created `Dict`. /// /// Violating any of these rules will result in undefined behaviour. pub unsafe fn from_ptr(dict: *const spa_sys::spa_dict) -> Self { debug_assert!( !dict.is_null(), "Dict must not be created from a pointer that is NULL" ); Self(dict) } } impl ReadableDict for ForeignDict { fn get_dict_ptr(&self) -> *const spa_sys::spa_dict { self.0 } } impl fmt::Debug for ForeignDict { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { // FIXME: Find a way to display flags too. f.debug_map().entries(self.iter_cstr()).finish() } } bitflags! 
{ pub struct Flags: u32 { // These flags are redefinitions from // https://gitlab.freedesktop.org/pipewire/pipewire/-/blob/master/spa/include/spa/utils/dict.h const SORTED = spa_sys::SPA_DICT_FLAG_SORTED; } } pub struct CIter<'a> { next: *const spa_sys::spa_dict_item, /// Points to the first element outside of the allocated area. end: *const spa_sys::spa_dict_item, _phantom: PhantomData<&'a str>, } impl<'a> Iterator for CIter<'a> { type Item = (&'a CStr, &'a CStr); fn next(&mut self) -> Option<Self::Item> { if!self.next.is_null() && self.next < self.end { let k = unsafe { CStr::from_ptr((*self.next).key) }; let v = unsafe { CStr::from_ptr((*self.next).value) }; self.next = unsafe { self.next.add(1) }; Some((k, v)) } else { None } } fn size_hint(&self) -> (usize, Option<usize>) { let bound: usize = unsafe { self.next.offset_from(self.end) as usize }; // We know the exact value, so lower bound and upper bound are the same. (bound, Some(bound)) } } pub struct Iter<'a> { inner: CIter<'a>, } impl<'a> Iterator for Iter<'a> { type Item = (&'a str, &'a str); fn next(&mut self) -> Option<Self::Item> { self.inner .find_map(|(k, v)| k.to_str().ok().zip(v.to_str().ok())) } fn size_hint(&self) -> (usize, Option<usize>) { // Lower bound is 0, as all keys left might not be valid UTF-8. (0, self.inner.size_hint().1) } } pub struct Keys<'a> { inner: CIter<'a>, } impl<'a> Iterator for Keys<'a> { type Item = &'a str; fn next(&mut self) -> Option<Self::Item> { self.inner.find_map(|(k, _)| k.to_str().ok()) } fn size_hint(&self) -> (usize, Option<usize>) { self.inner.size_hint() } } pub struct Values<'a> { inner: CIter<'a>, } impl<'a> Iterator for Values<'a> { type Item = &'a str; fn next(&mut self) -> Option<Self::Item> { self.inner.find_map(|(_, v)| v.to_str().ok()) } fn size_hint(&self) -> (usize, Option<usize>) { self.inner.size_hint() } } #[cfg(test)] mod tests { use super::{Flags, ForeignDict, ReadableDict}; use spa_sys::{spa_dict, spa_dict_item}; use std::{ffi::CString, ptr}; /// Create a raw dict with the specified number of key-value pairs. /// /// `num_items` must not be zero, or this function will panic. /// /// Each key value pair is `("K<n>", "V<n>")`, with *\<n\>* being an element of the range `0..num_items`. /// /// The function returns a tuple consisting of: /// 1. An allocation (`Vec`) containing the raw Key and Value Strings. /// 2. An allocation (`Vec`) containing all the items. /// 3. The created `spa_dict` struct. /// /// The first two items must be kept alive for the entire lifetime of the returned `spa_dict` struct. 
fn make_raw_dict( num_items: u32, ) -> ( Vec<(CString, CString)>, Vec<spa_dict_item>, spa_sys::spa_dict, ) { assert!(num_items!= 0, "num_items must not be zero"); let mut strings: Vec<(CString, CString)> = Vec::with_capacity(num_items as usize); let mut items: Vec<spa_dict_item> = Vec::with_capacity(num_items as usize); for i in 0..num_items { let k = CString::new(format!("K{}", i)).unwrap(); let v = CString::new(format!("V{}", i)).unwrap(); let item = spa_dict_item { key: k.as_ptr(), value: v.as_ptr(), }; strings.push((k, v)); items.push(item); } let raw = spa_dict { flags: Flags::empty().bits, n_items: num_items, items: items.as_ptr(), }; (strings, items, raw) } #[test] fn test_empty_dict() { let raw = spa_dict { flags: Flags::empty().bits, n_items: 0, items: ptr::null(), }; let dict = unsafe { ForeignDict::from_ptr(&raw) }; let iter = dict.iter_cstr(); assert_eq!(0, dict.len()); iter.for_each(|_| panic!("Iterated over non-existing item")); } #[test] fn test_iter_cstr() { let (_strings, _items, raw) = make_raw_dict(2); let dict = unsafe { ForeignDict::from_ptr(&raw) }; let mut iter = dict.iter_cstr(); assert_eq!( ( CString::new("K0").unwrap().as_c_str(), CString::new("V0").unwrap().as_c_str() ), iter.next().unwrap() ); assert_eq!( ( CString::new("K1").unwrap().as_c_str(), CString::new("V1").unwrap().as_c_str() ), iter.next().unwrap() ); assert_eq!(None, iter.next()); } #[test] fn test_iterators() { let (_strings, _items, raw) = make_raw_dict(2); let dict = unsafe { ForeignDict::from_ptr(&raw) }; let mut iter = dict.iter(); assert_eq!(("K0", "V0"), iter.next().unwrap()); assert_eq!(("K1", "V1"), iter.next().unwrap()); assert_eq!(None, iter.next()); let mut key_iter = dict.keys(); assert_eq!("K0", key_iter.next().unwrap()); assert_eq!("K1", key_iter.next().unwrap()); assert_eq!(None, key_iter.next()); let mut val_iter = dict.values(); assert_eq!("V0", val_iter.next().unwrap()); assert_eq!("V1", val_iter.next().unwrap()); assert_eq!(None, val_iter.next()); } #[test] fn test_get() { let (_strings, _items, raw) = make_raw_dict(1); let dict = unsafe { ForeignDict::from_ptr(&raw) }; assert_eq!(Some("V0"), dict.get("K0")); } #[test] fn test_debug()
}
{
    let (_strings, _items, raw) = make_raw_dict(1);
    let dict = unsafe { ForeignDict::from_ptr(&raw) };
    assert_eq!(r#"{"K0": "V0"}"#, &format!("{:?}", dict))
}
identifier_body
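The `ForeignDict` shown in this row is normally constructed from a pointer received over FFI, but it can also be exercised locally by assembling a `spa_dict` by hand, exactly as the unit tests do. A minimal sketch, assuming `spa_sys` and this dict module are in scope; the key and value strings are illustrative only:

```rust
use std::ffi::CString;

// The CStrings and the item array must outlive the raw dict they back.
let key = CString::new("media.class").unwrap();
let value = CString::new("Audio/Sink").unwrap();
let items = [spa_sys::spa_dict_item {
    key: key.as_ptr(),
    value: value.as_ptr(),
}];
let raw = spa_sys::spa_dict {
    flags: 0,
    n_items: items.len() as u32,
    items: items.as_ptr(),
};

// Safety: `raw` is non-NULL, well-aligned and outlives `dict`.
let dict = unsafe { ForeignDict::from_ptr(&raw) };

assert_eq!(dict.len(), 1);
assert_eq!(dict.get("media.class"), Some("Audio/Sink"));
for (k, v) in dict.iter() {
    println!("{}: {}", k, v);
}
```

Because `ForeignDict` never takes ownership, dropping it leaves `raw` and the backing strings untouched.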
dict.rs
use bitflags::bitflags; use std::{ffi::CStr, fmt, marker::PhantomData}; pub trait ReadableDict { /// Obtain the pointer to the raw `spa_dict` struct. fn get_dict_ptr(&self) -> *const spa_sys::spa_dict; /// An iterator over all raw key-value pairs. /// The iterator element type is `(&CStr, &CStr)`. fn iter_cstr(&self) -> CIter { let first_elem_ptr = unsafe { (*self.get_dict_ptr()).items }; CIter { next: first_elem_ptr, end: unsafe { first_elem_ptr.offset((*self.get_dict_ptr()).n_items as isize) }, _phantom: PhantomData, } } /// An iterator over all key-value pairs that are valid utf-8. /// The iterator element type is `(&str, &str)`. fn iter(&self) -> Iter { Iter { inner: self.iter_cstr(), } } /// An iterator over all keys that are valid utf-8. /// The iterator element type is &str. fn keys(&self) -> Keys { Keys { inner: self.iter_cstr(), } } /// An iterator over all values that are valid utf-8. /// The iterator element type is &str. fn values(&self) -> Values { Values { inner: self.iter_cstr(), } } /// Returns the number of key-value-pairs in the dict. /// This is the number of all pairs, not only pairs that are valid-utf8. fn len(&self) -> usize { unsafe { (*self.get_dict_ptr()).n_items as usize } } /// Returns `true` if the dict is empty, `false` if it is not.
fn is_empty(&self) -> bool { self.len() == 0 } /// Returns the bitflags that are set for the dict. fn flags(&self) -> Flags { Flags::from_bits_truncate(unsafe { (*self.get_dict_ptr()).flags }) } /// Get the value associated with the provided key. /// /// If the dict does not contain the key or the value is non-utf8, `None` is returned. /// Use [`iter_cstr`] if you need a non-utf8 key or value. /// /// [`iter_cstr`]: #method.iter_cstr // FIXME: Some items might be integers, booleans, floats, doubles or pointers instead of strings. // Perhaps we should return an enum that can be any of these values. // See https://gitlab.freedesktop.org/pipewire/pipewire-rs/-/merge_requests/12#note_695914. fn get(&self, key: &str) -> Option<&str> { self.iter().find(|(k, _)| *k == key).map(|(_, v)| v) } } pub trait WritableDict { /// Insert the key-value pair, overwriting any old value. fn insert<T: Into<Vec<u8>>>(&mut self, key: T, value: T); /// Remove the key-value pair if it exists. fn remove<T: Into<Vec<u8>>>(&mut self, key: T); /// Clear the object, removing all key-value pairs. fn clear(&mut self); } /// A wrapper for a `*const spa_dict` struct that does not take ownership of the data, /// useful for dicts shared to us via FFI. pub struct ForeignDict(*const spa_sys::spa_dict); impl ForeignDict { /// Wraps the provided pointer in a read-only `ForeignDict` struct without taking ownership of the struct pointed to. /// /// # Safety /// /// - The provided pointer must point to a valid, well-aligned `spa_dict` struct, and must not be `NULL`. /// - The struct pointed to must be kept valid for the entire lifetime of the created `Dict`. /// /// Violating any of these rules will result in undefined behaviour. pub unsafe fn from_ptr(dict: *const spa_sys::spa_dict) -> Self { debug_assert!( !dict.is_null(), "Dict must not be created from a pointer that is NULL" ); Self(dict) } } impl ReadableDict for ForeignDict { fn get_dict_ptr(&self) -> *const spa_sys::spa_dict { self.0 } } impl fmt::Debug for ForeignDict { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { // FIXME: Find a way to display flags too. f.debug_map().entries(self.iter_cstr()).finish() } } bitflags! { pub struct Flags: u32 { // These flags are redefinitions from // https://gitlab.freedesktop.org/pipewire/pipewire/-/blob/master/spa/include/spa/utils/dict.h const SORTED = spa_sys::SPA_DICT_FLAG_SORTED; } } pub struct CIter<'a> { next: *const spa_sys::spa_dict_item, /// Points to the first element outside of the allocated area. end: *const spa_sys::spa_dict_item, _phantom: PhantomData<&'a str>, } impl<'a> Iterator for CIter<'a> { type Item = (&'a CStr, &'a CStr); fn next(&mut self) -> Option<Self::Item> { if!self.next.is_null() && self.next < self.end { let k = unsafe { CStr::from_ptr((*self.next).key) }; let v = unsafe { CStr::from_ptr((*self.next).value) }; self.next = unsafe { self.next.add(1) }; Some((k, v)) } else { None } } fn size_hint(&self) -> (usize, Option<usize>) { let bound: usize = unsafe { self.next.offset_from(self.end) as usize }; // We know the exact value, so lower bound and upper bound are the same. (bound, Some(bound)) } } pub struct Iter<'a> { inner: CIter<'a>, } impl<'a> Iterator for Iter<'a> { type Item = (&'a str, &'a str); fn next(&mut self) -> Option<Self::Item> { self.inner .find_map(|(k, v)| k.to_str().ok().zip(v.to_str().ok())) } fn size_hint(&self) -> (usize, Option<usize>) { // Lower bound is 0, as all keys left might not be valid UTF-8. 
(0, self.inner.size_hint().1) } } pub struct Keys<'a> { inner: CIter<'a>, } impl<'a> Iterator for Keys<'a> { type Item = &'a str; fn next(&mut self) -> Option<Self::Item> { self.inner.find_map(|(k, _)| k.to_str().ok()) } fn size_hint(&self) -> (usize, Option<usize>) { self.inner.size_hint() } } pub struct Values<'a> { inner: CIter<'a>, } impl<'a> Iterator for Values<'a> { type Item = &'a str; fn next(&mut self) -> Option<Self::Item> { self.inner.find_map(|(_, v)| v.to_str().ok()) } fn size_hint(&self) -> (usize, Option<usize>) { self.inner.size_hint() } } #[cfg(test)] mod tests { use super::{Flags, ForeignDict, ReadableDict}; use spa_sys::{spa_dict, spa_dict_item}; use std::{ffi::CString, ptr}; /// Create a raw dict with the specified number of key-value pairs. /// /// `num_items` must not be zero, or this function will panic. /// /// Each key value pair is `("K<n>", "V<n>")`, with *\<n\>* being an element of the range `0..num_items`. /// /// The function returns a tuple consisting of: /// 1. An allocation (`Vec`) containing the raw Key and Value Strings. /// 2. An allocation (`Vec`) containing all the items. /// 3. The created `spa_dict` struct. /// /// The first two items must be kept alive for the entire lifetime of the returned `spa_dict` struct. fn make_raw_dict( num_items: u32, ) -> ( Vec<(CString, CString)>, Vec<spa_dict_item>, spa_sys::spa_dict, ) { assert!(num_items!= 0, "num_items must not be zero"); let mut strings: Vec<(CString, CString)> = Vec::with_capacity(num_items as usize); let mut items: Vec<spa_dict_item> = Vec::with_capacity(num_items as usize); for i in 0..num_items { let k = CString::new(format!("K{}", i)).unwrap(); let v = CString::new(format!("V{}", i)).unwrap(); let item = spa_dict_item { key: k.as_ptr(), value: v.as_ptr(), }; strings.push((k, v)); items.push(item); } let raw = spa_dict { flags: Flags::empty().bits, n_items: num_items, items: items.as_ptr(), }; (strings, items, raw) } #[test] fn test_empty_dict() { let raw = spa_dict { flags: Flags::empty().bits, n_items: 0, items: ptr::null(), }; let dict = unsafe { ForeignDict::from_ptr(&raw) }; let iter = dict.iter_cstr(); assert_eq!(0, dict.len()); iter.for_each(|_| panic!("Iterated over non-existing item")); } #[test] fn test_iter_cstr() { let (_strings, _items, raw) = make_raw_dict(2); let dict = unsafe { ForeignDict::from_ptr(&raw) }; let mut iter = dict.iter_cstr(); assert_eq!( ( CString::new("K0").unwrap().as_c_str(), CString::new("V0").unwrap().as_c_str() ), iter.next().unwrap() ); assert_eq!( ( CString::new("K1").unwrap().as_c_str(), CString::new("V1").unwrap().as_c_str() ), iter.next().unwrap() ); assert_eq!(None, iter.next()); } #[test] fn test_iterators() { let (_strings, _items, raw) = make_raw_dict(2); let dict = unsafe { ForeignDict::from_ptr(&raw) }; let mut iter = dict.iter(); assert_eq!(("K0", "V0"), iter.next().unwrap()); assert_eq!(("K1", "V1"), iter.next().unwrap()); assert_eq!(None, iter.next()); let mut key_iter = dict.keys(); assert_eq!("K0", key_iter.next().unwrap()); assert_eq!("K1", key_iter.next().unwrap()); assert_eq!(None, key_iter.next()); let mut val_iter = dict.values(); assert_eq!("V0", val_iter.next().unwrap()); assert_eq!("V1", val_iter.next().unwrap()); assert_eq!(None, val_iter.next()); } #[test] fn test_get() { let (_strings, _items, raw) = make_raw_dict(1); let dict = unsafe { ForeignDict::from_ptr(&raw) }; assert_eq!(Some("V0"), dict.get("K0")); } #[test] fn test_debug() { let (_strings, _items, raw) = make_raw_dict(1); let dict = unsafe { ForeignDict::from_ptr(&raw) }; 
assert_eq!(r#"{"K0": "V0"}"#, &format!("{:?}", dict)) } }
random_line_split
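One behaviour worth calling out in this row: `iter` and `get` skip any pair where either side is not valid UTF-8, `keys` and `values` skip only when their own side is invalid, while `iter_cstr` and `len` always see every entry. A small sketch of the difference, reusing the hand-built `spa_dict` approach from the tests; the byte values are just an arbitrary non-UTF-8 example:

```rust
use std::ffi::CString;

let key = CString::new("binary.blob").unwrap();
// 0xFF and 0xFE are legal C-string bytes but not valid UTF-8.
let value = CString::new(vec![0xFFu8, 0xFEu8]).unwrap();
let items = [spa_sys::spa_dict_item {
    key: key.as_ptr(),
    value: value.as_ptr(),
}];
let raw = spa_sys::spa_dict {
    flags: 0,
    n_items: items.len() as u32,
    items: items.as_ptr(),
};

let dict = unsafe { ForeignDict::from_ptr(&raw) };

// The UTF-8-only views skip the pair entirely...
assert_eq!(dict.get("binary.blob"), None);
assert_eq!(dict.iter().count(), 0);

// ...while the raw views still report it.
assert_eq!(dict.iter_cstr().count(), 1);
assert_eq!(dict.len(), 1);
```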
lib.rs
//! # Substrate Enterprise Sample - Product Tracking pallet #![cfg_attr(not(feature = "std"), no_std)] use codec::alloc::string::ToString; use core::convert::TryInto; use frame_support::{ debug, decl_error, decl_event, decl_module, decl_storage, dispatch, ensure, sp_runtime::offchain::{ self as rt_offchain, storage::StorageValueRef, storage_lock::{StorageLock, Time}, }, sp_std::prelude::*, traits::EnsureOrigin, }; use frame_system::{self as system, ensure_signed, offchain::SendTransactionTypes}; use product_registry::ProductId; #[cfg(test)] mod mock; #[cfg(test)] mod tests; mod types; use crate::types::*; mod builders; use crate::builders::*; // General constraints to limit data size // Note: these could also be passed as trait config parameters pub const IDENTIFIER_MAX_LENGTH: usize = 10; pub const SHIPMENT_MAX_PRODUCTS: usize = 10; pub const LISTENER_ENDPOINT: &str = "http://localhost:3005"; pub const LOCK_TIMEOUT_EXPIRATION: u64 = 3000; // in milli-seconds pub trait Trait: system::Trait + timestamp::Trait + SendTransactionTypes<Call<Self>> { type Event: From<Event<Self>> + Into<<Self as system::Trait>::Event>; type CreateRoleOrigin: EnsureOrigin<Self::Origin>; } decl_storage! { trait Store for Module<T: Trait> as ProductTracking { // Shipments pub Shipments get(fn shipment_by_id): map hasher(blake2_128_concat) ShipmentId => Option<Shipment<T::AccountId, T::Moment>>; pub ShipmentsOfOrganization get(fn shipments_of_org): map hasher(blake2_128_concat) T::AccountId => Vec<ShipmentId>; // Shipping events pub EventCount get(fn event_count): u128 = 0; pub AllEvents get(fn event_by_idx): map hasher(blake2_128_concat) ShippingEventIndex => Option<ShippingEvent<T::Moment>>; pub EventsOfShipment get(fn events_of_shipment): map hasher(blake2_128_concat) ShipmentId => Vec<ShippingEventIndex>; // Off-chain Worker notifications pub OcwNotifications get (fn ocw_notifications): map hasher(identity) T::BlockNumber => Vec<ShippingEventIndex>; } } decl_event!( pub enum Event<T> where AccountId = <T as system::Trait>::AccountId, { ShipmentRegistered(AccountId, ShipmentId, AccountId), ShipmentStatusUpdated(AccountId, ShipmentId, ShippingEventIndex, ShipmentStatus), } ); decl_error! { pub enum Error for Module<T: Trait> { InvalidOrMissingIdentifier, ShipmentAlreadyExists, ShipmentHasBeenDelivered, ShipmentIsInTransit, ShipmentIsUnknown, ShipmentHasTooManyProducts, ShippingEventAlreadyExists, ShippingEventMaxExceeded, OffchainWorkerAlreadyBusy } } decl_module! 
{ pub struct Module<T: Trait> for enum Call where origin: T::Origin { type Error = Error<T>; fn deposit_event() = default; #[weight = 10_000] pub fn register_shipment(origin, id: ShipmentId, owner: T::AccountId, products: Vec<ProductId>) -> dispatch::DispatchResult { T::CreateRoleOrigin::ensure_origin(origin.clone())?; let who = ensure_signed(origin)?; // Validate format of shipment ID Self::validate_identifier(&id)?; // Validate shipment products Self::validate_shipment_products(&products)?; // Check shipment doesn't exist yet (1 DB read) Self::validate_new_shipment(&id)?; // Create a shipment instance let shipment = Self::new_shipment() .identified_by(id.clone()) .owned_by(owner.clone()) .registered_at(<timestamp::Module<T>>::now()) .with_products(products) .build(); let status = shipment.status.clone(); // Create shipping event let event = Self::new_shipping_event() .of_type(ShippingEventType::ShipmentRegistration) .for_shipment(id.clone()) .at_location(None) .with_readings(vec![]) .at_time(shipment.registered) .build(); // Storage writes // -------------- // Add shipment (2 DB write) <Shipments<T>>::insert(&id, shipment); <ShipmentsOfOrganization<T>>::append(&owner, &id); // Store shipping event (1 DB read, 3 DB writes) let event_idx = Self::store_event(event)?; // Update offchain notifications (1 DB write) <OcwNotifications<T>>::append(<system::Module<T>>::block_number(), event_idx); // Raise events Self::deposit_event(RawEvent::ShipmentRegistered(who.clone(), id.clone(), owner)); Self::deposit_event(RawEvent::ShipmentStatusUpdated(who, id, event_idx, status)); Ok(()) } #[weight = 10_000] pub fn track_shipment( origin, id: ShipmentId, operation: ShippingOperation, #[compact] timestamp: T::Moment, location: Option<ReadPoint>, readings: Option<Vec<Reading<T::Moment>>> ) -> dispatch::DispatchResult { T::CreateRoleOrigin::ensure_origin(origin.clone())?; let who = ensure_signed(origin)?; // Validate format of shipment ID Self::validate_identifier(&id)?; // Check shipment is known (1 DB read) & do transition checks let mut shipment = match <Shipments<T>>::get(&id) { Some(shipment) => match shipment.status { ShipmentStatus::Delivered => Err(<Error<T>>::ShipmentHasBeenDelivered), ShipmentStatus::InTransit if operation == ShippingOperation::Pickup => Err(<Error<T>>::ShipmentIsInTransit), _ => Ok(shipment) } None => Err(<Error<T>>::ShipmentIsUnknown) }?; // Update shipment status shipment = match operation { ShippingOperation::Pickup => shipment.pickup(), ShippingOperation::Deliver => shipment.deliver(timestamp), _ => shipment, }; let status = shipment.status.clone(); // Create shipping event let event = Self::new_shipping_event() .of_type(operation.clone().into()) .for_shipment(id.clone()) .at_location(location) .with_readings(readings.unwrap_or_default()) .at_time(timestamp) .build(); // Storage writes // -------------- // Store shipping event (1 DB read, 3 DB writes) let event_idx = Self::store_event(event)?; // Update offchain notifications (1 DB write) <OcwNotifications<T>>::append(<system::Module<T>>::block_number(), event_idx); if operation!= ShippingOperation::Scan { // Update shipment (1 DB write) <Shipments<T>>::insert(&id, shipment); // Raise events Self::deposit_event(RawEvent::ShipmentStatusUpdated(who, id, event_idx, status)); } Ok(()) } fn offchain_worker(block_number: T::BlockNumber) { // Acquiring the lock let mut lock = StorageLock::<Time>::with_deadline( b"product_tracking_ocw::lock", rt_offchain::Duration::from_millis(LOCK_TIMEOUT_EXPIRATION) ); match lock.try_lock() { 
Ok(_guard) => { Self::process_ocw_notifications(block_number); } Err(_err) => { debug::info!("[product_tracking_ocw] lock is already acquired"); } }; } } } impl<T: Trait> Module<T> { // Helper methods fn new_shipment() -> ShipmentBuilder<T::AccountId, T::Moment> { ShipmentBuilder::<T::AccountId, T::Moment>::default() } fn new_shipping_event() -> ShippingEventBuilder<T::Moment> { ShippingEventBuilder::<T::Moment>::default() } fn store_event(event: ShippingEvent<T::Moment>) -> Result<ShippingEventIndex, Error<T>> { let event_idx = EventCount::get() .checked_add(1) .ok_or(Error::<T>::ShippingEventMaxExceeded)?; EventCount::put(event_idx); EventsOfShipment::append(&event.shipment_id, event_idx); <AllEvents<T>>::insert(event_idx, event); Ok(event_idx) } // (Public) Validation methods pub fn validate_identifier(id: &[u8]) -> Result<(), Error<T>> { // Basic identifier validation ensure!(!id.is_empty(), Error::<T>::InvalidOrMissingIdentifier); ensure!( id.len() <= IDENTIFIER_MAX_LENGTH, Error::<T>::InvalidOrMissingIdentifier ); Ok(()) } pub fn validate_new_shipment(id: &[u8]) -> Result<(), Error<T>> { // Shipment existence check ensure!( !<Shipments<T>>::contains_key(id), Error::<T>::ShipmentAlreadyExists ); Ok(()) } pub fn validate_shipment_products(props: &[ProductId]) -> Result<(), Error<T>>
// --- Offchain worker methods --- fn process_ocw_notifications(block_number: T::BlockNumber) { // Check last processed block let last_processed_block_ref = StorageValueRef::persistent(b"product_tracking_ocw::last_proccessed_block"); let mut last_processed_block: u32 = match last_processed_block_ref.get::<T::BlockNumber>() { Some(Some(last_proccessed_block)) if last_proccessed_block >= block_number => { debug::info!( "[product_tracking_ocw] Skipping: Block {:?} has already been processed.", block_number ); return; } Some(Some(last_proccessed_block)) => { last_proccessed_block.try_into().ok().unwrap() as u32 } None => 0u32, //TODO: define a OCW_MAX_BACKTRACK_PERIOD param _ => { debug::error!("[product_tracking_ocw] Error reading product_tracking_ocw::last_proccessed_block."); return; } }; let start_block = last_processed_block + 1; let end_block = block_number.try_into().ok().unwrap() as u32; for current_block in start_block..end_block { debug::debug!( "[product_tracking_ocw] Processing notifications for block {}", current_block ); let ev_indices = Self::ocw_notifications::<T::BlockNumber>(current_block.into()); let listener_results: Result<Vec<_>, _> = ev_indices .iter() .map(|idx| match Self::event_by_idx(idx) { Some(ev) => Self::notify_listener(&ev), None => Ok(()), }) .collect(); if let Err(err) = listener_results { debug::warn!("[product_tracking_ocw] notify_listener error: {}", err); break; } last_processed_block = current_block; } // Save last processed block if last_processed_block >= start_block { last_processed_block_ref.set(&last_processed_block); debug::info!( "[product_tracking_ocw] Notifications successfully processed up to block {}", last_processed_block ); } } fn notify_listener(ev: &ShippingEvent<T::Moment>) -> Result<(), &'static str> { debug::info!("notifying listener: {:?}", ev); let request = sp_runtime::offchain::http::Request::post(&LISTENER_ENDPOINT, vec![ev.to_string()]); let timeout = sp_io::offchain::timestamp().add(sp_runtime::offchain::Duration::from_millis(3000)); let pending = request .add_header(&"Content-Type", &"text/plain") .deadline(timeout) // Setting the timeout time .send() // Sending the request out by the host .map_err(|_| "http post request building error")?; let response = pending .try_wait(timeout) .map_err(|_| "http post request sent error")? .map_err(|_| "http post request sent error")?; if response.code!= 200 { return Err("http response error"); } Ok(()) } }
{
    ensure!(
        props.len() <= SHIPMENT_MAX_PRODUCTS,
        Error::<T>::ShipmentHasTooManyProducts,
    );
    Ok(())
}
identifier_body
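The `validate_*` helpers in this row are plain functions over their arguments, so they can be unit-tested without building an extrinsic. A rough sketch in the style of the pallet's own `tests.rs`; the mock runtime name `Test` is an assumption about what `mock.rs` defines:

```rust
#[test]
fn identifier_length_is_enforced() {
    // IDENTIFIER_MAX_LENGTH is 10: empty and over-long identifiers are rejected.
    assert!(Module::<Test>::validate_identifier(b"S0001").is_ok());
    assert!(Module::<Test>::validate_identifier(b"").is_err());
    assert!(Module::<Test>::validate_identifier(&[b'A'; IDENTIFIER_MAX_LENGTH + 1]).is_err());
}
```

The same pattern applies to `validate_shipment_products`, which only checks that the slice holds at most `SHIPMENT_MAX_PRODUCTS` entries.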
lib.rs
//! # Substrate Enterprise Sample - Product Tracking pallet #![cfg_attr(not(feature = "std"), no_std)] use codec::alloc::string::ToString; use core::convert::TryInto; use frame_support::{ debug, decl_error, decl_event, decl_module, decl_storage, dispatch, ensure, sp_runtime::offchain::{ self as rt_offchain, storage::StorageValueRef, storage_lock::{StorageLock, Time}, }, sp_std::prelude::*, traits::EnsureOrigin, }; use frame_system::{self as system, ensure_signed, offchain::SendTransactionTypes}; use product_registry::ProductId; #[cfg(test)] mod mock; #[cfg(test)] mod tests; mod types; use crate::types::*; mod builders; use crate::builders::*; // General constraints to limit data size // Note: these could also be passed as trait config parameters pub const IDENTIFIER_MAX_LENGTH: usize = 10; pub const SHIPMENT_MAX_PRODUCTS: usize = 10; pub const LISTENER_ENDPOINT: &str = "http://localhost:3005"; pub const LOCK_TIMEOUT_EXPIRATION: u64 = 3000; // in milli-seconds pub trait Trait: system::Trait + timestamp::Trait + SendTransactionTypes<Call<Self>> { type Event: From<Event<Self>> + Into<<Self as system::Trait>::Event>; type CreateRoleOrigin: EnsureOrigin<Self::Origin>; } decl_storage! { trait Store for Module<T: Trait> as ProductTracking { // Shipments pub Shipments get(fn shipment_by_id): map hasher(blake2_128_concat) ShipmentId => Option<Shipment<T::AccountId, T::Moment>>; pub ShipmentsOfOrganization get(fn shipments_of_org): map hasher(blake2_128_concat) T::AccountId => Vec<ShipmentId>; // Shipping events pub EventCount get(fn event_count): u128 = 0; pub AllEvents get(fn event_by_idx): map hasher(blake2_128_concat) ShippingEventIndex => Option<ShippingEvent<T::Moment>>; pub EventsOfShipment get(fn events_of_shipment): map hasher(blake2_128_concat) ShipmentId => Vec<ShippingEventIndex>; // Off-chain Worker notifications pub OcwNotifications get (fn ocw_notifications): map hasher(identity) T::BlockNumber => Vec<ShippingEventIndex>; } } decl_event!( pub enum Event<T> where AccountId = <T as system::Trait>::AccountId, { ShipmentRegistered(AccountId, ShipmentId, AccountId), ShipmentStatusUpdated(AccountId, ShipmentId, ShippingEventIndex, ShipmentStatus), } ); decl_error! { pub enum Error for Module<T: Trait> { InvalidOrMissingIdentifier, ShipmentAlreadyExists, ShipmentHasBeenDelivered, ShipmentIsInTransit, ShipmentIsUnknown, ShipmentHasTooManyProducts, ShippingEventAlreadyExists, ShippingEventMaxExceeded, OffchainWorkerAlreadyBusy } } decl_module! { pub struct Module<T: Trait> for enum Call where origin: T::Origin { type Error = Error<T>; fn deposit_event() = default; #[weight = 10_000] pub fn register_shipment(origin, id: ShipmentId, owner: T::AccountId, products: Vec<ProductId>) -> dispatch::DispatchResult { T::CreateRoleOrigin::ensure_origin(origin.clone())?; let who = ensure_signed(origin)?; // Validate format of shipment ID Self::validate_identifier(&id)?; // Validate shipment products Self::validate_shipment_products(&products)?; // Check shipment doesn't exist yet (1 DB read) Self::validate_new_shipment(&id)?; // Create a shipment instance let shipment = Self::new_shipment() .identified_by(id.clone()) .owned_by(owner.clone())
let status = shipment.status.clone(); // Create shipping event let event = Self::new_shipping_event() .of_type(ShippingEventType::ShipmentRegistration) .for_shipment(id.clone()) .at_location(None) .with_readings(vec![]) .at_time(shipment.registered) .build(); // Storage writes // -------------- // Add shipment (2 DB write) <Shipments<T>>::insert(&id, shipment); <ShipmentsOfOrganization<T>>::append(&owner, &id); // Store shipping event (1 DB read, 3 DB writes) let event_idx = Self::store_event(event)?; // Update offchain notifications (1 DB write) <OcwNotifications<T>>::append(<system::Module<T>>::block_number(), event_idx); // Raise events Self::deposit_event(RawEvent::ShipmentRegistered(who.clone(), id.clone(), owner)); Self::deposit_event(RawEvent::ShipmentStatusUpdated(who, id, event_idx, status)); Ok(()) } #[weight = 10_000] pub fn track_shipment( origin, id: ShipmentId, operation: ShippingOperation, #[compact] timestamp: T::Moment, location: Option<ReadPoint>, readings: Option<Vec<Reading<T::Moment>>> ) -> dispatch::DispatchResult { T::CreateRoleOrigin::ensure_origin(origin.clone())?; let who = ensure_signed(origin)?; // Validate format of shipment ID Self::validate_identifier(&id)?; // Check shipment is known (1 DB read) & do transition checks let mut shipment = match <Shipments<T>>::get(&id) { Some(shipment) => match shipment.status { ShipmentStatus::Delivered => Err(<Error<T>>::ShipmentHasBeenDelivered), ShipmentStatus::InTransit if operation == ShippingOperation::Pickup => Err(<Error<T>>::ShipmentIsInTransit), _ => Ok(shipment) } None => Err(<Error<T>>::ShipmentIsUnknown) }?; // Update shipment status shipment = match operation { ShippingOperation::Pickup => shipment.pickup(), ShippingOperation::Deliver => shipment.deliver(timestamp), _ => shipment, }; let status = shipment.status.clone(); // Create shipping event let event = Self::new_shipping_event() .of_type(operation.clone().into()) .for_shipment(id.clone()) .at_location(location) .with_readings(readings.unwrap_or_default()) .at_time(timestamp) .build(); // Storage writes // -------------- // Store shipping event (1 DB read, 3 DB writes) let event_idx = Self::store_event(event)?; // Update offchain notifications (1 DB write) <OcwNotifications<T>>::append(<system::Module<T>>::block_number(), event_idx); if operation!= ShippingOperation::Scan { // Update shipment (1 DB write) <Shipments<T>>::insert(&id, shipment); // Raise events Self::deposit_event(RawEvent::ShipmentStatusUpdated(who, id, event_idx, status)); } Ok(()) } fn offchain_worker(block_number: T::BlockNumber) { // Acquiring the lock let mut lock = StorageLock::<Time>::with_deadline( b"product_tracking_ocw::lock", rt_offchain::Duration::from_millis(LOCK_TIMEOUT_EXPIRATION) ); match lock.try_lock() { Ok(_guard) => { Self::process_ocw_notifications(block_number); } Err(_err) => { debug::info!("[product_tracking_ocw] lock is already acquired"); } }; } } } impl<T: Trait> Module<T> { // Helper methods fn new_shipment() -> ShipmentBuilder<T::AccountId, T::Moment> { ShipmentBuilder::<T::AccountId, T::Moment>::default() } fn new_shipping_event() -> ShippingEventBuilder<T::Moment> { ShippingEventBuilder::<T::Moment>::default() } fn store_event(event: ShippingEvent<T::Moment>) -> Result<ShippingEventIndex, Error<T>> { let event_idx = EventCount::get() .checked_add(1) .ok_or(Error::<T>::ShippingEventMaxExceeded)?; EventCount::put(event_idx); EventsOfShipment::append(&event.shipment_id, event_idx); <AllEvents<T>>::insert(event_idx, event); Ok(event_idx) } // (Public) Validation 
methods pub fn validate_identifier(id: &[u8]) -> Result<(), Error<T>> { // Basic identifier validation ensure!(!id.is_empty(), Error::<T>::InvalidOrMissingIdentifier); ensure!( id.len() <= IDENTIFIER_MAX_LENGTH, Error::<T>::InvalidOrMissingIdentifier ); Ok(()) } pub fn validate_new_shipment(id: &[u8]) -> Result<(), Error<T>> { // Shipment existence check ensure!( !<Shipments<T>>::contains_key(id), Error::<T>::ShipmentAlreadyExists ); Ok(()) } pub fn validate_shipment_products(props: &[ProductId]) -> Result<(), Error<T>> { ensure!( props.len() <= SHIPMENT_MAX_PRODUCTS, Error::<T>::ShipmentHasTooManyProducts, ); Ok(()) } // --- Offchain worker methods --- fn process_ocw_notifications(block_number: T::BlockNumber) { // Check last processed block let last_processed_block_ref = StorageValueRef::persistent(b"product_tracking_ocw::last_proccessed_block"); let mut last_processed_block: u32 = match last_processed_block_ref.get::<T::BlockNumber>() { Some(Some(last_proccessed_block)) if last_proccessed_block >= block_number => { debug::info!( "[product_tracking_ocw] Skipping: Block {:?} has already been processed.", block_number ); return; } Some(Some(last_proccessed_block)) => { last_proccessed_block.try_into().ok().unwrap() as u32 } None => 0u32, //TODO: define a OCW_MAX_BACKTRACK_PERIOD param _ => { debug::error!("[product_tracking_ocw] Error reading product_tracking_ocw::last_proccessed_block."); return; } }; let start_block = last_processed_block + 1; let end_block = block_number.try_into().ok().unwrap() as u32; for current_block in start_block..end_block { debug::debug!( "[product_tracking_ocw] Processing notifications for block {}", current_block ); let ev_indices = Self::ocw_notifications::<T::BlockNumber>(current_block.into()); let listener_results: Result<Vec<_>, _> = ev_indices .iter() .map(|idx| match Self::event_by_idx(idx) { Some(ev) => Self::notify_listener(&ev), None => Ok(()), }) .collect(); if let Err(err) = listener_results { debug::warn!("[product_tracking_ocw] notify_listener error: {}", err); break; } last_processed_block = current_block; } // Save last processed block if last_processed_block >= start_block { last_processed_block_ref.set(&last_processed_block); debug::info!( "[product_tracking_ocw] Notifications successfully processed up to block {}", last_processed_block ); } } fn notify_listener(ev: &ShippingEvent<T::Moment>) -> Result<(), &'static str> { debug::info!("notifying listener: {:?}", ev); let request = sp_runtime::offchain::http::Request::post(&LISTENER_ENDPOINT, vec![ev.to_string()]); let timeout = sp_io::offchain::timestamp().add(sp_runtime::offchain::Duration::from_millis(3000)); let pending = request .add_header(&"Content-Type", &"text/plain") .deadline(timeout) // Setting the timeout time .send() // Sending the request out by the host .map_err(|_| "http post request building error")?; let response = pending .try_wait(timeout) .map_err(|_| "http post request sent error")? .map_err(|_| "http post request sent error")?; if response.code!= 200 { return Err("http response error"); } Ok(()) } }
.registered_at(<timestamp::Module<T>>::now())
.with_products(products)
.build();
random_line_split
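For reference, this is roughly how the `register_shipment` dispatchable highlighted in this row gets exercised from a unit test. Every concrete name below is an assumption about the (not shown) `mock.rs`: `new_test_ext`, `Origin`, a `ProductTracking` alias for `Module<Test>`, a `u64` `AccountId`, `ShipmentId` as `Vec<u8>`, and a `CreateRoleOrigin` that accepts signed origins:

```rust
use frame_support::assert_ok;

#[test]
fn register_shipment_stores_the_shipment() {
    new_test_ext().execute_with(|| {
        let owner: u64 = 1;
        assert_ok!(ProductTracking::register_shipment(
            Origin::signed(owner),
            b"S0001".to_vec(), // within IDENTIFIER_MAX_LENGTH
            owner,
            vec![],            // no products, well under SHIPMENT_MAX_PRODUCTS
        ));
        assert!(ProductTracking::shipment_by_id(b"S0001".to_vec()).is_some());
    });
}
```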
lib.rs
//! # Substrate Enterprise Sample - Product Tracking pallet #![cfg_attr(not(feature = "std"), no_std)] use codec::alloc::string::ToString; use core::convert::TryInto; use frame_support::{ debug, decl_error, decl_event, decl_module, decl_storage, dispatch, ensure, sp_runtime::offchain::{ self as rt_offchain, storage::StorageValueRef, storage_lock::{StorageLock, Time}, }, sp_std::prelude::*, traits::EnsureOrigin, }; use frame_system::{self as system, ensure_signed, offchain::SendTransactionTypes}; use product_registry::ProductId; #[cfg(test)] mod mock; #[cfg(test)] mod tests; mod types; use crate::types::*; mod builders; use crate::builders::*; // General constraints to limit data size // Note: these could also be passed as trait config parameters pub const IDENTIFIER_MAX_LENGTH: usize = 10; pub const SHIPMENT_MAX_PRODUCTS: usize = 10; pub const LISTENER_ENDPOINT: &str = "http://localhost:3005"; pub const LOCK_TIMEOUT_EXPIRATION: u64 = 3000; // in milli-seconds pub trait Trait: system::Trait + timestamp::Trait + SendTransactionTypes<Call<Self>> { type Event: From<Event<Self>> + Into<<Self as system::Trait>::Event>; type CreateRoleOrigin: EnsureOrigin<Self::Origin>; } decl_storage! { trait Store for Module<T: Trait> as ProductTracking { // Shipments pub Shipments get(fn shipment_by_id): map hasher(blake2_128_concat) ShipmentId => Option<Shipment<T::AccountId, T::Moment>>; pub ShipmentsOfOrganization get(fn shipments_of_org): map hasher(blake2_128_concat) T::AccountId => Vec<ShipmentId>; // Shipping events pub EventCount get(fn event_count): u128 = 0; pub AllEvents get(fn event_by_idx): map hasher(blake2_128_concat) ShippingEventIndex => Option<ShippingEvent<T::Moment>>; pub EventsOfShipment get(fn events_of_shipment): map hasher(blake2_128_concat) ShipmentId => Vec<ShippingEventIndex>; // Off-chain Worker notifications pub OcwNotifications get (fn ocw_notifications): map hasher(identity) T::BlockNumber => Vec<ShippingEventIndex>; } } decl_event!( pub enum Event<T> where AccountId = <T as system::Trait>::AccountId, { ShipmentRegistered(AccountId, ShipmentId, AccountId), ShipmentStatusUpdated(AccountId, ShipmentId, ShippingEventIndex, ShipmentStatus), } ); decl_error! { pub enum Error for Module<T: Trait> { InvalidOrMissingIdentifier, ShipmentAlreadyExists, ShipmentHasBeenDelivered, ShipmentIsInTransit, ShipmentIsUnknown, ShipmentHasTooManyProducts, ShippingEventAlreadyExists, ShippingEventMaxExceeded, OffchainWorkerAlreadyBusy } } decl_module! 
{ pub struct Module<T: Trait> for enum Call where origin: T::Origin { type Error = Error<T>; fn deposit_event() = default; #[weight = 10_000] pub fn register_shipment(origin, id: ShipmentId, owner: T::AccountId, products: Vec<ProductId>) -> dispatch::DispatchResult { T::CreateRoleOrigin::ensure_origin(origin.clone())?; let who = ensure_signed(origin)?; // Validate format of shipment ID Self::validate_identifier(&id)?; // Validate shipment products Self::validate_shipment_products(&products)?; // Check shipment doesn't exist yet (1 DB read) Self::validate_new_shipment(&id)?; // Create a shipment instance let shipment = Self::new_shipment() .identified_by(id.clone()) .owned_by(owner.clone()) .registered_at(<timestamp::Module<T>>::now()) .with_products(products) .build(); let status = shipment.status.clone(); // Create shipping event let event = Self::new_shipping_event() .of_type(ShippingEventType::ShipmentRegistration) .for_shipment(id.clone()) .at_location(None) .with_readings(vec![]) .at_time(shipment.registered) .build(); // Storage writes // -------------- // Add shipment (2 DB write) <Shipments<T>>::insert(&id, shipment); <ShipmentsOfOrganization<T>>::append(&owner, &id); // Store shipping event (1 DB read, 3 DB writes) let event_idx = Self::store_event(event)?; // Update offchain notifications (1 DB write) <OcwNotifications<T>>::append(<system::Module<T>>::block_number(), event_idx); // Raise events Self::deposit_event(RawEvent::ShipmentRegistered(who.clone(), id.clone(), owner)); Self::deposit_event(RawEvent::ShipmentStatusUpdated(who, id, event_idx, status)); Ok(()) } #[weight = 10_000] pub fn track_shipment( origin, id: ShipmentId, operation: ShippingOperation, #[compact] timestamp: T::Moment, location: Option<ReadPoint>, readings: Option<Vec<Reading<T::Moment>>> ) -> dispatch::DispatchResult { T::CreateRoleOrigin::ensure_origin(origin.clone())?; let who = ensure_signed(origin)?; // Validate format of shipment ID Self::validate_identifier(&id)?; // Check shipment is known (1 DB read) & do transition checks let mut shipment = match <Shipments<T>>::get(&id) { Some(shipment) => match shipment.status { ShipmentStatus::Delivered => Err(<Error<T>>::ShipmentHasBeenDelivered), ShipmentStatus::InTransit if operation == ShippingOperation::Pickup => Err(<Error<T>>::ShipmentIsInTransit), _ => Ok(shipment) } None => Err(<Error<T>>::ShipmentIsUnknown) }?; // Update shipment status shipment = match operation { ShippingOperation::Pickup => shipment.pickup(), ShippingOperation::Deliver => shipment.deliver(timestamp), _ => shipment, }; let status = shipment.status.clone(); // Create shipping event let event = Self::new_shipping_event() .of_type(operation.clone().into()) .for_shipment(id.clone()) .at_location(location) .with_readings(readings.unwrap_or_default()) .at_time(timestamp) .build(); // Storage writes // -------------- // Store shipping event (1 DB read, 3 DB writes) let event_idx = Self::store_event(event)?; // Update offchain notifications (1 DB write) <OcwNotifications<T>>::append(<system::Module<T>>::block_number(), event_idx); if operation!= ShippingOperation::Scan { // Update shipment (1 DB write) <Shipments<T>>::insert(&id, shipment); // Raise events Self::deposit_event(RawEvent::ShipmentStatusUpdated(who, id, event_idx, status)); } Ok(()) } fn offchain_worker(block_number: T::BlockNumber) { // Acquiring the lock let mut lock = StorageLock::<Time>::with_deadline( b"product_tracking_ocw::lock", rt_offchain::Duration::from_millis(LOCK_TIMEOUT_EXPIRATION) ); match lock.try_lock() { 
Ok(_guard) => { Self::process_ocw_notifications(block_number); } Err(_err) => { debug::info!("[product_tracking_ocw] lock is already acquired"); } }; } } } impl<T: Trait> Module<T> { // Helper methods fn new_shipment() -> ShipmentBuilder<T::AccountId, T::Moment> { ShipmentBuilder::<T::AccountId, T::Moment>::default() } fn new_shipping_event() -> ShippingEventBuilder<T::Moment> { ShippingEventBuilder::<T::Moment>::default() } fn store_event(event: ShippingEvent<T::Moment>) -> Result<ShippingEventIndex, Error<T>> { let event_idx = EventCount::get() .checked_add(1) .ok_or(Error::<T>::ShippingEventMaxExceeded)?; EventCount::put(event_idx); EventsOfShipment::append(&event.shipment_id, event_idx); <AllEvents<T>>::insert(event_idx, event); Ok(event_idx) } // (Public) Validation methods pub fn validate_identifier(id: &[u8]) -> Result<(), Error<T>> { // Basic identifier validation ensure!(!id.is_empty(), Error::<T>::InvalidOrMissingIdentifier); ensure!( id.len() <= IDENTIFIER_MAX_LENGTH, Error::<T>::InvalidOrMissingIdentifier ); Ok(()) } pub fn validate_new_shipment(id: &[u8]) -> Result<(), Error<T>> { // Shipment existence check ensure!( !<Shipments<T>>::contains_key(id), Error::<T>::ShipmentAlreadyExists ); Ok(()) } pub fn validate_shipment_products(props: &[ProductId]) -> Result<(), Error<T>> { ensure!( props.len() <= SHIPMENT_MAX_PRODUCTS, Error::<T>::ShipmentHasTooManyProducts, ); Ok(()) } // --- Offchain worker methods --- fn process_ocw_notifications(block_number: T::BlockNumber) { // Check last processed block let last_processed_block_ref = StorageValueRef::persistent(b"product_tracking_ocw::last_proccessed_block"); let mut last_processed_block: u32 = match last_processed_block_ref.get::<T::BlockNumber>() { Some(Some(last_proccessed_block)) if last_proccessed_block >= block_number => { debug::info!( "[product_tracking_ocw] Skipping: Block {:?} has already been processed.", block_number ); return; } Some(Some(last_proccessed_block)) => { last_proccessed_block.try_into().ok().unwrap() as u32 } None => 0u32, //TODO: define a OCW_MAX_BACKTRACK_PERIOD param _ => { debug::error!("[product_tracking_ocw] Error reading product_tracking_ocw::last_proccessed_block."); return; } }; let start_block = last_processed_block + 1; let end_block = block_number.try_into().ok().unwrap() as u32; for current_block in start_block..end_block { debug::debug!( "[product_tracking_ocw] Processing notifications for block {}", current_block ); let ev_indices = Self::ocw_notifications::<T::BlockNumber>(current_block.into()); let listener_results: Result<Vec<_>, _> = ev_indices .iter() .map(|idx| match Self::event_by_idx(idx) { Some(ev) => Self::notify_listener(&ev), None => Ok(()), }) .collect(); if let Err(err) = listener_results { debug::warn!("[product_tracking_ocw] notify_listener error: {}", err); break; } last_processed_block = current_block; } // Save last processed block if last_processed_block >= start_block
} fn notify_listener(ev: &ShippingEvent<T::Moment>) -> Result<(), &'static str> { debug::info!("notifying listener: {:?}", ev); let request = sp_runtime::offchain::http::Request::post(&LISTENER_ENDPOINT, vec![ev.to_string()]); let timeout = sp_io::offchain::timestamp().add(sp_runtime::offchain::Duration::from_millis(3000)); let pending = request .add_header(&"Content-Type", &"text/plain") .deadline(timeout) // Setting the timeout time .send() // Sending the request out by the host .map_err(|_| "http post request building error")?; let response = pending .try_wait(timeout) .map_err(|_| "http post request sent error")? .map_err(|_| "http post request sent error")?; if response.code!= 200 { return Err("http response error"); } Ok(()) } }
{
    last_processed_block_ref.set(&last_processed_block);
    debug::info!(
        "[product_tracking_ocw] Notifications successfully processed up to block {}",
        last_processed_block
    );
}
conditional_block
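The conditional highlighted in this row is the tail of a simple cursor pattern: read the last block the worker handled from node-local off-chain storage, process anything newer, then persist the new high-water mark. Stripped of the pallet specifics, the shape is roughly the following sketch (a hypothetical helper using a plain `u32` cursor; the real code reads a `T::BlockNumber` and uses the storage key defined above):

```rust
// Hypothetical helper, not part of the pallet; runs only in an offchain worker context.
fn advance_cursor(current_block: u32) {
    let cursor = StorageValueRef::persistent(b"product_tracking_ocw::last_proccessed_block");

    // Skip blocks that were already handled by an earlier invocation.
    if let Some(Some(done)) = cursor.get::<u32>() {
        if done >= current_block {
            return;
        }
    }

    // ... notify listeners for every block up to current_block ...

    // Persist the new high-water mark so the next invocation resumes here.
    cursor.set(&current_block);
}
```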
lib.rs
//! # Substrate Enterprise Sample - Product Tracking pallet #![cfg_attr(not(feature = "std"), no_std)] use codec::alloc::string::ToString; use core::convert::TryInto; use frame_support::{ debug, decl_error, decl_event, decl_module, decl_storage, dispatch, ensure, sp_runtime::offchain::{ self as rt_offchain, storage::StorageValueRef, storage_lock::{StorageLock, Time}, }, sp_std::prelude::*, traits::EnsureOrigin, }; use frame_system::{self as system, ensure_signed, offchain::SendTransactionTypes}; use product_registry::ProductId; #[cfg(test)] mod mock; #[cfg(test)] mod tests; mod types; use crate::types::*; mod builders; use crate::builders::*; // General constraints to limit data size // Note: these could also be passed as trait config parameters pub const IDENTIFIER_MAX_LENGTH: usize = 10; pub const SHIPMENT_MAX_PRODUCTS: usize = 10; pub const LISTENER_ENDPOINT: &str = "http://localhost:3005"; pub const LOCK_TIMEOUT_EXPIRATION: u64 = 3000; // in milli-seconds pub trait Trait: system::Trait + timestamp::Trait + SendTransactionTypes<Call<Self>> { type Event: From<Event<Self>> + Into<<Self as system::Trait>::Event>; type CreateRoleOrigin: EnsureOrigin<Self::Origin>; } decl_storage! { trait Store for Module<T: Trait> as ProductTracking { // Shipments pub Shipments get(fn shipment_by_id): map hasher(blake2_128_concat) ShipmentId => Option<Shipment<T::AccountId, T::Moment>>; pub ShipmentsOfOrganization get(fn shipments_of_org): map hasher(blake2_128_concat) T::AccountId => Vec<ShipmentId>; // Shipping events pub EventCount get(fn event_count): u128 = 0; pub AllEvents get(fn event_by_idx): map hasher(blake2_128_concat) ShippingEventIndex => Option<ShippingEvent<T::Moment>>; pub EventsOfShipment get(fn events_of_shipment): map hasher(blake2_128_concat) ShipmentId => Vec<ShippingEventIndex>; // Off-chain Worker notifications pub OcwNotifications get (fn ocw_notifications): map hasher(identity) T::BlockNumber => Vec<ShippingEventIndex>; } } decl_event!( pub enum Event<T> where AccountId = <T as system::Trait>::AccountId, { ShipmentRegistered(AccountId, ShipmentId, AccountId), ShipmentStatusUpdated(AccountId, ShipmentId, ShippingEventIndex, ShipmentStatus), } ); decl_error! { pub enum Error for Module<T: Trait> { InvalidOrMissingIdentifier, ShipmentAlreadyExists, ShipmentHasBeenDelivered, ShipmentIsInTransit, ShipmentIsUnknown, ShipmentHasTooManyProducts, ShippingEventAlreadyExists, ShippingEventMaxExceeded, OffchainWorkerAlreadyBusy } } decl_module! 
{ pub struct Module<T: Trait> for enum Call where origin: T::Origin { type Error = Error<T>; fn deposit_event() = default; #[weight = 10_000] pub fn register_shipment(origin, id: ShipmentId, owner: T::AccountId, products: Vec<ProductId>) -> dispatch::DispatchResult { T::CreateRoleOrigin::ensure_origin(origin.clone())?; let who = ensure_signed(origin)?; // Validate format of shipment ID Self::validate_identifier(&id)?; // Validate shipment products Self::validate_shipment_products(&products)?; // Check shipment doesn't exist yet (1 DB read) Self::validate_new_shipment(&id)?; // Create a shipment instance let shipment = Self::new_shipment() .identified_by(id.clone()) .owned_by(owner.clone()) .registered_at(<timestamp::Module<T>>::now()) .with_products(products) .build(); let status = shipment.status.clone(); // Create shipping event let event = Self::new_shipping_event() .of_type(ShippingEventType::ShipmentRegistration) .for_shipment(id.clone()) .at_location(None) .with_readings(vec![]) .at_time(shipment.registered) .build(); // Storage writes // -------------- // Add shipment (2 DB write) <Shipments<T>>::insert(&id, shipment); <ShipmentsOfOrganization<T>>::append(&owner, &id); // Store shipping event (1 DB read, 3 DB writes) let event_idx = Self::store_event(event)?; // Update offchain notifications (1 DB write) <OcwNotifications<T>>::append(<system::Module<T>>::block_number(), event_idx); // Raise events Self::deposit_event(RawEvent::ShipmentRegistered(who.clone(), id.clone(), owner)); Self::deposit_event(RawEvent::ShipmentStatusUpdated(who, id, event_idx, status)); Ok(()) } #[weight = 10_000] pub fn track_shipment( origin, id: ShipmentId, operation: ShippingOperation, #[compact] timestamp: T::Moment, location: Option<ReadPoint>, readings: Option<Vec<Reading<T::Moment>>> ) -> dispatch::DispatchResult { T::CreateRoleOrigin::ensure_origin(origin.clone())?; let who = ensure_signed(origin)?; // Validate format of shipment ID Self::validate_identifier(&id)?; // Check shipment is known (1 DB read) & do transition checks let mut shipment = match <Shipments<T>>::get(&id) { Some(shipment) => match shipment.status { ShipmentStatus::Delivered => Err(<Error<T>>::ShipmentHasBeenDelivered), ShipmentStatus::InTransit if operation == ShippingOperation::Pickup => Err(<Error<T>>::ShipmentIsInTransit), _ => Ok(shipment) } None => Err(<Error<T>>::ShipmentIsUnknown) }?; // Update shipment status shipment = match operation { ShippingOperation::Pickup => shipment.pickup(), ShippingOperation::Deliver => shipment.deliver(timestamp), _ => shipment, }; let status = shipment.status.clone(); // Create shipping event let event = Self::new_shipping_event() .of_type(operation.clone().into()) .for_shipment(id.clone()) .at_location(location) .with_readings(readings.unwrap_or_default()) .at_time(timestamp) .build(); // Storage writes // -------------- // Store shipping event (1 DB read, 3 DB writes) let event_idx = Self::store_event(event)?; // Update offchain notifications (1 DB write) <OcwNotifications<T>>::append(<system::Module<T>>::block_number(), event_idx); if operation!= ShippingOperation::Scan { // Update shipment (1 DB write) <Shipments<T>>::insert(&id, shipment); // Raise events Self::deposit_event(RawEvent::ShipmentStatusUpdated(who, id, event_idx, status)); } Ok(()) } fn offchain_worker(block_number: T::BlockNumber) { // Acquiring the lock let mut lock = StorageLock::<Time>::with_deadline( b"product_tracking_ocw::lock", rt_offchain::Duration::from_millis(LOCK_TIMEOUT_EXPIRATION) ); match lock.try_lock() { 
Ok(_guard) => { Self::process_ocw_notifications(block_number); } Err(_err) => { debug::info!("[product_tracking_ocw] lock is already acquired"); } }; } } } impl<T: Trait> Module<T> { // Helper methods fn new_shipment() -> ShipmentBuilder<T::AccountId, T::Moment> { ShipmentBuilder::<T::AccountId, T::Moment>::default() } fn new_shipping_event() -> ShippingEventBuilder<T::Moment> { ShippingEventBuilder::<T::Moment>::default() } fn store_event(event: ShippingEvent<T::Moment>) -> Result<ShippingEventIndex, Error<T>> { let event_idx = EventCount::get() .checked_add(1) .ok_or(Error::<T>::ShippingEventMaxExceeded)?; EventCount::put(event_idx); EventsOfShipment::append(&event.shipment_id, event_idx); <AllEvents<T>>::insert(event_idx, event); Ok(event_idx) } // (Public) Validation methods pub fn validate_identifier(id: &[u8]) -> Result<(), Error<T>> { // Basic identifier validation ensure!(!id.is_empty(), Error::<T>::InvalidOrMissingIdentifier); ensure!( id.len() <= IDENTIFIER_MAX_LENGTH, Error::<T>::InvalidOrMissingIdentifier ); Ok(()) } pub fn validate_new_shipment(id: &[u8]) -> Result<(), Error<T>> { // Shipment existence check ensure!( !<Shipments<T>>::contains_key(id), Error::<T>::ShipmentAlreadyExists ); Ok(()) } pub fn validate_shipment_products(props: &[ProductId]) -> Result<(), Error<T>> { ensure!( props.len() <= SHIPMENT_MAX_PRODUCTS, Error::<T>::ShipmentHasTooManyProducts, ); Ok(()) } // --- Offchain worker methods --- fn
(block_number: T::BlockNumber) { // Check last processed block let last_processed_block_ref = StorageValueRef::persistent(b"product_tracking_ocw::last_proccessed_block"); let mut last_processed_block: u32 = match last_processed_block_ref.get::<T::BlockNumber>() { Some(Some(last_proccessed_block)) if last_proccessed_block >= block_number => { debug::info!( "[product_tracking_ocw] Skipping: Block {:?} has already been processed.", block_number ); return; } Some(Some(last_proccessed_block)) => { last_proccessed_block.try_into().ok().unwrap() as u32 } None => 0u32, //TODO: define a OCW_MAX_BACKTRACK_PERIOD param _ => { debug::error!("[product_tracking_ocw] Error reading product_tracking_ocw::last_proccessed_block."); return; } }; let start_block = last_processed_block + 1; let end_block = block_number.try_into().ok().unwrap() as u32; for current_block in start_block..end_block { debug::debug!( "[product_tracking_ocw] Processing notifications for block {}", current_block ); let ev_indices = Self::ocw_notifications::<T::BlockNumber>(current_block.into()); let listener_results: Result<Vec<_>, _> = ev_indices .iter() .map(|idx| match Self::event_by_idx(idx) { Some(ev) => Self::notify_listener(&ev), None => Ok(()), }) .collect(); if let Err(err) = listener_results { debug::warn!("[product_tracking_ocw] notify_listener error: {}", err); break; } last_processed_block = current_block; } // Save last processed block if last_processed_block >= start_block { last_processed_block_ref.set(&last_processed_block); debug::info!( "[product_tracking_ocw] Notifications successfully processed up to block {}", last_processed_block ); } } fn notify_listener(ev: &ShippingEvent<T::Moment>) -> Result<(), &'static str> { debug::info!("notifying listener: {:?}", ev); let request = sp_runtime::offchain::http::Request::post(&LISTENER_ENDPOINT, vec![ev.to_string()]); let timeout = sp_io::offchain::timestamp().add(sp_runtime::offchain::Duration::from_millis(3000)); let pending = request .add_header(&"Content-Type", &"text/plain") .deadline(timeout) // Setting the timeout time .send() // Sending the request out by the host .map_err(|_| "http post request building error")?; let response = pending .try_wait(timeout) .map_err(|_| "http post request sent error")? .map_err(|_| "http post request sent error")?; if response.code!= 200 { return Err("http response error"); } Ok(()) } }
process_ocw_notifications
identifier_name
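`notify_listener` above POSTs each event as `text/plain` to `LISTENER_ENDPOINT` (`http://localhost:3005`) and treats any status other than 200 as an error. The listener at that address is not part of this pallet; for local debugging, a throwaway stand-in like the following sketch is enough to see what the off-chain worker sends (a single blocking read per connection, so deliberately crude):

```rust
use std::io::{Read, Write};
use std::net::TcpListener;

fn main() -> std::io::Result<()> {
    // Same port as LISTENER_ENDPOINT.
    let listener = TcpListener::bind("127.0.0.1:3005")?;
    for stream in listener.incoming() {
        let mut stream = stream?;
        let mut buf = [0u8; 8192];
        let n = stream.read(&mut buf)?;
        // Dump the raw request: headers plus the text/plain event payload.
        println!("{}", String::from_utf8_lossy(&buf[..n]));
        // Anything other than 200 makes notify_listener return an error.
        stream.write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n")?;
    }
    Ok(())
}
```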
app.rs
use audio;
use audio::cpal;
use find_folder;
use glium::glutin;
use state;
use std;
use std::cell::{Cell, RefCell};
use std::collections::HashMap;
use std::marker::PhantomData;
use std::path::PathBuf;
use std::sync::{mpsc, Arc};
use std::thread;
use std::time::Duration;
use window::{self, Window};
use ui;

/// An **App** represents the entire context of your application.
///
/// The **App** owns and manages:
///
/// - the event loop (used to drive the application forward)
/// - all OpenGL windows (for graphics and user input, can be referenced via IDs).
pub struct App {
    pub(crate) events_loop: glutin::EventsLoop,
    pub(crate) windows: RefCell<HashMap<window::Id, Window>>,
    pub(super) exit_on_escape: Cell<bool>,
    pub(crate) ui: ui::Arrangement,
    loop_mode: Cell<LoopMode>,
    /// The `App`'s audio-related API.
    pub audio: Audio,
    /// The current state of the `Mouse`.
    pub mouse: state::Mouse,
    /// State of the window currently in focus.
    pub window: state::Window,
    /// State of the keyboard keys.
    ///
    /// `mods` provides the state of each of the modifier keys: `shift`, `ctrl`, `alt`, `logo`.
    ///
    /// `down` is the set of keys that are currently pressed.
    ///
    /// NOTE: `down` is tracked by the nannou `App`, so issues might occur if e.g. a key is
    /// pressed while the app is in focus and then released when out of focus. Eventually we
    /// should change this to query the OS somehow, but I don't think `winit` provides a way to
    /// do this yet.
    pub keys: state::Keys,
}

/// An **App**'s audio API.
pub struct Audio {
    event_loop: Arc<cpal::EventLoop>,
    process_fn_tx: RefCell<Option<mpsc::Sender<audio::stream::ProcessFnMsg>>>,
}

/// A handle to the **App** that can be shared across threads.
///
/// This can be used to "wake up" the **App**'s inner event loop.
pub struct Proxy {
    events_loop_proxy: glutin::EventsLoopProxy,
}

/// The mode in which the **App** is currently running the event loop.
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum LoopMode {
    /// Specifies that the application is continuously looping at a consistent rate.
    ///
    /// An application running in the **Rate** loop mode will behave as follows:
    ///
    /// 1. Poll for and collect all pending user input. `update` is then called with all
    ///    application events that have occurred.
    ///
    /// 2. `update` is called with an `Event::Update`.
    ///
    /// 3. `draw` is called.
    ///
    /// 4. Check the time and sleep for the remainder of the `update_interval`.
    Rate {
        /// The minimum interval between emitted updates.
        update_interval: Duration,
    },
    Wait {
        /// The number of `update`s (and in turn `draw`s) that should occur since the
        /// application last received a non-`Update` event.
        updates_following_event: usize,
        /// The minimum interval between emitted updates.
        update_interval: Duration,
    },
}

fn update_interval(fps: f64) -> Duration {
    assert!(fps > 0.0);
    const NANOSEC_PER_SEC: f64 = 1_000_000_000.0;
    let interval_nanosecs = NANOSEC_PER_SEC / fps;
    let secs = (interval_nanosecs / NANOSEC_PER_SEC) as u64;
    let nanosecs = (interval_nanosecs % NANOSEC_PER_SEC) as u32;
    Duration::new(secs, nanosecs)
}

impl LoopMode {
    pub const DEFAULT_RATE_FPS: f64 = 60.0;
    pub const DEFAULT_UPDATES_FOLLOWING_EVENT: usize = 3;

    /// Specify the **Rate** mode with the given frames-per-second.
    pub fn rate_fps(fps: f64) -> Self {
        let update_interval = update_interval(fps);
        LoopMode::Rate { update_interval }
    }

    /// Specify the **Wait** mode with the given number of updates following each non-`Update`
    /// event.
    ///
    /// Uses the default update interval.
    pub fn wait(updates_following_event: usize) -> Self {
        let update_interval = update_interval(Self::DEFAULT_RATE_FPS);
        LoopMode::Wait {
            updates_following_event,
            update_interval,
        }
    }

    /// Specify the **Wait** mode with the given number of updates following each non-`Update`
    /// event.
    ///
    /// Waits long enough to ensure loop iteration never occurs faster than the given `max_fps`.
    pub fn wait_with_max_fps(updates_following_event: usize, max_fps: f64) -> Self {
        let update_interval = update_interval(max_fps);
        LoopMode::Wait {
            updates_following_event,
            update_interval,
        }
    }

    /// Specify the **Wait** mode with the given number of updates following each non-`Update`
    /// event.
    ///
    /// Waits at least as long as the given `update_interval` between loop iterations.
    pub fn wait_with_interval(updates_following_event: usize, update_interval: Duration) -> Self {
        LoopMode::Wait {
            updates_following_event,
            update_interval,
        }
    }
}

impl Default for LoopMode {
    fn default() -> Self {
        LoopMode::rate_fps(Self::DEFAULT_RATE_FPS)
    }
}

impl App {
    pub const ASSETS_DIRECTORY_NAME: &'static str = "assets";
    pub const DEFAULT_EXIT_ON_ESCAPE: bool = true;

    // Create a new `App`.
    pub(super) fn new(events_loop: glutin::EventsLoop) -> Self {
        let windows = RefCell::new(HashMap::new());
        let exit_on_escape = Cell::new(Self::DEFAULT_EXIT_ON_ESCAPE);
        let loop_mode = Cell::new(LoopMode::default());
        let cpal_event_loop = Arc::new(cpal::EventLoop::new());
        let process_fn_tx = RefCell::new(None);
        let audio = Audio {
            event_loop: cpal_event_loop,
            process_fn_tx,
        };
        let ui = ui::Arrangement::new();
        let mouse = state::Mouse::new();
        let window = state::Window::new();
        let keys = state::Keys::default();
        App {
            events_loop,
            windows,
            exit_on_escape,
            loop_mode,
            audio,
            ui,
            mouse,
            window,
            keys,
        }
    }

    /// Find and return the absolute path to the project's `assets` directory.
    ///
    /// This method looks for the assets directory in the following order:
    ///
    /// 1. Checks the same directory as the executable.
    /// 2. Recursively checks the exe's parent directories (to a max depth of 5).
    /// 3. Recursively checks the exe's children directories (to a max depth of 3).
    pub fn assets_path(&self) -> Result<PathBuf, find_folder::Error> {
        let exe_path = std::env::current_exe()?;
        find_folder::Search::ParentsThenKids(5, 3)
            .of(exe_path
                .parent()
                .expect("executable has no parent directory to search")
                .into())
            .for_folder(Self::ASSETS_DIRECTORY_NAME)
    }

    /// Begin building a new OpenGL window.
    pub fn new_window<'a>(&'a self) -> window::Builder<'a, 'static> {
        window::Builder::new(self)
    }

    /// The number of windows currently in the application.
    pub fn window_count(&self) -> usize {
        self.windows.borrow().len()
    }

    /// A reference to the window with the given `Id`.
    pub fn window(&self, id: window::Id) -> Option<std::cell::Ref<Window>> {
        let windows = self.windows.borrow();
        if !windows.contains_key(&id) {
            None
        } else {
            Some(std::cell::Ref::map(windows, |ws| &ws[&id]))
        }
    }

    /// Return whether or not the `App` is currently set to exit when the `Escape` key is
    /// pressed.
    pub fn exit_on_escape(&self) -> bool {
        self.exit_on_escape.get()
    }

    /// Specify whether or not the app should close when the `Escape` key is pressed.
    ///
    /// By default this is `true`.
    pub fn set_exit_on_escape(&self, b: bool) {
        self.exit_on_escape.set(b);
    }

    /// Returns the **App**'s current **LoopMode**.
    pub fn loop_mode(&self) -> LoopMode {
        self.loop_mode.get()
    }

    /// Sets the loop mode of the **App**.
    ///
    /// Note: Setting the loop mode will not affect anything until the end of the current loop
    /// iteration. The behaviour of a single loop iteration is described under each of the
    /// **LoopMode** variants.
    pub fn set_loop_mode(&self, mode: LoopMode) {
        self.loop_mode.set(mode);
    }

    /// A handle to the **App** that can be shared across threads.
    ///
    /// This can be used to "wake up" the **App**'s inner event loop.
    pub fn create_proxy(&self) -> Proxy {
        let events_loop_proxy = self.events_loop.create_proxy();
        Proxy { events_loop_proxy }
    }

    /// Create a new `Ui` for the window with the given `Id`.
    ///
    /// Returns `None` if there is no window for the given `window_id`.
    pub fn new_ui(&self, window_id: window::Id) -> ui::Builder {
        ui::Builder::new(self, window_id)
    }
}

impl Audio {
    /// Enumerate the available audio devices on the system.
    ///
    /// Produces an iterator yielding `audio::Device`s.
    pub fn devices(&self) -> audio::Devices {
        let devices = cpal::devices();
        audio::Devices { devices }
    }

    /// Enumerate the available audio devices on the system that support input streams.
    ///
    /// Produces an iterator yielding `audio::Device`s.
    pub fn input_devices(&self) -> audio::stream::input::Devices {
        let devices = cpal::input_devices();
        audio::stream::input::Devices { devices }
    }

    /// Enumerate the available audio devices on the system that support output streams.
    ///
    /// Produces an iterator yielding `audio::Device`s.
    pub fn output_devices(&self) -> audio::stream::output::Devices {
        let devices = cpal::output_devices();
        audio::stream::output::Devices { devices }
    }

    /// The current default audio input device.
    pub fn default_input_device(&self) -> Option<audio::Device> {
        cpal::default_input_device().map(|device| audio::Device { device })
    }

    /// The current default audio output device.
    pub fn default_output_device(&self) -> Option<audio::Device> {
        cpal::default_output_device().map(|device| audio::Device { device })
    }

    /// Begin building a new input audio stream.
    ///
    /// If this is the first time a stream has been created, this method will spawn the
    /// `cpal::EventLoop::run` method on its own thread, ready to run built streams.
    pub fn new_input_stream<M, F, S>(
        &self,
        model: M,
        capture: F,
    ) -> audio::stream::input::Builder<M, F, S> {
        audio::stream::input::Builder {
            capture,
            builder: self.new_stream(model),
        }
    }

    /// Begin building a new output audio stream.
    ///
    /// If this is the first time a stream has been created, this method will spawn the
    /// `cpal::EventLoop::run` method on its own thread, ready to run built streams.
    pub fn new_output_stream<M, F, S>(
        &self,
        model: M,
        render: F,
    ) -> audio::stream::output::Builder<M, F, S> {
        audio::stream::output::Builder {
            render,
            builder: self.new_stream(model),
        }
    }

    // Builder initialisation shared between input and output streams.
    //
    // If this is the first time a stream has been created, this method will spawn the
    // `cpal::EventLoop::run` method on its own thread, ready to run built streams.
    fn new_stream<M, S>(&self, model: M) -> audio::stream::Builder<M, S> {
        let process_fn_tx = if self.process_fn_tx.borrow().is_none() {
            // First stream: spawn the thread that drives the `cpal::EventLoop` and cache the
            // sender so later streams reuse the same thread.
            let event_loop = self.event_loop.clone();
            let (tx, rx) = mpsc::channel();
            let mut loop_context = audio::stream::LoopContext::new(rx);
            thread::Builder::new()
                .name("cpal::EventLoop::run thread".into())
                .spawn(move || event_loop.run(move |id, data| loop_context.process(id, data)))
                .expect("failed to spawn cpal::EventLoop::run thread");
            *self.process_fn_tx.borrow_mut() = Some(tx.clone());
            tx
        } else {
            self.process_fn_tx.borrow().as_ref().unwrap().clone()
        };

        audio::stream::Builder {
            event_loop: self.event_loop.clone(),
            process_fn_tx,
            model,
            sample_rate: None,
            channels: None,
            frames_per_buffer: None,
            device: None,
            sample_format: PhantomData,
        }
    }
}

impl Proxy {
    /// Wake up the application!
    ///
    /// This wakes up the **App**'s inner event loop and inserts an **Awakened** event.
    pub fn wakeup(&self) -> Result<(), glutin::EventsLoopClosed> {
        self.events_loop_proxy.wakeup()
    }
}
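The conversion in `update_interval` above drives every `LoopMode` constructor: a target frame rate is turned into the minimum `Duration` between updates. Below is a minimal, standalone sketch of that arithmetic. It is not part of `app.rs`; `interval_from_fps` is an illustrative name, and the body simply mirrors the helper above so the resulting values can be checked directly.

use std::time::Duration;

// Mirrors the `update_interval` helper above: convert a target frame rate into
// the minimum `Duration` between updates.
fn interval_from_fps(fps: f64) -> Duration {
    assert!(fps > 0.0);
    const NANOSEC_PER_SEC: f64 = 1_000_000_000.0;
    let interval_nanosecs = NANOSEC_PER_SEC / fps;
    let secs = (interval_nanosecs / NANOSEC_PER_SEC) as u64;
    let nanosecs = (interval_nanosecs % NANOSEC_PER_SEC) as u32;
    Duration::new(secs, nanosecs)
}

fn main() {
    // 60 fps -> roughly 16.67 ms between updates (the default `Rate` mode).
    assert_eq!(interval_from_fps(60.0).subsec_nanos(), 16_666_666);
    // 1 fps -> exactly one second between updates.
    assert_eq!(interval_from_fps(1.0), Duration::new(1, 0));
    println!("60 fps interval: {:?}", interval_from_fps(60.0));
}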
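`Audio::new_stream` uses a spawn-once pattern: the `cpal::EventLoop::run` thread is started the first time a stream is built, and the `mpsc::Sender` is cached in a `RefCell<Option<...>>` so later builders reuse the same thread. The sketch below shows that caching logic in isolation using only the standard library; `LazyWorker` and its string messages are hypothetical stand-ins for the audio types, not part of nannou.

use std::cell::RefCell;
use std::sync::mpsc;
use std::thread;

// Hypothetical stand-in for the audio API: the worker thread and message type
// are placeholders, but the caching logic matches `Audio::new_stream` above.
struct LazyWorker {
    sender: RefCell<Option<mpsc::Sender<String>>>,
}

impl LazyWorker {
    fn new() -> Self {
        LazyWorker { sender: RefCell::new(None) }
    }

    // Spawn the worker thread on the first call only; afterwards, hand out
    // clones of the cached `Sender`.
    fn sender(&self) -> mpsc::Sender<String> {
        if self.sender.borrow().is_none() {
            let (tx, rx) = mpsc::channel();
            thread::spawn(move || {
                for msg in rx {
                    println!("worker received: {}", msg);
                }
            });
            *self.sender.borrow_mut() = Some(tx.clone());
            return tx;
        }
        self.sender.borrow().as_ref().unwrap().clone()
    }
}

fn main() {
    let worker = LazyWorker::new();
    let tx1 = worker.sender(); // spawns the thread
    let tx2 = worker.sender(); // reuses the cached sender
    tx1.send("first".to_string()).unwrap();
    tx2.send("second".to_string()).unwrap();
    // Give the detached worker a moment to drain the channel before exiting.
    thread::sleep(std::time::Duration::from_millis(50));
}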
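`App::window` hands out a borrow of a single entry in a `RefCell<HashMap>` by narrowing the guard with `std::cell::Ref::map`. The following standalone sketch, with a hypothetical `Registry` type in place of the window map, shows the same trick outside of nannou.

use std::cell::{Ref, RefCell};
use std::collections::HashMap;

// Standalone illustration of the `Ref::map` trick used by `App::window`:
// borrow the whole `RefCell<HashMap>`, then narrow the guard to one entry.
struct Registry {
    items: RefCell<HashMap<u32, String>>,
}

impl Registry {
    fn get(&self, id: u32) -> Option<Ref<String>> {
        let items = self.items.borrow();
        if !items.contains_key(&id) {
            None
        } else {
            Some(Ref::map(items, |map| &map[&id]))
        }
    }
}

fn main() {
    let registry = Registry { items: RefCell::new(HashMap::new()) };
    registry.items.borrow_mut().insert(1, "main window".to_string());
    if let Some(name) = registry.get(1) {
        println!("found: {}", *name);
    }
    assert!(registry.get(2).is_none());
}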