file_name
large_stringlengths
4
69
prefix
large_stringlengths
0
26.7k
suffix
large_stringlengths
0
24.8k
middle
large_stringlengths
0
2.12k
fim_type
large_stringclasses
4 values
row.rs
//! This module contains definition of table rows stuff use std::io::{Error, Write}; use std::iter::FromIterator; use std::slice::{Iter, IterMut}; // use std::vec::IntoIter; use std::ops::{Index, IndexMut}; use super::Terminal; use super::format::{ColumnPosition, TableFormat}; use super::utils::NEWLINE; use super::Cell; /// Represent a table row made of cells #[derive(Clone, Debug, Hash, PartialEq, Eq)] pub struct Row { cells: Vec<Cell>, } impl Row { /// Create a new `Row` backed with `cells` vector pub fn new(cells: Vec<Cell>) -> Row { Row { cells } } /// Create an row of length `size`, with empty strings stored pub fn empty() -> Row { Self::new(vec![Cell::default(); 0]) } /// Count the number of column required in the table grid. /// It takes into account horizontal spanning of cells. For /// example, a cell with an hspan of 3 will add 3 column to the grid // #[deprecated(since="0.8.0", note="Will become private in future release. See [issue #87](https://github.com/phsym/prettytable-rs/issues/87)")] pub(crate) fn column_count(&self) -> usize { self.cells.iter().map(|c| c.get_hspan()).sum() } /// Get the number of cells in this row pub fn len(&self) -> usize { self.cells.len() // self.cells.iter().map(|c| c.get_hspan()).sum() } /// Check if the row is empty (has no cell) pub fn is_empty(&self) -> bool { self.cells.is_empty() } /// Get the height of this row // #[deprecated(since="0.8.0", note="Will become private in future release. See [issue #87](https://github.com/phsym/prettytable-rs/issues/87)")] fn get_height(&self) -> usize { let mut height = 1; // Minimum height must be 1 to print empty rows for cell in &self.cells { let h = cell.get_height(); if h > height { height = h; } } height } /// Get the minimum width required by the cell in the column `column`. /// Return 0 if the cell does not exist in this row // #[deprecated(since="0.8.0", note="Will become private in future release. 
See [issue #87](https://github.com/phsym/prettytable-rs/issues/87)")] pub(crate) fn get_column_width(&self, column: usize, format: &TableFormat) -> usize { let mut i = 0; for c in &self.cells { if i + c.get_hspan() > column { if c.get_hspan() == 1 { return c.get_width(); } let (lp, rp) = format.get_padding(); let sep = format .get_column_separator(ColumnPosition::Intern) .map(|_| 1) .unwrap_or_default(); let rem = lp + rp + sep; let mut w = c.get_width(); if w > rem { w -= rem; } else { w = 0; } return (w as f64 / c.get_hspan() as f64).ceil() as usize; } i += c.get_hspan(); } 0 } /// Get the cell at index `idx` pub fn get_cell(&self, idx: usize) -> Option<&Cell> { self.cells.get(idx) } /// Get the mutable cell at index `idx` pub fn get_mut_cell(&mut self, idx: usize) -> Option<&mut Cell> { self.cells.get_mut(idx) } /// Set the `cell` in the row at the given `idx` index pub fn set_cell(&mut self, cell: Cell, idx: usize) -> Result<(), &str> { if idx >= self.len() { return Err("Cannot find cell"); } self.cells[idx] = cell; Ok(()) } /// Append a `cell` at the end of the row pub fn add_cell(&mut self, cell: Cell) { self.cells.push(cell); } /// Insert `cell` at position `index`. If `index` is higher than the row length, /// the cell will be appended at the end pub fn insert_cell(&mut self, index: usize, cell: Cell) { if index < self.cells.len() { self.cells.insert(index, cell); } else { self.add_cell(cell); } } /// Remove the cell at position `index`. 
Silently skip if this cell does not exist pub fn remove_cell(&mut self, index: usize) { if index < self.cells.len() { self.cells.remove(index); } } /// Returns an immutable iterator over cells pub fn iter(&self) -> Iter<Cell> { self.cells.iter() } /// Returns an mutable iterator over cells pub fn iter_mut(&mut self) -> IterMut<Cell> { self.cells.iter_mut() } /// Internal only fn __print<T: Write +?Sized, F>( &self, out: &mut T, format: &TableFormat, col_width: &[usize], f: F, ) -> Result<usize, Error> where F: Fn(&Cell, &mut T, usize, usize, bool) -> Result<(), Error>, { let height = self.get_height(); for i in 0..height { //TODO: Wrap this into dedicated function one day out.write_all(&vec![b' '; format.get_indent()])?; format.print_column_separator(out, ColumnPosition::Left)?; let (lp, rp) = format.get_padding(); let mut j = 0; let mut hspan = 0; // The additional offset caused by cell's horizontal spanning while j + hspan < col_width.len() { out.write_all(&vec![b' '; lp])?; // Left padding // skip_r_fill skip filling the end of the last cell if there's no character // delimiting the end of the table let skip_r_fill = (j == col_width.len() - 1) && format.get_column_separator(ColumnPosition::Right).is_none(); match self.get_cell(j) { Some(c) => { // In case of horizontal spanning, width is the sum of all spanned columns' width let mut w = col_width[j + hspan..j + hspan + c.get_hspan()].iter().sum(); let real_span = c.get_hspan() - 1; w += real_span * (lp + rp) + real_span * format .get_column_separator(ColumnPosition::Intern) .map(|_| 1) .unwrap_or_default(); // Print cell content f(c, out, i, w, skip_r_fill)?; hspan += real_span; // Add span to offset } None => f(&Cell::default(), out, i, col_width[j + hspan], skip_r_fill)?, }; out.write_all(&vec![b' '; rp])?; // Right padding if j + hspan < col_width.len() - 1 { format.print_column_separator(out, ColumnPosition::Intern)?; } j += 1; } format.print_column_separator(out, ColumnPosition::Right)?; 
out.write_all(NEWLINE)?; } Ok(height) } /// Print the row to `out`, with `separator` as column separator, and `col_width` /// specifying the width of each columns. Returns the number of printed lines // #[deprecated(since="0.8.0", note="Will become private in future release. See [issue #87](https://github.com/phsym/prettytable-rs/issues/87)")] pub(crate) fn print<T: Write +?Sized>( &self, out: &mut T, format: &TableFormat, col_width: &[usize], ) -> Result<usize, Error> { self.__print(out, format, col_width, Cell::print) } /// Print the row to terminal `out`, with `separator` as column separator, and `col_width` /// specifying the width of each columns. Apply style when needed. returns the number of printed lines // #[deprecated(since="0.8.0", note="Will become private in future release. See [issue #87](https://github.com/phsym/prettytable-rs/issues/87)")] pub(crate) fn print_term<T: Terminal +?Sized>( &self, out: &mut T, format: &TableFormat, col_width: &[usize], ) -> Result<usize, Error> { self.__print(out, format, col_width, Cell::print_term) } /// Print the row in HTML format to `out`. /// /// If the row is has fewer columns than `col_num`, the row is padded with empty cells. pub fn print_html<T: Write +?Sized>(&self, out: &mut T, col_num: usize) -> Result<(), Error>
} impl Default for Row { fn default() -> Row { Row::empty() } } impl Index<usize> for Row { type Output = Cell; fn index(&self, idx: usize) -> &Self::Output { &self.cells[idx] } } impl IndexMut<usize> for Row { fn index_mut(&mut self, idx: usize) -> &mut Self::Output { &mut self.cells[idx] } } impl<A: ToString> FromIterator<A> for Row { fn from_iter<T>(iterator: T) -> Row where T: IntoIterator<Item = A>, { Self::new(iterator.into_iter().map(|ref e| Cell::from(e)).collect()) } } impl<T, A> From<T> for Row where A: ToString, T: IntoIterator<Item = A>, { fn from(it: T) -> Row { Self::from_iter(it) } } impl<'a> IntoIterator for &'a Row { type Item = &'a Cell; type IntoIter = Iter<'a, Cell>; fn into_iter(self) -> Self::IntoIter { self.iter() } } // impl IntoIterator for Row { // type Item = Cell; // type IntoIter = IntoIter<Cell>; // fn into_iter(self) -> Self::IntoIter { // self.cells.into_iter() // } // } impl<'a> IntoIterator for &'a mut Row { type Item = &'a mut Cell; type IntoIter = IterMut<'a, Cell>; fn into_iter(self) -> Self::IntoIter { self.iter_mut() } } impl<S: ToString> Extend<S> for Row { fn extend<T: IntoIterator<Item = S>>(&mut self, iter: T) { self.cells .extend(iter.into_iter().map(|s| Cell::new(&s.to_string()))); } } // impl <S: Into<Cell>> Extend<S> for Row { // fn extend<T: IntoIterator<Item=S>>(&mut self, iter: T) { // self.cells.extend(iter.into_iter().map(|s| s.into())); // } // } /// This macro simplifies `Row` creation /// /// The syntax support style spec /// # Example /// ``` /// # #[macro_use] extern crate prettytable; /// # fn main() { /// // Create a normal row /// let row1 = row!["Element 1", "Element 2", "Element 3"]; /// // Create a row with all cells formatted with red foreground color, yellow background color /// // bold, italic, align in the center of the cell /// let row2 = row![FrBybic => "Element 1", "Element 2", "Element 3"]; /// // Create a row with first cell in blue, second one in red, and last one with default style /// let 
row3 = row![Fb->"blue", Fr->"red", "normal"]; /// // Do something with rows /// # drop(row1); /// # drop(row2); /// # drop(row3); /// # } /// ``` /// /// For details about style specifier syntax, check doc for [`Cell::style_spec`](cell/struct.Cell.html#method.style_spec) method #[macro_export] macro_rules! row { (($($out:tt)*);) => (vec![$($out)*]); (($($out:tt)*); $value:expr) => (vec![$($out)* $crate::cell!($value)]); (($($out:tt)*); $value:expr, $($n:tt)*) => ($crate::row!(($($out)* $crate::cell!($value),); $($n)*)); (($($out:tt)*); $style:ident -> $value:expr) => (vec![$($out)* $crate::cell!($style -> $value)]); (($($out:tt)*); $style:ident -> $value:expr, $($n: tt)*) => ($crate::row!(($($out)* $crate::cell!($style -> $value),); $($n)*)); ($($content:expr), *) => ($crate::Row::new(vec![$($crate::cell!($content)), *])); // This line may not be needed starting from Rust 1.20 ($style:ident => $($content:expr), *) => ($crate::Row::new(vec![$($crate::cell!($style -> $content)), *])); ($style:ident => $($content:expr,) *) => ($crate::Row::new(vec![$($crate::cell!($style -> $content)), *])); ($($content:tt)*) => ($crate::Row::new($crate::row!((); $($content)*))); } #[cfg(test)] mod tests { use super::*; use Cell; #[test] fn row_default_empty() { let row1 = Row::default(); assert_eq!(row1.len(), 0); assert!(row1.is_empty()); } #[test] fn get_add_set_cell() { let mut row = Row::from(vec!["foo", "bar", "foobar"]); assert_eq!(row.len(), 3); assert!(row.get_mut_cell(12).is_none()); let c1 = row.get_mut_cell(0).unwrap().clone(); assert_eq!(c1.get_content(), "foo"); let c1 = Cell::from(&"baz"); assert!(row.set_cell(c1.clone(), 1000).is_err()); assert!(row.set_cell(c1.clone(), 0).is_ok()); assert_eq!(row.get_cell(0).unwrap().get_content(), "baz"); row.add_cell(c1.clone()); assert_eq!(row.len(), 4); assert_eq!(row.get_cell(3).unwrap().get_content(), "baz"); } #[test] fn insert_cell() { let mut row = Row::from(vec!["foo", "bar", "foobar"]); assert_eq!(row.len(), 3); let cell = 
Cell::new("baz"); row.insert_cell(1000, cell.clone()); assert_eq!(row.len(), 4); assert_eq!(row.get_cell(3).unwrap().get_content(), "baz"); row.insert_cell(1, cell.clone()); assert_eq!(row.len(), 5); assert_eq!(row.get_cell(1).unwrap().get_content(), "baz"); } #[test] fn remove_cell() { let mut row = Row::from(vec!["foo", "bar", "foobar"]); assert_eq!(row.len(), 3); row.remove_cell(1000); assert_eq!(row.len(), 3); row.remove_cell(1); assert_eq!(row.len(), 2); assert_eq!(row.get_cell(0).unwrap().get_content(), "foo"); assert_eq!(row.get_cell(1).unwrap().get_content(), "foobar"); } #[test] fn extend_row() { let mut row = Row::from(vec!["foo", "bar", "foobar"]); row.extend(vec!["A", "B", "C"]); assert_eq!(row.len(), 6); assert_eq!(row.get_cell(3).unwrap().get_content(), "A"); assert_eq!(row.get_cell(4).unwrap().get_content(), "B"); assert_eq!(row.get_cell(5).unwrap().get_content(), "C"); } }
{ let mut printed_columns = 0; for cell in self.iter() { printed_columns += cell.print_html(out)?; } // Pad with empty cells, if target width is not reached for _ in 0..col_num - printed_columns { Cell::default().print_html(out)?; } Ok(()) }
identifier_body
row.rs
//! This module contains definition of table rows stuff use std::io::{Error, Write}; use std::iter::FromIterator; use std::slice::{Iter, IterMut}; // use std::vec::IntoIter; use std::ops::{Index, IndexMut}; use super::Terminal; use super::format::{ColumnPosition, TableFormat}; use super::utils::NEWLINE; use super::Cell; /// Represent a table row made of cells #[derive(Clone, Debug, Hash, PartialEq, Eq)] pub struct Row { cells: Vec<Cell>, } impl Row { /// Create a new `Row` backed with `cells` vector pub fn new(cells: Vec<Cell>) -> Row { Row { cells } } /// Create an row of length `size`, with empty strings stored pub fn empty() -> Row { Self::new(vec![Cell::default(); 0]) } /// Count the number of column required in the table grid. /// It takes into account horizontal spanning of cells. For /// example, a cell with an hspan of 3 will add 3 column to the grid // #[deprecated(since="0.8.0", note="Will become private in future release. See [issue #87](https://github.com/phsym/prettytable-rs/issues/87)")] pub(crate) fn column_count(&self) -> usize { self.cells.iter().map(|c| c.get_hspan()).sum() } /// Get the number of cells in this row pub fn len(&self) -> usize { self.cells.len() // self.cells.iter().map(|c| c.get_hspan()).sum() } /// Check if the row is empty (has no cell) pub fn is_empty(&self) -> bool { self.cells.is_empty() } /// Get the height of this row // #[deprecated(since="0.8.0", note="Will become private in future release. See [issue #87](https://github.com/phsym/prettytable-rs/issues/87)")] fn get_height(&self) -> usize { let mut height = 1; // Minimum height must be 1 to print empty rows for cell in &self.cells { let h = cell.get_height(); if h > height { height = h; } } height } /// Get the minimum width required by the cell in the column `column`. /// Return 0 if the cell does not exist in this row // #[deprecated(since="0.8.0", note="Will become private in future release. 
See [issue #87](https://github.com/phsym/prettytable-rs/issues/87)")] pub(crate) fn get_column_width(&self, column: usize, format: &TableFormat) -> usize { let mut i = 0; for c in &self.cells { if i + c.get_hspan() > column { if c.get_hspan() == 1 { return c.get_width(); } let (lp, rp) = format.get_padding(); let sep = format .get_column_separator(ColumnPosition::Intern) .map(|_| 1) .unwrap_or_default(); let rem = lp + rp + sep; let mut w = c.get_width(); if w > rem { w -= rem; } else { w = 0; } return (w as f64 / c.get_hspan() as f64).ceil() as usize; } i += c.get_hspan(); } 0 } /// Get the cell at index `idx` pub fn get_cell(&self, idx: usize) -> Option<&Cell> { self.cells.get(idx) } /// Get the mutable cell at index `idx` pub fn get_mut_cell(&mut self, idx: usize) -> Option<&mut Cell> { self.cells.get_mut(idx) } /// Set the `cell` in the row at the given `idx` index pub fn set_cell(&mut self, cell: Cell, idx: usize) -> Result<(), &str> { if idx >= self.len() { return Err("Cannot find cell"); } self.cells[idx] = cell; Ok(()) } /// Append a `cell` at the end of the row pub fn add_cell(&mut self, cell: Cell) { self.cells.push(cell); } /// Insert `cell` at position `index`. If `index` is higher than the row length, /// the cell will be appended at the end pub fn insert_cell(&mut self, index: usize, cell: Cell) { if index < self.cells.len() { self.cells.insert(index, cell); } else { self.add_cell(cell); } } /// Remove the cell at position `index`. Silently skip if this cell does not exist pub fn remove_cell(&mut self, index: usize) { if index < self.cells.len() { self.cells.remove(index); } } /// Returns an immutable iterator over cells pub fn
(&self) -> Iter<Cell> { self.cells.iter() } /// Returns an mutable iterator over cells pub fn iter_mut(&mut self) -> IterMut<Cell> { self.cells.iter_mut() } /// Internal only fn __print<T: Write +?Sized, F>( &self, out: &mut T, format: &TableFormat, col_width: &[usize], f: F, ) -> Result<usize, Error> where F: Fn(&Cell, &mut T, usize, usize, bool) -> Result<(), Error>, { let height = self.get_height(); for i in 0..height { //TODO: Wrap this into dedicated function one day out.write_all(&vec![b' '; format.get_indent()])?; format.print_column_separator(out, ColumnPosition::Left)?; let (lp, rp) = format.get_padding(); let mut j = 0; let mut hspan = 0; // The additional offset caused by cell's horizontal spanning while j + hspan < col_width.len() { out.write_all(&vec![b' '; lp])?; // Left padding // skip_r_fill skip filling the end of the last cell if there's no character // delimiting the end of the table let skip_r_fill = (j == col_width.len() - 1) && format.get_column_separator(ColumnPosition::Right).is_none(); match self.get_cell(j) { Some(c) => { // In case of horizontal spanning, width is the sum of all spanned columns' width let mut w = col_width[j + hspan..j + hspan + c.get_hspan()].iter().sum(); let real_span = c.get_hspan() - 1; w += real_span * (lp + rp) + real_span * format .get_column_separator(ColumnPosition::Intern) .map(|_| 1) .unwrap_or_default(); // Print cell content f(c, out, i, w, skip_r_fill)?; hspan += real_span; // Add span to offset } None => f(&Cell::default(), out, i, col_width[j + hspan], skip_r_fill)?, }; out.write_all(&vec![b' '; rp])?; // Right padding if j + hspan < col_width.len() - 1 { format.print_column_separator(out, ColumnPosition::Intern)?; } j += 1; } format.print_column_separator(out, ColumnPosition::Right)?; out.write_all(NEWLINE)?; } Ok(height) } /// Print the row to `out`, with `separator` as column separator, and `col_width` /// specifying the width of each columns. 
Returns the number of printed lines // #[deprecated(since="0.8.0", note="Will become private in future release. See [issue #87](https://github.com/phsym/prettytable-rs/issues/87)")] pub(crate) fn print<T: Write +?Sized>( &self, out: &mut T, format: &TableFormat, col_width: &[usize], ) -> Result<usize, Error> { self.__print(out, format, col_width, Cell::print) } /// Print the row to terminal `out`, with `separator` as column separator, and `col_width` /// specifying the width of each columns. Apply style when needed. returns the number of printed lines // #[deprecated(since="0.8.0", note="Will become private in future release. See [issue #87](https://github.com/phsym/prettytable-rs/issues/87)")] pub(crate) fn print_term<T: Terminal +?Sized>( &self, out: &mut T, format: &TableFormat, col_width: &[usize], ) -> Result<usize, Error> { self.__print(out, format, col_width, Cell::print_term) } /// Print the row in HTML format to `out`. /// /// If the row is has fewer columns than `col_num`, the row is padded with empty cells. 
pub fn print_html<T: Write +?Sized>(&self, out: &mut T, col_num: usize) -> Result<(), Error> { let mut printed_columns = 0; for cell in self.iter() { printed_columns += cell.print_html(out)?; } // Pad with empty cells, if target width is not reached for _ in 0..col_num - printed_columns { Cell::default().print_html(out)?; } Ok(()) } } impl Default for Row { fn default() -> Row { Row::empty() } } impl Index<usize> for Row { type Output = Cell; fn index(&self, idx: usize) -> &Self::Output { &self.cells[idx] } } impl IndexMut<usize> for Row { fn index_mut(&mut self, idx: usize) -> &mut Self::Output { &mut self.cells[idx] } } impl<A: ToString> FromIterator<A> for Row { fn from_iter<T>(iterator: T) -> Row where T: IntoIterator<Item = A>, { Self::new(iterator.into_iter().map(|ref e| Cell::from(e)).collect()) } } impl<T, A> From<T> for Row where A: ToString, T: IntoIterator<Item = A>, { fn from(it: T) -> Row { Self::from_iter(it) } } impl<'a> IntoIterator for &'a Row { type Item = &'a Cell; type IntoIter = Iter<'a, Cell>; fn into_iter(self) -> Self::IntoIter { self.iter() } } // impl IntoIterator for Row { // type Item = Cell; // type IntoIter = IntoIter<Cell>; // fn into_iter(self) -> Self::IntoIter { // self.cells.into_iter() // } // } impl<'a> IntoIterator for &'a mut Row { type Item = &'a mut Cell; type IntoIter = IterMut<'a, Cell>; fn into_iter(self) -> Self::IntoIter { self.iter_mut() } } impl<S: ToString> Extend<S> for Row { fn extend<T: IntoIterator<Item = S>>(&mut self, iter: T) { self.cells .extend(iter.into_iter().map(|s| Cell::new(&s.to_string()))); } } // impl <S: Into<Cell>> Extend<S> for Row { // fn extend<T: IntoIterator<Item=S>>(&mut self, iter: T) { // self.cells.extend(iter.into_iter().map(|s| s.into())); // } // } /// This macro simplifies `Row` creation /// /// The syntax support style spec /// # Example /// ``` /// # #[macro_use] extern crate prettytable; /// # fn main() { /// // Create a normal row /// let row1 = row!["Element 1", "Element 2", 
"Element 3"]; /// // Create a row with all cells formatted with red foreground color, yellow background color /// // bold, italic, align in the center of the cell /// let row2 = row![FrBybic => "Element 1", "Element 2", "Element 3"]; /// // Create a row with first cell in blue, second one in red, and last one with default style /// let row3 = row![Fb->"blue", Fr->"red", "normal"]; /// // Do something with rows /// # drop(row1); /// # drop(row2); /// # drop(row3); /// # } /// ``` /// /// For details about style specifier syntax, check doc for [`Cell::style_spec`](cell/struct.Cell.html#method.style_spec) method #[macro_export] macro_rules! row { (($($out:tt)*);) => (vec![$($out)*]); (($($out:tt)*); $value:expr) => (vec![$($out)* $crate::cell!($value)]); (($($out:tt)*); $value:expr, $($n:tt)*) => ($crate::row!(($($out)* $crate::cell!($value),); $($n)*)); (($($out:tt)*); $style:ident -> $value:expr) => (vec![$($out)* $crate::cell!($style -> $value)]); (($($out:tt)*); $style:ident -> $value:expr, $($n: tt)*) => ($crate::row!(($($out)* $crate::cell!($style -> $value),); $($n)*)); ($($content:expr), *) => ($crate::Row::new(vec![$($crate::cell!($content)), *])); // This line may not be needed starting from Rust 1.20 ($style:ident => $($content:expr), *) => ($crate::Row::new(vec![$($crate::cell!($style -> $content)), *])); ($style:ident => $($content:expr,) *) => ($crate::Row::new(vec![$($crate::cell!($style -> $content)), *])); ($($content:tt)*) => ($crate::Row::new($crate::row!((); $($content)*))); } #[cfg(test)] mod tests { use super::*; use Cell; #[test] fn row_default_empty() { let row1 = Row::default(); assert_eq!(row1.len(), 0); assert!(row1.is_empty()); } #[test] fn get_add_set_cell() { let mut row = Row::from(vec!["foo", "bar", "foobar"]); assert_eq!(row.len(), 3); assert!(row.get_mut_cell(12).is_none()); let c1 = row.get_mut_cell(0).unwrap().clone(); assert_eq!(c1.get_content(), "foo"); let c1 = Cell::from(&"baz"); assert!(row.set_cell(c1.clone(), 1000).is_err()); 
assert!(row.set_cell(c1.clone(), 0).is_ok()); assert_eq!(row.get_cell(0).unwrap().get_content(), "baz"); row.add_cell(c1.clone()); assert_eq!(row.len(), 4); assert_eq!(row.get_cell(3).unwrap().get_content(), "baz"); } #[test] fn insert_cell() { let mut row = Row::from(vec!["foo", "bar", "foobar"]); assert_eq!(row.len(), 3); let cell = Cell::new("baz"); row.insert_cell(1000, cell.clone()); assert_eq!(row.len(), 4); assert_eq!(row.get_cell(3).unwrap().get_content(), "baz"); row.insert_cell(1, cell.clone()); assert_eq!(row.len(), 5); assert_eq!(row.get_cell(1).unwrap().get_content(), "baz"); } #[test] fn remove_cell() { let mut row = Row::from(vec!["foo", "bar", "foobar"]); assert_eq!(row.len(), 3); row.remove_cell(1000); assert_eq!(row.len(), 3); row.remove_cell(1); assert_eq!(row.len(), 2); assert_eq!(row.get_cell(0).unwrap().get_content(), "foo"); assert_eq!(row.get_cell(1).unwrap().get_content(), "foobar"); } #[test] fn extend_row() { let mut row = Row::from(vec!["foo", "bar", "foobar"]); row.extend(vec!["A", "B", "C"]); assert_eq!(row.len(), 6); assert_eq!(row.get_cell(3).unwrap().get_content(), "A"); assert_eq!(row.get_cell(4).unwrap().get_content(), "B"); assert_eq!(row.get_cell(5).unwrap().get_content(), "C"); } }
iter
identifier_name
row.rs
//! This module contains definition of table rows stuff use std::io::{Error, Write}; use std::iter::FromIterator; use std::slice::{Iter, IterMut}; // use std::vec::IntoIter; use std::ops::{Index, IndexMut}; use super::Terminal; use super::format::{ColumnPosition, TableFormat}; use super::utils::NEWLINE; use super::Cell; /// Represent a table row made of cells #[derive(Clone, Debug, Hash, PartialEq, Eq)] pub struct Row { cells: Vec<Cell>, } impl Row { /// Create a new `Row` backed with `cells` vector pub fn new(cells: Vec<Cell>) -> Row { Row { cells } } /// Create an row of length `size`, with empty strings stored pub fn empty() -> Row { Self::new(vec![Cell::default(); 0]) } /// Count the number of column required in the table grid. /// It takes into account horizontal spanning of cells. For /// example, a cell with an hspan of 3 will add 3 column to the grid // #[deprecated(since="0.8.0", note="Will become private in future release. See [issue #87](https://github.com/phsym/prettytable-rs/issues/87)")] pub(crate) fn column_count(&self) -> usize { self.cells.iter().map(|c| c.get_hspan()).sum() } /// Get the number of cells in this row pub fn len(&self) -> usize { self.cells.len() // self.cells.iter().map(|c| c.get_hspan()).sum() } /// Check if the row is empty (has no cell) pub fn is_empty(&self) -> bool { self.cells.is_empty() } /// Get the height of this row // #[deprecated(since="0.8.0", note="Will become private in future release. See [issue #87](https://github.com/phsym/prettytable-rs/issues/87)")] fn get_height(&self) -> usize { let mut height = 1; // Minimum height must be 1 to print empty rows for cell in &self.cells { let h = cell.get_height(); if h > height { height = h; } } height }
pub(crate) fn get_column_width(&self, column: usize, format: &TableFormat) -> usize { let mut i = 0; for c in &self.cells { if i + c.get_hspan() > column { if c.get_hspan() == 1 { return c.get_width(); } let (lp, rp) = format.get_padding(); let sep = format .get_column_separator(ColumnPosition::Intern) .map(|_| 1) .unwrap_or_default(); let rem = lp + rp + sep; let mut w = c.get_width(); if w > rem { w -= rem; } else { w = 0; } return (w as f64 / c.get_hspan() as f64).ceil() as usize; } i += c.get_hspan(); } 0 } /// Get the cell at index `idx` pub fn get_cell(&self, idx: usize) -> Option<&Cell> { self.cells.get(idx) } /// Get the mutable cell at index `idx` pub fn get_mut_cell(&mut self, idx: usize) -> Option<&mut Cell> { self.cells.get_mut(idx) } /// Set the `cell` in the row at the given `idx` index pub fn set_cell(&mut self, cell: Cell, idx: usize) -> Result<(), &str> { if idx >= self.len() { return Err("Cannot find cell"); } self.cells[idx] = cell; Ok(()) } /// Append a `cell` at the end of the row pub fn add_cell(&mut self, cell: Cell) { self.cells.push(cell); } /// Insert `cell` at position `index`. If `index` is higher than the row length, /// the cell will be appended at the end pub fn insert_cell(&mut self, index: usize, cell: Cell) { if index < self.cells.len() { self.cells.insert(index, cell); } else { self.add_cell(cell); } } /// Remove the cell at position `index`. 
Silently skip if this cell does not exist pub fn remove_cell(&mut self, index: usize) { if index < self.cells.len() { self.cells.remove(index); } } /// Returns an immutable iterator over cells pub fn iter(&self) -> Iter<Cell> { self.cells.iter() } /// Returns an mutable iterator over cells pub fn iter_mut(&mut self) -> IterMut<Cell> { self.cells.iter_mut() } /// Internal only fn __print<T: Write +?Sized, F>( &self, out: &mut T, format: &TableFormat, col_width: &[usize], f: F, ) -> Result<usize, Error> where F: Fn(&Cell, &mut T, usize, usize, bool) -> Result<(), Error>, { let height = self.get_height(); for i in 0..height { //TODO: Wrap this into dedicated function one day out.write_all(&vec![b' '; format.get_indent()])?; format.print_column_separator(out, ColumnPosition::Left)?; let (lp, rp) = format.get_padding(); let mut j = 0; let mut hspan = 0; // The additional offset caused by cell's horizontal spanning while j + hspan < col_width.len() { out.write_all(&vec![b' '; lp])?; // Left padding // skip_r_fill skip filling the end of the last cell if there's no character // delimiting the end of the table let skip_r_fill = (j == col_width.len() - 1) && format.get_column_separator(ColumnPosition::Right).is_none(); match self.get_cell(j) { Some(c) => { // In case of horizontal spanning, width is the sum of all spanned columns' width let mut w = col_width[j + hspan..j + hspan + c.get_hspan()].iter().sum(); let real_span = c.get_hspan() - 1; w += real_span * (lp + rp) + real_span * format .get_column_separator(ColumnPosition::Intern) .map(|_| 1) .unwrap_or_default(); // Print cell content f(c, out, i, w, skip_r_fill)?; hspan += real_span; // Add span to offset } None => f(&Cell::default(), out, i, col_width[j + hspan], skip_r_fill)?, }; out.write_all(&vec![b' '; rp])?; // Right padding if j + hspan < col_width.len() - 1 { format.print_column_separator(out, ColumnPosition::Intern)?; } j += 1; } format.print_column_separator(out, ColumnPosition::Right)?; 
out.write_all(NEWLINE)?; } Ok(height) } /// Print the row to `out`, with `separator` as column separator, and `col_width` /// specifying the width of each columns. Returns the number of printed lines // #[deprecated(since="0.8.0", note="Will become private in future release. See [issue #87](https://github.com/phsym/prettytable-rs/issues/87)")] pub(crate) fn print<T: Write +?Sized>( &self, out: &mut T, format: &TableFormat, col_width: &[usize], ) -> Result<usize, Error> { self.__print(out, format, col_width, Cell::print) } /// Print the row to terminal `out`, with `separator` as column separator, and `col_width` /// specifying the width of each columns. Apply style when needed. returns the number of printed lines // #[deprecated(since="0.8.0", note="Will become private in future release. See [issue #87](https://github.com/phsym/prettytable-rs/issues/87)")] pub(crate) fn print_term<T: Terminal +?Sized>( &self, out: &mut T, format: &TableFormat, col_width: &[usize], ) -> Result<usize, Error> { self.__print(out, format, col_width, Cell::print_term) } /// Print the row in HTML format to `out`. /// /// If the row is has fewer columns than `col_num`, the row is padded with empty cells. 
pub fn print_html<T: Write +?Sized>(&self, out: &mut T, col_num: usize) -> Result<(), Error> { let mut printed_columns = 0; for cell in self.iter() { printed_columns += cell.print_html(out)?; } // Pad with empty cells, if target width is not reached for _ in 0..col_num - printed_columns { Cell::default().print_html(out)?; } Ok(()) } } impl Default for Row { fn default() -> Row { Row::empty() } } impl Index<usize> for Row { type Output = Cell; fn index(&self, idx: usize) -> &Self::Output { &self.cells[idx] } } impl IndexMut<usize> for Row { fn index_mut(&mut self, idx: usize) -> &mut Self::Output { &mut self.cells[idx] } } impl<A: ToString> FromIterator<A> for Row { fn from_iter<T>(iterator: T) -> Row where T: IntoIterator<Item = A>, { Self::new(iterator.into_iter().map(|ref e| Cell::from(e)).collect()) } } impl<T, A> From<T> for Row where A: ToString, T: IntoIterator<Item = A>, { fn from(it: T) -> Row { Self::from_iter(it) } } impl<'a> IntoIterator for &'a Row { type Item = &'a Cell; type IntoIter = Iter<'a, Cell>; fn into_iter(self) -> Self::IntoIter { self.iter() } } // impl IntoIterator for Row { // type Item = Cell; // type IntoIter = IntoIter<Cell>; // fn into_iter(self) -> Self::IntoIter { // self.cells.into_iter() // } // } impl<'a> IntoIterator for &'a mut Row { type Item = &'a mut Cell; type IntoIter = IterMut<'a, Cell>; fn into_iter(self) -> Self::IntoIter { self.iter_mut() } } impl<S: ToString> Extend<S> for Row { fn extend<T: IntoIterator<Item = S>>(&mut self, iter: T) { self.cells .extend(iter.into_iter().map(|s| Cell::new(&s.to_string()))); } } // impl <S: Into<Cell>> Extend<S> for Row { // fn extend<T: IntoIterator<Item=S>>(&mut self, iter: T) { // self.cells.extend(iter.into_iter().map(|s| s.into())); // } // } /// This macro simplifies `Row` creation /// /// The syntax support style spec /// # Example /// ``` /// # #[macro_use] extern crate prettytable; /// # fn main() { /// // Create a normal row /// let row1 = row!["Element 1", "Element 2", 
"Element 3"]; /// // Create a row with all cells formatted with red foreground color, yellow background color /// // bold, italic, align in the center of the cell /// let row2 = row![FrBybic => "Element 1", "Element 2", "Element 3"]; /// // Create a row with first cell in blue, second one in red, and last one with default style /// let row3 = row![Fb->"blue", Fr->"red", "normal"]; /// // Do something with rows /// # drop(row1); /// # drop(row2); /// # drop(row3); /// # } /// ``` /// /// For details about style specifier syntax, check doc for [`Cell::style_spec`](cell/struct.Cell.html#method.style_spec) method #[macro_export] macro_rules! row { (($($out:tt)*);) => (vec![$($out)*]); (($($out:tt)*); $value:expr) => (vec![$($out)* $crate::cell!($value)]); (($($out:tt)*); $value:expr, $($n:tt)*) => ($crate::row!(($($out)* $crate::cell!($value),); $($n)*)); (($($out:tt)*); $style:ident -> $value:expr) => (vec![$($out)* $crate::cell!($style -> $value)]); (($($out:tt)*); $style:ident -> $value:expr, $($n: tt)*) => ($crate::row!(($($out)* $crate::cell!($style -> $value),); $($n)*)); ($($content:expr), *) => ($crate::Row::new(vec![$($crate::cell!($content)), *])); // This line may not be needed starting from Rust 1.20 ($style:ident => $($content:expr), *) => ($crate::Row::new(vec![$($crate::cell!($style -> $content)), *])); ($style:ident => $($content:expr,) *) => ($crate::Row::new(vec![$($crate::cell!($style -> $content)), *])); ($($content:tt)*) => ($crate::Row::new($crate::row!((); $($content)*))); } #[cfg(test)] mod tests { use super::*; use Cell; #[test] fn row_default_empty() { let row1 = Row::default(); assert_eq!(row1.len(), 0); assert!(row1.is_empty()); } #[test] fn get_add_set_cell() { let mut row = Row::from(vec!["foo", "bar", "foobar"]); assert_eq!(row.len(), 3); assert!(row.get_mut_cell(12).is_none()); let c1 = row.get_mut_cell(0).unwrap().clone(); assert_eq!(c1.get_content(), "foo"); let c1 = Cell::from(&"baz"); assert!(row.set_cell(c1.clone(), 1000).is_err()); 
assert!(row.set_cell(c1.clone(), 0).is_ok()); assert_eq!(row.get_cell(0).unwrap().get_content(), "baz"); row.add_cell(c1.clone()); assert_eq!(row.len(), 4); assert_eq!(row.get_cell(3).unwrap().get_content(), "baz"); } #[test] fn insert_cell() { let mut row = Row::from(vec!["foo", "bar", "foobar"]); assert_eq!(row.len(), 3); let cell = Cell::new("baz"); row.insert_cell(1000, cell.clone()); assert_eq!(row.len(), 4); assert_eq!(row.get_cell(3).unwrap().get_content(), "baz"); row.insert_cell(1, cell.clone()); assert_eq!(row.len(), 5); assert_eq!(row.get_cell(1).unwrap().get_content(), "baz"); } #[test] fn remove_cell() { let mut row = Row::from(vec!["foo", "bar", "foobar"]); assert_eq!(row.len(), 3); row.remove_cell(1000); assert_eq!(row.len(), 3); row.remove_cell(1); assert_eq!(row.len(), 2); assert_eq!(row.get_cell(0).unwrap().get_content(), "foo"); assert_eq!(row.get_cell(1).unwrap().get_content(), "foobar"); } #[test] fn extend_row() { let mut row = Row::from(vec!["foo", "bar", "foobar"]); row.extend(vec!["A", "B", "C"]); assert_eq!(row.len(), 6); assert_eq!(row.get_cell(3).unwrap().get_content(), "A"); assert_eq!(row.get_cell(4).unwrap().get_content(), "B"); assert_eq!(row.get_cell(5).unwrap().get_content(), "C"); } }
/// Get the minimum width required by the cell in the column `column`. /// Return 0 if the cell does not exist in this row // #[deprecated(since="0.8.0", note="Will become private in future release. See [issue #87](https://github.com/phsym/prettytable-rs/issues/87)")]
random_line_split
codegen.rs
use grammar::{Grammar, NontermName, Rule, Sym, TermName}; pub fn codegen<B:BackendText>(back: &mut B) -> String where // IMO these should not be necessary, see Rust issue #29143 B::Block: RenderIndent { let mut s = String::new(); s = s + &back.prefix(); let indent = back.rule_indent_preference(); let mut cg = Codegen::new(back); for rule in &cg.grammar().rules { // FIXME: make `fn on_rule` take a `&Rule` instead of cloning. let (c, blocks) = cg.on_rule(rule.clone()); let l_a = cg.backend.nonterm_label(rule.left); let b = cg.backend.block(l_a, c); s = s + &b.render_indent(indent); let blocks: String = blocks.iter() .map(|b|b.render_indent(indent)) .collect(); s = s + &blocks; } s = s + &cg.backend.suffix(); return s; } pub trait RenderIndent { fn render_indent(&self, usize) -> String; fn render(&self) -> String { self.render_indent(0) } } pub trait BackendText: Backend where Self::Block: RenderIndent { fn prefix(&self) -> String; fn suffix(&self) -> String; fn rule_indent_preference(&self) -> usize; } pub trait Backend { type Command; type Expr; type Label: Clone; type Block; fn grammar(&self) -> &Grammar<usize>; // (The label generators are all non `&mut self` because in // principle we should generate the labels ahead of time // for any given grammar.) /// L_0 is the central loop of the parser. fn label_0(&self) -> Self::Label; /// R_A_k labels function call return to nonterm N from the /// call associated with A_k. (A_k is unique in the grammar /// and thus we can derive `N` from it in the formalism, but /// it seems simpler to just pass it along in this API here.) fn return_label(&self, n: NontermName, a_k: (NontermName, usize)) -> Self::Label; /// L_A labels parse function for A. fn nonterm_label(&self, a: NontermName) -> Self::Label; /// L_A_i labels function for parsing ith alternate α_i of A. fn alternate_label(&self, a_i: (NontermName, usize)) -> Self::Label; /// `L: C` /// (note that `C` must have control flow ending in goto...) 
fn block(&self, l: Self::Label, c: Self::Command) -> Self::Block; /// Execute this command to report the parse attempt failed. fn report_parse_failure(&self, &str) -> Self::Command; /// Execute this command if something unexpected happened /// in the generated code. fn panic_fail(&self, &str) -> Self::Command; /// the no-op command makes some constructions easier. fn no_op(&self) -> Self::Command; /// `cmd1, cmd2` fn seq(&self, cmd1: Self::Command, cmd2: Self::Command) -> Self::Command; /// `if test { then } fn if_(&self, test: Self::Expr, then: Self::Command) -> Self::Command; /// `if test { then } else { else_ }` fn if_else(&self, test: Self::Expr, then: Self::Command, else_: Self::Command) -> Self::Command; /// `j := j + 1` fn increment_curr(&self) -> Self::Command; /// let L = label; /// `goto L` fn goto(&self, label: Self::Label) -> Self::Command; /// this comes up a lot. fn goto_l0(&self) -> Self::Command { let l0 = self.label_0(); self.goto(l0) } /// `I[j] == a` fn curr_matches_term(&self, a: TermName) -> Self::Expr; /// let x = I[j]; let N = n; /// `x in FIRST(N$)` /// /// The leading optional component in alpha is meant to be /// the first element of alpha, if it is present at all. fn test_end<E:Copy>(&self, n: NontermName) -> Self::Expr; /// let x = I[j]; let α = alpha; /// `x in FIRST(α) or empty in FIRST(α) and x in FOLLOW(A)` /// /// The leading optional component in alpha is meant to be /// the first element of alpha, if it is present at all. 
fn test<E:Copy>(&self, a: NontermName, alpha: (Option<NontermName>, &[Sym<E>])) -> Self::Expr; /// `c_u := create(l, c_u, j)` fn create(&self, l: Self::Label) -> Self::Command; /// `add(l, c_u, j) fn add(&self, l: Self::Label) -> Self::Command; /// `pop(c_u, j)` fn pop(&self) -> Self::Command; } pub struct Codegen<'a, B:Backend+'a> { pub backend: &'a mut B, } impl<'a, C:Backend> Codegen<'a, C> { pub fn new(back: &'a mut C) -> Self { Codegen { backend: back } } pub fn grammar(&self) -> &Grammar<usize> { self.backend.grammar() } /// code(aα, j, X) = if I[j] = a {j := j+1} else {goto L_0} pub fn on_term(&self, a: TermName) -> C::Command { let b = &self.backend; let matches = b.curr_matches_term(a); let next_j = b.increment_curr(); let goto_l0 = b.goto_l0(); b.if_else(matches, next_j, goto_l0) } /// code(A_kα, j, X) = /// if test(I[j], X, A_k α) { /// c_u := create(R_A_k, c_u, j), goto L_A /// } else { /// goto L_0 /// } /// R_A_k: pub fn on_nonterm_instance<E:Copy>(&self, (a, k): (NontermName, usize), alpha: &[Sym<E>], x: NontermName) -> (C::Command, C::Label) { let b = &self.backend; let matches = b.test(x, (Some(a), alpha)); let r_a_k = b.return_label(x, (a, k)); let create = b.create(r_a_k); let l_a = b.nonterm_label(a); let goto_la = b.goto(l_a); let create_then_goto_la = b.seq(create, goto_la); let goto_l0 = b.goto_l0(); let c = b.if_else(matches, create_then_goto_la, goto_l0); let l = b.return_label(x, (a, k)); (c, l) } /// code(α, j, X) =... /// /// (driver for calling either of on_term/on_nonterm_instance) pub fn on_symbols(&self, alpha: &[Sym<usize>], x: NontermName) -> (C::Command, Option<C::Label>) {
/ Given alpha = x1 x2.. x_f, shorthand for /// /// code(x1 .. x_f, j, A) /// code( x2.. x_f, j, A) /// ... /// code( x_f, j, A) /// /// Each `code` maps to a command and (potentially) a trailing label; /// therefore concatenating the codes results in a leading command /// and a sequence of blocks. /// The above maps to a command and a sequence of bl pub fn on_symbols_in_prod(&self, alpha: &[Sym<usize>], a: NontermName, end_with: C::Command) -> (C::Command, Vec<C::Block>) { let mut c = self.backend.no_op(); enum BuildState<C:Backend> { FirstCommand, MakeEndBlock { first: C::Command, then: Vec<C::Block>, end: C::Label } } let mut bs: BuildState<C> = BuildState::FirstCommand; for i in 0..alpha.len() { let (c2, opt_label) = self.on_symbols(&alpha[i..], a); c = self.backend.seq(c, c2); if let Some(l) = opt_label { bs = match bs { BuildState::FirstCommand => BuildState::MakeEndBlock { first: c, then: Vec::new(), end: l }, BuildState::MakeEndBlock {first,mut then,end} => { let b = self.backend.block(end, c); then.push(b); BuildState::MakeEndBlock { first: first, then: then, end: l } } }; c = self.backend.no_op(); } } match bs { BuildState::FirstCommand => { c = self.backend.seq(c, end_with); return (c, Vec::new()); } BuildState::MakeEndBlock { first, mut then, end } => { c = self.backend.seq(c, end_with); let b = self.backend.block(end, c); then.push(b); return (first, then); } } } /// code(A ::= empty, j) = pop(c_u, j); goto L_0 /// /// code(A ::= <term> x_2.. x_f, j) = /// j := j + 1 /// code(x2 .. x_f, j, A) /// code( x3.. x_f, j, A) /// ... /// code( x_f, j, A) /// pop(c_u, j), /// goto L_0 /// /// code(A ::= X_l x_2.. x_f, j) = /// c_u := create(R_X_l, c_u, j); /// goto L_X; /// R_X_l: code(x_2 .. x_f, j, A) /// code( x_3.. x_f, j, A) /// ... 
/// code( x_f, j, A) /// pop(c_u, j) /// goto L_0 pub fn on_production(&self, a: NontermName, alpha: &[Sym<usize>]) -> (C::Command, Vec<C::Block>) { let end_with = { let b = &self.backend; let pop = b.pop(); let goto_l0 = b.goto_l0(); b.seq(pop, goto_l0) }; if alpha.len() == 0 { return (end_with, Vec::new()); } match alpha[0] { Sym::T(_) => { // The code produced here is only meant to be run if // we've already matched the first terminal of a // non-empty α. It probably would be a good idea to // actually assert such a match, but whatever. let next_j = self.backend.increment_curr(); let (c, blocks) = self.on_symbols_in_prod(&alpha[1..], a, end_with); (self.backend.seq(next_j, c), blocks) } Sym::N { name: X, x: l } => { let r_X_l = self.backend.return_label(a, (X, l)); let c1 = { let b = &self.backend; let l_X = b.nonterm_label(X); let create = b.create(r_X_l.clone()); let goto_lX = b.goto(l_X); b.seq(create, goto_lX) }; let (c2, more_blocks) = self.on_symbols_in_prod(&alpha[1..], a, end_with); let block = self.backend.block(r_X_l, c2); let mut blocks = Vec::with_capacity(1 + more_blocks.len()); blocks.push(block); for b in more_blocks { blocks.push(b); } (c1, blocks) } } } /// let the rule for A be `A ::= α_1 |... | α_t` /// /// code(A, j) if A is LL(1) nonterm = /// if test(I[j], A, α_1) { goto L_A_1 } /// ... /// else if test(I[j], A, α_t) { goto L_A_t } /// // (assert unreachable here?) /// L_A_1: code(A ::= α_1, j) ///... /// L_A_t: code(A ::= α_t, j) /// /// code(A, j) if A is not LL(1) nonterm = /// if test(I[j], A, α_1) { add(L_A_1, c_u, j) } /// ... /// if test(I[j], A, α_1) { add(L_A_t, c_u, j) } /// goto L_0 /// L_A_1: code(A ::= α_1, j) ///... 
/// L_A_t: code(A ::= α_t, j) /// pub fn on_rule(&self, r: Rule<usize>) -> (C::Command, Vec<C::Block>) { let Rule { left: a, right_hands: ref alphas } = r; let c = if self.grammar().ll1s.contains(&a) { let b = &self.backend; let mut c = b.no_op(); for (i, alpha) in alphas.iter().enumerate() { let test = b.test(a, (None, alpha)); let l_a_i = b.alternate_label((a, i)); let goto_l_a_i = b.goto(l_a_i); let c2 = b.if_(test, goto_l_a_i); c = b.seq(c, c2); } let u = b.panic_fail(&format!("unreachable for {}", a)); c = b.seq(c, u); c } else { let b = &self.backend; let mut c = b.no_op(); for (i, alpha) in alphas.iter().enumerate() { let test = b.test(a, (None, alpha)); let l_a_i = b.alternate_label((a, i)); let add_l_a_i = b.add(l_a_i); let c2 = b.if_(test, add_l_a_i); c = b.seq(c, c2); } let goto_l0 = b.goto_l0(); c = b.seq(c, goto_l0); c }; // each call to `on_production` gives back a command and // a potential block; we turn each command into its // own block, so the total blocks is 2 * |alphas|. let mut blocks = Vec::with_capacity(2*alphas.len()); for (i, alpha) in alphas.iter().enumerate() { let (c, more_blocks) = self.on_production(a, alpha); let b = &self.backend; let l_a_i = b.alternate_label((a, i)); let block = b.block(l_a_i, c); blocks.push(block); for b in more_blocks { blocks.push(b); } } (c, blocks) } }
// FIXME: the infrastructure should be revised to allow me to // inline a sequence of terminals (since they do not need to // be encoded into separate labelled blocks). assert!(alpha.len() > 0); let (s_0, alpha) = alpha.split_at(1); match s_0[0] { Sym::T(t) => (self.on_term(t), None), Sym::N { name: a, x: x_ } => { let (c, l) = self.on_nonterm_instance((a, x_), alpha, x); (c, Some(l)) } } } //
identifier_body
codegen.rs
use grammar::{Grammar, NontermName, Rule, Sym, TermName}; pub fn codegen<B:BackendText>(back: &mut B) -> String where // IMO these should not be necessary, see Rust issue #29143 B::Block: RenderIndent { let mut s = String::new(); s = s + &back.prefix(); let indent = back.rule_indent_preference(); let mut cg = Codegen::new(back); for rule in &cg.grammar().rules { // FIXME: make `fn on_rule` take a `&Rule` instead of cloning. let (c, blocks) = cg.on_rule(rule.clone()); let l_a = cg.backend.nonterm_label(rule.left); let b = cg.backend.block(l_a, c); s = s + &b.render_indent(indent); let blocks: String = blocks.iter() .map(|b|b.render_indent(indent)) .collect(); s = s + &blocks; } s = s + &cg.backend.suffix(); return s; } pub trait RenderIndent { fn render_indent(&self, usize) -> String; fn render(&self) -> String { self.render_indent(0) } } pub trait BackendText: Backend where Self::Block: RenderIndent { fn prefix(&self) -> String; fn suffix(&self) -> String; fn rule_indent_preference(&self) -> usize; } pub trait Backend { type Command; type Expr; type Label: Clone; type Block; fn grammar(&self) -> &Grammar<usize>; // (The label generators are all non `&mut self` because in // principle we should generate the labels ahead of time // for any given grammar.) /// L_0 is the central loop of the parser. fn label_0(&self) -> Self::Label; /// R_A_k labels function call return to nonterm N from the /// call associated with A_k. (A_k is unique in the grammar /// and thus we can derive `N` from it in the formalism, but /// it seems simpler to just pass it along in this API here.) fn return_label(&self, n: NontermName, a_k: (NontermName, usize)) -> Self::Label; /// L_A labels parse function for A. fn nonterm_label(&self, a: NontermName) -> Self::Label; /// L_A_i labels function for parsing ith alternate α_i of A. fn alternate_label(&self, a_i: (NontermName, usize)) -> Self::Label; /// `L: C` /// (note that `C` must have control flow ending in goto...) 
fn block(&self, l: Self::Label, c: Self::Command) -> Self::Block; /// Execute this command to report the parse attempt failed. fn report_parse_failure(&self, &str) -> Self::Command; /// Execute this command if something unexpected happened /// in the generated code. fn panic_fail(&self, &str) -> Self::Command; /// the no-op command makes some constructions easier. fn no_op(&self) -> Self::Command; /// `cmd1, cmd2` fn seq(&self, cmd1: Self::Command, cmd2: Self::Command) -> Self::Command; /// `if test { then } fn if_(&self, test: Self::Expr, then: Self::Command) -> Self::Command; /// `if test { then } else { else_ }` fn if_else(&self, test: Self::Expr, then: Self::Command, else_: Self::Command) -> Self::Command; /// `j := j + 1` fn increment_curr(&self) -> Self::Command; /// let L = label; /// `goto L` fn goto(&self, label: Self::Label) -> Self::Command; /// this comes up a lot. fn goto_l0(&self) -> Self::Command { let l0 = self.label_0(); self.goto(l0) } /// `I[j] == a` fn curr_matches_term(&self, a: TermName) -> Self::Expr; /// let x = I[j]; let N = n; /// `x in FIRST(N$)` /// /// The leading optional component in alpha is meant to be /// the first element of alpha, if it is present at all. fn test_end<E:Copy>(&self, n: NontermName) -> Self::Expr; /// let x = I[j]; let α = alpha; /// `x in FIRST(α) or empty in FIRST(α) and x in FOLLOW(A)` /// /// The leading optional component in alpha is meant to be /// the first element of alpha, if it is present at all. 
fn test<E:Copy>(&self, a: NontermName, alpha: (Option<NontermName>, &[Sym<E>])) -> Self::Expr; /// `c_u := create(l, c_u, j)` fn create(&self, l: Self::Label) -> Self::Command; /// `add(l, c_u, j) fn add(&self, l: Self::Label) -> Self::Command; /// `pop(c_u, j)` fn pop(&self) -> Self::Command; } pub struct Codegen<'a, B:Backend+'a> { pub backend: &'a mut B, } impl<'a, C:Backend> Codegen<'a, C> { pub fn new(back: &'a mut C) -> Self { Codegen { backend: back } } pub fn grammar(&self) -> &Grammar<usize> { self.backend.grammar() } /// code(aα, j, X) = if I[j] = a {j := j+1} else {goto L_0} pub fn on_te
f, a: TermName) -> C::Command { let b = &self.backend; let matches = b.curr_matches_term(a); let next_j = b.increment_curr(); let goto_l0 = b.goto_l0(); b.if_else(matches, next_j, goto_l0) } /// code(A_kα, j, X) = /// if test(I[j], X, A_k α) { /// c_u := create(R_A_k, c_u, j), goto L_A /// } else { /// goto L_0 /// } /// R_A_k: pub fn on_nonterm_instance<E:Copy>(&self, (a, k): (NontermName, usize), alpha: &[Sym<E>], x: NontermName) -> (C::Command, C::Label) { let b = &self.backend; let matches = b.test(x, (Some(a), alpha)); let r_a_k = b.return_label(x, (a, k)); let create = b.create(r_a_k); let l_a = b.nonterm_label(a); let goto_la = b.goto(l_a); let create_then_goto_la = b.seq(create, goto_la); let goto_l0 = b.goto_l0(); let c = b.if_else(matches, create_then_goto_la, goto_l0); let l = b.return_label(x, (a, k)); (c, l) } /// code(α, j, X) =... /// /// (driver for calling either of on_term/on_nonterm_instance) pub fn on_symbols(&self, alpha: &[Sym<usize>], x: NontermName) -> (C::Command, Option<C::Label>) { // FIXME: the infrastructure should be revised to allow me to // inline a sequence of terminals (since they do not need to // be encoded into separate labelled blocks). assert!(alpha.len() > 0); let (s_0, alpha) = alpha.split_at(1); match s_0[0] { Sym::T(t) => (self.on_term(t), None), Sym::N { name: a, x: x_ } => { let (c, l) = self.on_nonterm_instance((a, x_), alpha, x); (c, Some(l)) } } } /// Given alpha = x1 x2.. x_f, shorthand for /// /// code(x1 .. x_f, j, A) /// code( x2.. x_f, j, A) /// ... /// code( x_f, j, A) /// /// Each `code` maps to a command and (potentially) a trailing label; /// therefore concatenating the codes results in a leading command /// and a sequence of blocks. 
/// The above maps to a command and a sequence of bl pub fn on_symbols_in_prod(&self, alpha: &[Sym<usize>], a: NontermName, end_with: C::Command) -> (C::Command, Vec<C::Block>) { let mut c = self.backend.no_op(); enum BuildState<C:Backend> { FirstCommand, MakeEndBlock { first: C::Command, then: Vec<C::Block>, end: C::Label } } let mut bs: BuildState<C> = BuildState::FirstCommand; for i in 0..alpha.len() { let (c2, opt_label) = self.on_symbols(&alpha[i..], a); c = self.backend.seq(c, c2); if let Some(l) = opt_label { bs = match bs { BuildState::FirstCommand => BuildState::MakeEndBlock { first: c, then: Vec::new(), end: l }, BuildState::MakeEndBlock {first,mut then,end} => { let b = self.backend.block(end, c); then.push(b); BuildState::MakeEndBlock { first: first, then: then, end: l } } }; c = self.backend.no_op(); } } match bs { BuildState::FirstCommand => { c = self.backend.seq(c, end_with); return (c, Vec::new()); } BuildState::MakeEndBlock { first, mut then, end } => { c = self.backend.seq(c, end_with); let b = self.backend.block(end, c); then.push(b); return (first, then); } } } /// code(A ::= empty, j) = pop(c_u, j); goto L_0 /// /// code(A ::= <term> x_2.. x_f, j) = /// j := j + 1 /// code(x2 .. x_f, j, A) /// code( x3.. x_f, j, A) /// ... /// code( x_f, j, A) /// pop(c_u, j), /// goto L_0 /// /// code(A ::= X_l x_2.. x_f, j) = /// c_u := create(R_X_l, c_u, j); /// goto L_X; /// R_X_l: code(x_2 .. x_f, j, A) /// code( x_3.. x_f, j, A) /// ... /// code( x_f, j, A) /// pop(c_u, j) /// goto L_0 pub fn on_production(&self, a: NontermName, alpha: &[Sym<usize>]) -> (C::Command, Vec<C::Block>) { let end_with = { let b = &self.backend; let pop = b.pop(); let goto_l0 = b.goto_l0(); b.seq(pop, goto_l0) }; if alpha.len() == 0 { return (end_with, Vec::new()); } match alpha[0] { Sym::T(_) => { // The code produced here is only meant to be run if // we've already matched the first terminal of a // non-empty α. 
It probably would be a good idea to // actually assert such a match, but whatever. let next_j = self.backend.increment_curr(); let (c, blocks) = self.on_symbols_in_prod(&alpha[1..], a, end_with); (self.backend.seq(next_j, c), blocks) } Sym::N { name: X, x: l } => { let r_X_l = self.backend.return_label(a, (X, l)); let c1 = { let b = &self.backend; let l_X = b.nonterm_label(X); let create = b.create(r_X_l.clone()); let goto_lX = b.goto(l_X); b.seq(create, goto_lX) }; let (c2, more_blocks) = self.on_symbols_in_prod(&alpha[1..], a, end_with); let block = self.backend.block(r_X_l, c2); let mut blocks = Vec::with_capacity(1 + more_blocks.len()); blocks.push(block); for b in more_blocks { blocks.push(b); } (c1, blocks) } } } /// let the rule for A be `A ::= α_1 |... | α_t` /// /// code(A, j) if A is LL(1) nonterm = /// if test(I[j], A, α_1) { goto L_A_1 } /// ... /// else if test(I[j], A, α_t) { goto L_A_t } /// // (assert unreachable here?) /// L_A_1: code(A ::= α_1, j) ///... /// L_A_t: code(A ::= α_t, j) /// /// code(A, j) if A is not LL(1) nonterm = /// if test(I[j], A, α_1) { add(L_A_1, c_u, j) } /// ... /// if test(I[j], A, α_1) { add(L_A_t, c_u, j) } /// goto L_0 /// L_A_1: code(A ::= α_1, j) ///... 
/// L_A_t: code(A ::= α_t, j) /// pub fn on_rule(&self, r: Rule<usize>) -> (C::Command, Vec<C::Block>) { let Rule { left: a, right_hands: ref alphas } = r; let c = if self.grammar().ll1s.contains(&a) { let b = &self.backend; let mut c = b.no_op(); for (i, alpha) in alphas.iter().enumerate() { let test = b.test(a, (None, alpha)); let l_a_i = b.alternate_label((a, i)); let goto_l_a_i = b.goto(l_a_i); let c2 = b.if_(test, goto_l_a_i); c = b.seq(c, c2); } let u = b.panic_fail(&format!("unreachable for {}", a)); c = b.seq(c, u); c } else { let b = &self.backend; let mut c = b.no_op(); for (i, alpha) in alphas.iter().enumerate() { let test = b.test(a, (None, alpha)); let l_a_i = b.alternate_label((a, i)); let add_l_a_i = b.add(l_a_i); let c2 = b.if_(test, add_l_a_i); c = b.seq(c, c2); } let goto_l0 = b.goto_l0(); c = b.seq(c, goto_l0); c }; // each call to `on_production` gives back a command and // a potential block; we turn each command into its // own block, so the total blocks is 2 * |alphas|. let mut blocks = Vec::with_capacity(2*alphas.len()); for (i, alpha) in alphas.iter().enumerate() { let (c, more_blocks) = self.on_production(a, alpha); let b = &self.backend; let l_a_i = b.alternate_label((a, i)); let block = b.block(l_a_i, c); blocks.push(block); for b in more_blocks { blocks.push(b); } } (c, blocks) } }
rm(&sel
identifier_name
codegen.rs
use grammar::{Grammar, NontermName, Rule, Sym, TermName}; pub fn codegen<B:BackendText>(back: &mut B) -> String where // IMO these should not be necessary, see Rust issue #29143 B::Block: RenderIndent { let mut s = String::new(); s = s + &back.prefix(); let indent = back.rule_indent_preference(); let mut cg = Codegen::new(back); for rule in &cg.grammar().rules { // FIXME: make `fn on_rule` take a `&Rule` instead of cloning. let (c, blocks) = cg.on_rule(rule.clone()); let l_a = cg.backend.nonterm_label(rule.left); let b = cg.backend.block(l_a, c); s = s + &b.render_indent(indent); let blocks: String = blocks.iter() .map(|b|b.render_indent(indent)) .collect(); s = s + &blocks; } s = s + &cg.backend.suffix(); return s; } pub trait RenderIndent { fn render_indent(&self, usize) -> String; fn render(&self) -> String { self.render_indent(0) } } pub trait BackendText: Backend where Self::Block: RenderIndent { fn prefix(&self) -> String; fn suffix(&self) -> String; fn rule_indent_preference(&self) -> usize; } pub trait Backend { type Command; type Expr; type Label: Clone; type Block; fn grammar(&self) -> &Grammar<usize>; // (The label generators are all non `&mut self` because in // principle we should generate the labels ahead of time // for any given grammar.) /// L_0 is the central loop of the parser. fn label_0(&self) -> Self::Label; /// R_A_k labels function call return to nonterm N from the /// call associated with A_k. (A_k is unique in the grammar /// and thus we can derive `N` from it in the formalism, but /// it seems simpler to just pass it along in this API here.) fn return_label(&self, n: NontermName, a_k: (NontermName, usize)) -> Self::Label; /// L_A labels parse function for A. fn nonterm_label(&self, a: NontermName) -> Self::Label; /// L_A_i labels function for parsing ith alternate α_i of A. fn alternate_label(&self, a_i: (NontermName, usize)) -> Self::Label; /// `L: C` /// (note that `C` must have control flow ending in goto...) 
fn block(&self, l: Self::Label, c: Self::Command) -> Self::Block; /// Execute this command to report the parse attempt failed. fn report_parse_failure(&self, &str) -> Self::Command; /// Execute this command if something unexpected happened /// in the generated code. fn panic_fail(&self, &str) -> Self::Command; /// the no-op command makes some constructions easier. fn no_op(&self) -> Self::Command; /// `cmd1, cmd2` fn seq(&self, cmd1: Self::Command, cmd2: Self::Command) -> Self::Command; /// `if test { then } fn if_(&self, test: Self::Expr, then: Self::Command) -> Self::Command; /// `if test { then } else { else_ }` fn if_else(&self, test: Self::Expr, then: Self::Command, else_: Self::Command) -> Self::Command; /// `j := j + 1` fn increment_curr(&self) -> Self::Command; /// let L = label; /// `goto L` fn goto(&self, label: Self::Label) -> Self::Command; /// this comes up a lot. fn goto_l0(&self) -> Self::Command { let l0 = self.label_0(); self.goto(l0) } /// `I[j] == a` fn curr_matches_term(&self, a: TermName) -> Self::Expr; /// let x = I[j]; let N = n; /// `x in FIRST(N$)` /// /// The leading optional component in alpha is meant to be /// the first element of alpha, if it is present at all. fn test_end<E:Copy>(&self, n: NontermName) -> Self::Expr; /// let x = I[j]; let α = alpha; /// `x in FIRST(α) or empty in FIRST(α) and x in FOLLOW(A)` /// /// The leading optional component in alpha is meant to be /// the first element of alpha, if it is present at all. 
fn test<E:Copy>(&self, a: NontermName, alpha: (Option<NontermName>, &[Sym<E>])) -> Self::Expr; /// `c_u := create(l, c_u, j)` fn create(&self, l: Self::Label) -> Self::Command; /// `add(l, c_u, j) fn add(&self, l: Self::Label) -> Self::Command; /// `pop(c_u, j)` fn pop(&self) -> Self::Command; } pub struct Codegen<'a, B:Backend+'a> { pub backend: &'a mut B, } impl<'a, C:Backend> Codegen<'a, C> { pub fn new(back: &'a mut C) -> Self { Codegen { backend: back } } pub fn grammar(&self) -> &Grammar<usize> { self.backend.grammar() } /// code(aα, j, X) = if I[j] = a {j := j+1} else {goto L_0} pub fn on_term(&self, a: TermName) -> C::Command { let b = &self.backend; let matches = b.curr_matches_term(a); let next_j = b.increment_curr(); let goto_l0 = b.goto_l0(); b.if_else(matches, next_j, goto_l0) } /// code(A_kα, j, X) = /// if test(I[j], X, A_k α) { /// c_u := create(R_A_k, c_u, j), goto L_A /// } else { /// goto L_0 /// } /// R_A_k: pub fn on_nonterm_instance<E:Copy>(&self, (a, k): (NontermName, usize), alpha: &[Sym<E>], x: NontermName) -> (C::Command, C::Label) { let b = &self.backend; let matches = b.test(x, (Some(a), alpha)); let r_a_k = b.return_label(x, (a, k)); let create = b.create(r_a_k); let l_a = b.nonterm_label(a); let goto_la = b.goto(l_a); let create_then_goto_la = b.seq(create, goto_la); let goto_l0 = b.goto_l0(); let c = b.if_else(matches, create_then_goto_la, goto_l0); let l = b.return_label(x, (a, k)); (c, l) } /// code(α, j, X) =... /// /// (driver for calling either of on_term/on_nonterm_instance) pub fn on_symbols(&self, alpha: &[Sym<usize>], x: NontermName) -> (C::Command, Option<C::Label>) { // FIXME: the infrastructure should be revised to allow me to // inline a sequence of terminals (since they do not need to // be encoded into separate labelled blocks). 
assert!(alpha.len() > 0); let (s_0, alpha) = alpha.split_at(1); match s_0[0] { Sym::T(t) => (self.on_term(t), None), Sym::N { name: a, x: x_ } => { let (c, l) = self.on_nonterm_instance((a, x_), alpha, x); (c, Some(l)) } } } /// Given alpha = x1 x2.. x_f, shorthand for /// /// code(x1 .. x_f, j, A) /// code( x2.. x_f, j, A) /// ... /// code( x_f, j, A) /// /// Each `code` maps to a command and (potentially) a trailing label; /// therefore concatenating the codes results in a leading command /// and a sequence of blocks. /// The above maps to a command and a sequence of bl pub fn on_symbols_in_prod(&self, alpha: &[Sym<usize>], a: NontermName, end_with: C::Command) -> (C::Command, Vec<C::Block>) { let mut c = self.backend.no_op(); enum BuildState<C:Backend> { FirstCommand, MakeEndBlock { first: C::Command, then: Vec<C::Block>, end: C::Label } } let mut bs: BuildState<C> = BuildState::FirstCommand; for i in 0..alpha.len() { let (c2, opt_label) = self.on_symbols(&alpha[i..], a); c = self.backend.seq(c, c2); if let Some(l) = opt_label { bs = match bs { BuildState::FirstCommand => BuildState::MakeEndBlock { first: c, then: Vec::new(), end: l }, BuildState::MakeEndBlock {first,mut then,end} => { let b = self.backend.block(end, c); then.push(b); BuildState::MakeEndBlock { first: first, then: then, end: l } } }; c = self.backend.no_op(); } } match bs { BuildState::FirstCommand => { c = self.backend.seq(c, end_with); return (c, Vec::new()); } BuildState::MakeEndBlock { first, mut then, end } => { c = self.backend.seq(c, end_with); let b = self.backend.block(end, c); then.push(b); return (first, then); } } } /// code(A ::= empty, j) = pop(c_u, j); goto L_0 /// /// code(A ::= <term> x_2.. x_f, j) = /// j := j + 1 /// code(x2 .. x_f, j, A) /// code( x3.. x_f, j, A) /// ... /// code( x_f, j, A) /// pop(c_u, j), /// goto L_0 /// /// code(A ::= X_l x_2.. x_f, j) = /// c_u := create(R_X_l, c_u, j); /// goto L_X; /// R_X_l: code(x_2 .. x_f, j, A) /// code( x_3.. x_f, j, A) /// ... 
/// code( x_f, j, A) /// pop(c_u, j) /// goto L_0 pub fn on_production(&self, a: NontermName, alpha: &[Sym<usize>]) -> (C::Command, Vec<C::Block>) { let end_with = { let b = &self.backend; let pop = b.pop(); let goto_l0 = b.goto_l0(); b.seq(pop, goto_l0) }; if alpha.len() == 0 { return (end_with, Vec::new()); } match alpha[0] { Sym::T(_) => { // The code produced here is only meant to be run if // we've already matched the first terminal of a // non-empty α. It probably would be a good idea to // actually assert such a match, but whatever. let next_j = self.backend.increment_curr(); let (c, blocks) = self.on_symbols_in_prod(&alpha[1..], a, end_with); (self.backend.seq(next_j, c), blocks) } Sym::N { name: X, x: l } => { let r_X_l = self.backend.return_label(a, (X, l)); let c1 = { let b = &self.backend; let l_X = b.nonterm_label(X); let create = b.create(r_X_l.clone()); let goto_lX = b.goto(l_X); b.seq(create, goto_lX) }; let (c2, more_blocks) = self.on_symbols_in_prod(&alpha[1..], a, end_with); let block = self.backend.block(r_X_l, c2); let mut blocks = Vec::with_capacity(1 + more_blocks.len()); blocks.push(block); for b in more_blocks { blocks.push(b); } (c1, blocks) } } } /// let the rule for A be `A ::= α_1 |... | α_t` /// /// code(A, j) if A is LL(1) nonterm = /// if test(I[j], A, α_1) { goto L_A_1 } /// ... /// else if test(I[j], A, α_t) { goto L_A_t } /// // (assert unreachable here?) /// L_A_1: code(A ::= α_1, j) ///... /// L_A_t: code(A ::= α_t, j) /// /// code(A, j) if A is not LL(1) nonterm = /// if test(I[j], A, α_1) { add(L_A_1, c_u, j) } /// ... /// if test(I[j], A, α_1) { add(L_A_t, c_u, j) } /// goto L_0 /// L_A_1: code(A ::= α_1, j) ///... 
/// L_A_t: code(A ::= α_t, j) /// pub fn on_rule(&self, r: Rule<usize>) -> (C::Command, Vec<C::Block>) { let Rule { left: a, right_hands: ref alphas } = r; let c = if self.grammar().ll1s.contains(&a) { let b = &self.backend; let mut c = b.no_op(); for (i, alpha) in alphas.iter().enumerate() { let test = b.test(a, (None, alpha)); let l_a_i = b.alternate_label((a, i)); let goto_l_a_i = b.goto(l_a_i); let c2 = b.if_(test, goto_l_a_i); c = b.seq(c, c2); } let u = b.panic_fail(&format!("unreachable for {}", a)); c = b.seq(c, u); c } else { let b = &self.backend;
let l_a_i = b.alternate_label((a, i)); let add_l_a_i = b.add(l_a_i); let c2 = b.if_(test, add_l_a_i); c = b.seq(c, c2); } let goto_l0 = b.goto_l0(); c = b.seq(c, goto_l0); c }; // each call to `on_production` gives back a command and // a potential block; we turn each command into its // own block, so the total blocks is 2 * |alphas|. let mut blocks = Vec::with_capacity(2*alphas.len()); for (i, alpha) in alphas.iter().enumerate() { let (c, more_blocks) = self.on_production(a, alpha); let b = &self.backend; let l_a_i = b.alternate_label((a, i)); let block = b.block(l_a_i, c); blocks.push(block); for b in more_blocks { blocks.push(b); } } (c, blocks) } }
let mut c = b.no_op(); for (i, alpha) in alphas.iter().enumerate() { let test = b.test(a, (None, alpha));
random_line_split
mod.rs
use std::{ convert::TryInto, io::SeekFrom, mem::size_of, path::{Path, PathBuf}, }; use bincode::{deserialize, serialize_into, serialized_size}; use cfg_if::cfg_if; use once_cell::sync::OnceCell; use serde::{de::DeserializeOwned, Serialize}; use tokio::{ fs::{create_dir, remove_file, OpenOptions}, io::{AsyncRead, AsyncReadExt, AsyncSeek, AsyncSeekExt, AsyncWriteExt}, }; #[cfg(feature = "replication")] use crate::node::persistence::snapshot::REPLICATION_FILE as SNAPSHOT_REPLICATION_FILE; use crate::{ config::persistence::PersistenceConfig, node::{ event::{Event, EventLog}, persistence::{snapshot::Snapshot, PersistenceError}, Queue, }, }; /// Queue log file name const QUEUE_FILE: &str = "queue_log"; /// Queue compacted log file name const QUEUE_COMPACTION_FILE: &str = "queue_compacted_log"; pub struct Log<'c> { /// Persistence config config: &'c PersistenceConfig<'c>, /// Internal instance of [`Snapshot`] driver /// /// Due to limitations of current replication storage implementation /// it is impossible to rely only on [`Log`] driver to save event log, /// so [`Snapshot`] driver is being used to fill the gap. snapshot: OnceCell<Snapshot<'c>>, } impl<'c> Log<'c> { pub fn new(config: &'c PersistenceConfig) -> Self { Log { config, snapshot: OnceCell::new(), } } /// Make log entry from serializable source /// /// Returns bytes buffer, filled with header (currently only with entry size) and serialized entry, without any offset between each other. 
/// ``` /// +---------+ /// |Entry len| /// +---------+ /// | | /// | Entry | /// | | /// +---------+ /// ``` fn make_log_entry<S>(source: &S) -> Result<Vec<u8>, PersistenceError> where S: Serialize, { let size = serialized_size(source).map_err(PersistenceError::SerializationError)?; debug!("Log entry size: {}", size); let capacity = size_of::<u64>() + TryInto::<usize>::try_into(size).map_err(PersistenceError::LogEntryTooBig)?; let mut buf = Vec::with_capacity(capacity); buf.extend(&size.to_le_bytes()); buf.resize(capacity, 0); serialize_into(&mut buf[size_of::<u64>()..], source) .map_err(PersistenceError::SerializationError)?; Ok(buf) } /// Get buffer of log entries from byte source async fn parse_log<T, S>(source: &mut S) -> Result<Vec<T>, PersistenceError> where T: DeserializeOwned, S: AsyncSeek + AsyncRead + Unpin, { let mut entries = Vec::new(); let source_size = source .seek(SeekFrom::End(0)) .await .map_err(PersistenceError::from)?; debug!("Log source size: {}", source_size); source .seek(SeekFrom::Start(0)) .await .map_err(PersistenceError::from)?; let mut buf = Vec::new(); while source .seek(SeekFrom::Current(0)) .await .map_err(PersistenceError::from)? 
< source_size { let size = source.read_u64_le().await.map_err(PersistenceError::from)?; debug!("Log entry size: {}", size); buf.reserve(size.try_into().map_err(PersistenceError::LogEntryTooBig)?); source .take(size) .read_buf(&mut buf) .await .map_err(PersistenceError::from)?; entries.push(deserialize(&buf).map_err(PersistenceError::SerializationError)?); buf.clear(); } Ok(entries) } /// Appends [make_log_entry] result of `source` to `destination` /// /// [make_log_entry]: Log::make_log_entry async fn append<P, S>(&self, source: &S, destination: P) -> Result<(), PersistenceError> where P: AsRef<Path>, S: Serialize, { let path = self.config.path.join(destination); debug!("Appending to {}", path.display()); if let Some(parent) = path.parent() { if!parent.is_dir() { create_dir(&parent).await.map_err(PersistenceError::from)?; } } OpenOptions::new() .create(true) .append(true) .open(path) .await .map_err(PersistenceError::from)? .write_all(&Self::make_log_entry(source)?) .await .map_err(PersistenceError::from) } /// Get log entries from `source` log file using [parse_log] /// /// [parse_log]: Log::parse_log async fn load<S, P>(&self, source: P) -> Result<Vec<S>, PersistenceError> where S: DeserializeOwned, P: AsRef<Path>,
/// Append single event to `source` log file (usually queue name) pub async fn persist_event<P>( &self, event: &Event<'_>, source: P, ) -> Result<(), PersistenceError> where P: AsRef<Path>, { self.append(event, source.as_ref().join(QUEUE_FILE)).await } /// Restore database events from `source` log file (usually queue name) /// /// If specified in [`PersistenceConfig`], compaction will be executed after successful loading. pub async fn load_queue<P, DB>(&self, source: P) -> Result<Queue<DB>, PersistenceError> where P: AsRef<Path>, DB: EventLog<Vec<Event<'static>>> + Serialize + DeserializeOwned, { let events = match self .load::<Event, _>(source.as_ref().join(QUEUE_FILE)) .await { Ok(events) => events, Err(PersistenceError::FileOpenError(e)) => { error!("Log file not found: {}", e); Vec::new() } Err(e) => return Err(e), }; let database = if self.config.compaction { let compaction_path = source.as_ref().join(QUEUE_COMPACTION_FILE); let inner_db = match self.get_snapshot().load::<DB, _>(&compaction_path).await { Ok(mut database) => { database.apply_log(events); database } Err(PersistenceError::FileOpenError(e)) => { error!("Compaction file not found: {}", e); DB::from_log(events) } Err(e) => return Err(e), }; self.get_snapshot() .persist(&inner_db, &compaction_path) .await?; match self.prune(&source).await { Err(PersistenceError::FileOpenError(_)) | Ok(_) => (), Err(e) => return Err(e), }; inner_db } else { DB::from_log(events) }; cfg_if! 
{ if #[cfg(feature = "replication")] { // Thanks to GC threshold, it's currently impossible to use log driver let replication_storage = match self.get_snapshot().load(source.as_ref().join(SNAPSHOT_REPLICATION_FILE)).await { Ok(storage) => storage, Err(PersistenceError::FileOpenError(e)) => { error!("{}", e); None }, Err(e) => return Err(e) }; let queue = Queue::new(database, replication_storage); } else { let queue = Queue::new(database); } } Ok(queue) } /// Prune `queue` log file async fn prune<P>(&self, queue: P) -> Result<(), PersistenceError> where P: AsRef<Path>, { let path = [&self.config.path, queue.as_ref(), QUEUE_FILE.as_ref()] .iter() .collect::<PathBuf>(); debug!("Pruning {}", path.display()); remove_file(path).await.map_err(PersistenceError::from) } /// Get shared [`Snapshot`] instance fn get_snapshot(&self) -> &Snapshot<'_> { self.snapshot.get_or_init(|| Snapshot::new(self.config)) } } #[cfg(test)] mod tests { use std::{borrow::Cow, io::Cursor}; use maybe_owned::MaybeOwned; use spartan_lib::core::{ db::TreeDatabase, dispatcher::StatusAwareDispatcher, message::{builder::MessageBuilder, Message}, payload::Dispatchable, }; use tempfile::{NamedTempFile, TempDir}; use super::*; use crate::{config::persistence::Persistence, node::DB}; #[tokio::test] async fn test_append_read() { let file = NamedTempFile::new().unwrap(); let config = PersistenceConfig { path: Cow::Borrowed(file.path().parent().unwrap()), ..Default::default() }; let log = Log::new(&config); log.append(&String::from("Hello, world"), file.path()) .await .unwrap(); let entries = log.load::<String, _>(file.path()).await.unwrap(); assert_eq!(entries.len(), 1); assert_eq!(entries.first().unwrap(), &String::from("Hello, world")); } #[tokio::test] async fn test_empty_file_load() { let file = NamedTempFile::new().unwrap(); let config = PersistenceConfig { path: Cow::Borrowed(file.path().parent().unwrap()), ..Default::default() }; let log = Log::new(&config); let entries = log.load::<String, 
_>(file.path()).await.unwrap(); assert!(entries.is_empty()); } #[tokio::test] async fn test_serialize_log_entry() { let entry = Log::make_log_entry(&vec![1u32, 2, 3]).unwrap(); let parsed = Log::parse_log::<Vec<u32>, _>(&mut Cursor::new(entry)) .await .unwrap(); assert_eq!(parsed.len(), 1); assert_eq!(&*parsed.first().unwrap(), &[1, 2, 3]); } #[tokio::test] async fn test_multiple_log_entries() { let mut entries = Vec::new(); entries.append(&mut Log::make_log_entry(&vec![1u32, 2, 3]).unwrap()); entries.append(&mut Log::make_log_entry(&vec![4, 5, 6]).unwrap()); entries.append(&mut Log::make_log_entry(&vec![7, 8, 9]).unwrap()); let parsed = Log::parse_log::<Vec<u32>, _>(&mut Cursor::new(entries)) .await .unwrap(); assert_eq!(parsed.len(), 3); assert_eq!(parsed, vec![vec![1, 2, 3], vec![4, 5, 6], vec![7, 8, 9]]); } #[tokio::test] async fn test_persist_and_restore_from_events() { let tempdir = TempDir::new().expect("Unable to create temporary test directory"); let event = Event::Push(MaybeOwned::Owned( MessageBuilder::default().body("Hello").compose().unwrap(), )); let config = PersistenceConfig { mode: Persistence::Log, path: Cow::Borrowed(tempdir.path()), timer: 0, compaction: false, }; let log = Log::new(&config); log.persist_event(&event, "test").await.unwrap(); let queue: DB = log.load_queue("test").await.unwrap(); assert_eq!(queue.database().await.pop().unwrap().body(), "Hello"); } #[tokio::test] async fn test_compaction() { let tempdir = TempDir::new().expect("Unable to create temporary test directory"); let event = Event::Push(MaybeOwned::Owned( MessageBuilder::default().body("Hello").compose().unwrap(), )); let config = PersistenceConfig { mode: Persistence::Log, path: Cow::Borrowed(tempdir.path()), timer: 0, compaction: true, }; let log = Log::new(&config); log.persist_event(&event, "test").await.unwrap(); let queue: DB = log.load_queue("test").await.unwrap(); assert_eq!(queue.database().await.pop().unwrap().body(), "Hello"); assert!(matches!( 
log.load::<Event, _>(Path::new("test").join(QUEUE_FILE)) .await .unwrap_err(), PersistenceError::FileOpenError(_) )); let snapshot = Snapshot::new(&config); let mut database: TreeDatabase<Message> = snapshot .load(Path::new("test").join(QUEUE_COMPACTION_FILE)) .await .unwrap(); assert_eq!(database.pop().unwrap().body(), "Hello"); } }
{ let path = self.config.path.join(source); debug!("Loading from {}", path.display()); let mut file = OpenOptions::new() .read(true) .open(path) .await .map_err(PersistenceError::from)?; Self::parse_log(&mut file).await }
identifier_body
mod.rs
use std::{ convert::TryInto, io::SeekFrom, mem::size_of, path::{Path, PathBuf}, }; use bincode::{deserialize, serialize_into, serialized_size}; use cfg_if::cfg_if; use once_cell::sync::OnceCell; use serde::{de::DeserializeOwned, Serialize}; use tokio::{ fs::{create_dir, remove_file, OpenOptions}, io::{AsyncRead, AsyncReadExt, AsyncSeek, AsyncSeekExt, AsyncWriteExt}, }; #[cfg(feature = "replication")] use crate::node::persistence::snapshot::REPLICATION_FILE as SNAPSHOT_REPLICATION_FILE; use crate::{ config::persistence::PersistenceConfig, node::{ event::{Event, EventLog}, persistence::{snapshot::Snapshot, PersistenceError}, Queue, }, }; /// Queue log file name const QUEUE_FILE: &str = "queue_log"; /// Queue compacted log file name const QUEUE_COMPACTION_FILE: &str = "queue_compacted_log"; pub struct Log<'c> { /// Persistence config config: &'c PersistenceConfig<'c>, /// Internal instance of [`Snapshot`] driver /// /// Due to limitations of current replication storage implementation /// it is impossible to rely only on [`Log`] driver to save event log, /// so [`Snapshot`] driver is being used to fill the gap. snapshot: OnceCell<Snapshot<'c>>, } impl<'c> Log<'c> { pub fn new(config: &'c PersistenceConfig) -> Self { Log { config, snapshot: OnceCell::new(), } } /// Make log entry from serializable source /// /// Returns bytes buffer, filled with header (currently only with entry size) and serialized entry, without any offset between each other. 
/// ``` /// +---------+ /// |Entry len| /// +---------+ /// | | /// | Entry | /// | | /// +---------+ /// ``` fn make_log_entry<S>(source: &S) -> Result<Vec<u8>, PersistenceError> where S: Serialize, { let size = serialized_size(source).map_err(PersistenceError::SerializationError)?; debug!("Log entry size: {}", size); let capacity = size_of::<u64>() + TryInto::<usize>::try_into(size).map_err(PersistenceError::LogEntryTooBig)?; let mut buf = Vec::with_capacity(capacity); buf.extend(&size.to_le_bytes()); buf.resize(capacity, 0); serialize_into(&mut buf[size_of::<u64>()..], source) .map_err(PersistenceError::SerializationError)?; Ok(buf) } /// Get buffer of log entries from byte source async fn parse_log<T, S>(source: &mut S) -> Result<Vec<T>, PersistenceError> where T: DeserializeOwned, S: AsyncSeek + AsyncRead + Unpin, { let mut entries = Vec::new(); let source_size = source .seek(SeekFrom::End(0)) .await .map_err(PersistenceError::from)?; debug!("Log source size: {}", source_size); source .seek(SeekFrom::Start(0)) .await .map_err(PersistenceError::from)?; let mut buf = Vec::new(); while source .seek(SeekFrom::Current(0)) .await .map_err(PersistenceError::from)? 
< source_size { let size = source.read_u64_le().await.map_err(PersistenceError::from)?; debug!("Log entry size: {}", size); buf.reserve(size.try_into().map_err(PersistenceError::LogEntryTooBig)?); source .take(size) .read_buf(&mut buf) .await .map_err(PersistenceError::from)?; entries.push(deserialize(&buf).map_err(PersistenceError::SerializationError)?); buf.clear(); } Ok(entries) } /// Appends [make_log_entry] result of `source` to `destination` /// /// [make_log_entry]: Log::make_log_entry async fn append<P, S>(&self, source: &S, destination: P) -> Result<(), PersistenceError> where P: AsRef<Path>, S: Serialize, { let path = self.config.path.join(destination); debug!("Appending to {}", path.display()); if let Some(parent) = path.parent() { if!parent.is_dir() { create_dir(&parent).await.map_err(PersistenceError::from)?; } } OpenOptions::new() .create(true) .append(true) .open(path) .await .map_err(PersistenceError::from)? .write_all(&Self::make_log_entry(source)?) .await .map_err(PersistenceError::from) } /// Get log entries from `source` log file using [parse_log] /// /// [parse_log]: Log::parse_log async fn load<S, P>(&self, source: P) -> Result<Vec<S>, PersistenceError> where S: DeserializeOwned, P: AsRef<Path>, { let path = self.config.path.join(source); debug!("Loading from {}", path.display()); let mut file = OpenOptions::new() .read(true) .open(path) .await .map_err(PersistenceError::from)?; Self::parse_log(&mut file).await } /// Append single event to `source` log file (usually queue name) pub async fn persist_event<P>( &self, event: &Event<'_>, source: P, ) -> Result<(), PersistenceError> where P: AsRef<Path>, { self.append(event, source.as_ref().join(QUEUE_FILE)).await } /// Restore database events from `source` log file (usually queue name) /// /// If specified in [`PersistenceConfig`], compaction will be executed after successful loading. 
pub async fn load_queue<P, DB>(&self, source: P) -> Result<Queue<DB>, PersistenceError> where P: AsRef<Path>, DB: EventLog<Vec<Event<'static>>> + Serialize + DeserializeOwned, { let events = match self .load::<Event, _>(source.as_ref().join(QUEUE_FILE)) .await { Ok(events) => events, Err(PersistenceError::FileOpenError(e)) => { error!("Log file not found: {}", e); Vec::new() } Err(e) => return Err(e), }; let database = if self.config.compaction { let compaction_path = source.as_ref().join(QUEUE_COMPACTION_FILE); let inner_db = match self.get_snapshot().load::<DB, _>(&compaction_path).await { Ok(mut database) => { database.apply_log(events); database } Err(PersistenceError::FileOpenError(e)) => { error!("Compaction file not found: {}", e); DB::from_log(events) } Err(e) => return Err(e), }; self.get_snapshot() .persist(&inner_db, &compaction_path) .await?; match self.prune(&source).await { Err(PersistenceError::FileOpenError(_)) | Ok(_) => (), Err(e) => return Err(e), }; inner_db } else { DB::from_log(events) }; cfg_if! 
{ if #[cfg(feature = "replication")] { // Thanks to GC threshold, it's currently impossible to use log driver let replication_storage = match self.get_snapshot().load(source.as_ref().join(SNAPSHOT_REPLICATION_FILE)).await { Ok(storage) => storage, Err(PersistenceError::FileOpenError(e)) => { error!("{}", e); None }, Err(e) => return Err(e) }; let queue = Queue::new(database, replication_storage); } else { let queue = Queue::new(database); } } Ok(queue) } /// Prune `queue` log file async fn prune<P>(&self, queue: P) -> Result<(), PersistenceError> where P: AsRef<Path>, { let path = [&self.config.path, queue.as_ref(), QUEUE_FILE.as_ref()] .iter() .collect::<PathBuf>(); debug!("Pruning {}", path.display()); remove_file(path).await.map_err(PersistenceError::from) } /// Get shared [`Snapshot`] instance fn get_snapshot(&self) -> &Snapshot<'_> { self.snapshot.get_or_init(|| Snapshot::new(self.config)) } } #[cfg(test)] mod tests { use std::{borrow::Cow, io::Cursor}; use maybe_owned::MaybeOwned; use spartan_lib::core::{ db::TreeDatabase, dispatcher::StatusAwareDispatcher, message::{builder::MessageBuilder, Message}, payload::Dispatchable, }; use tempfile::{NamedTempFile, TempDir}; use super::*; use crate::{config::persistence::Persistence, node::DB}; #[tokio::test] async fn test_append_read() { let file = NamedTempFile::new().unwrap(); let config = PersistenceConfig { path: Cow::Borrowed(file.path().parent().unwrap()), ..Default::default() }; let log = Log::new(&config); log.append(&String::from("Hello, world"), file.path()) .await .unwrap(); let entries = log.load::<String, _>(file.path()).await.unwrap(); assert_eq!(entries.len(), 1); assert_eq!(entries.first().unwrap(), &String::from("Hello, world")); } #[tokio::test] async fn test_empty_file_load() { let file = NamedTempFile::new().unwrap(); let config = PersistenceConfig { path: Cow::Borrowed(file.path().parent().unwrap()), ..Default::default() }; let log = Log::new(&config); let entries = log.load::<String, 
_>(file.path()).await.unwrap(); assert!(entries.is_empty()); } #[tokio::test] async fn test_serialize_log_entry() { let entry = Log::make_log_entry(&vec![1u32, 2, 3]).unwrap(); let parsed = Log::parse_log::<Vec<u32>, _>(&mut Cursor::new(entry)) .await .unwrap(); assert_eq!(parsed.len(), 1); assert_eq!(&*parsed.first().unwrap(), &[1, 2, 3]); } #[tokio::test] async fn
() { let mut entries = Vec::new(); entries.append(&mut Log::make_log_entry(&vec![1u32, 2, 3]).unwrap()); entries.append(&mut Log::make_log_entry(&vec![4, 5, 6]).unwrap()); entries.append(&mut Log::make_log_entry(&vec![7, 8, 9]).unwrap()); let parsed = Log::parse_log::<Vec<u32>, _>(&mut Cursor::new(entries)) .await .unwrap(); assert_eq!(parsed.len(), 3); assert_eq!(parsed, vec![vec![1, 2, 3], vec![4, 5, 6], vec![7, 8, 9]]); } #[tokio::test] async fn test_persist_and_restore_from_events() { let tempdir = TempDir::new().expect("Unable to create temporary test directory"); let event = Event::Push(MaybeOwned::Owned( MessageBuilder::default().body("Hello").compose().unwrap(), )); let config = PersistenceConfig { mode: Persistence::Log, path: Cow::Borrowed(tempdir.path()), timer: 0, compaction: false, }; let log = Log::new(&config); log.persist_event(&event, "test").await.unwrap(); let queue: DB = log.load_queue("test").await.unwrap(); assert_eq!(queue.database().await.pop().unwrap().body(), "Hello"); } #[tokio::test] async fn test_compaction() { let tempdir = TempDir::new().expect("Unable to create temporary test directory"); let event = Event::Push(MaybeOwned::Owned( MessageBuilder::default().body("Hello").compose().unwrap(), )); let config = PersistenceConfig { mode: Persistence::Log, path: Cow::Borrowed(tempdir.path()), timer: 0, compaction: true, }; let log = Log::new(&config); log.persist_event(&event, "test").await.unwrap(); let queue: DB = log.load_queue("test").await.unwrap(); assert_eq!(queue.database().await.pop().unwrap().body(), "Hello"); assert!(matches!( log.load::<Event, _>(Path::new("test").join(QUEUE_FILE)) .await .unwrap_err(), PersistenceError::FileOpenError(_) )); let snapshot = Snapshot::new(&config); let mut database: TreeDatabase<Message> = snapshot .load(Path::new("test").join(QUEUE_COMPACTION_FILE)) .await .unwrap(); assert_eq!(database.pop().unwrap().body(), "Hello"); } }
test_multiple_log_entries
identifier_name
mod.rs
use std::{ convert::TryInto, io::SeekFrom, mem::size_of, path::{Path, PathBuf}, }; use bincode::{deserialize, serialize_into, serialized_size}; use cfg_if::cfg_if; use once_cell::sync::OnceCell; use serde::{de::DeserializeOwned, Serialize}; use tokio::{ fs::{create_dir, remove_file, OpenOptions}, io::{AsyncRead, AsyncReadExt, AsyncSeek, AsyncSeekExt, AsyncWriteExt}, }; #[cfg(feature = "replication")] use crate::node::persistence::snapshot::REPLICATION_FILE as SNAPSHOT_REPLICATION_FILE; use crate::{ config::persistence::PersistenceConfig, node::{ event::{Event, EventLog}, persistence::{snapshot::Snapshot, PersistenceError}, Queue, }, }; /// Queue log file name const QUEUE_FILE: &str = "queue_log"; /// Queue compacted log file name const QUEUE_COMPACTION_FILE: &str = "queue_compacted_log"; pub struct Log<'c> { /// Persistence config config: &'c PersistenceConfig<'c>, /// Internal instance of [`Snapshot`] driver /// /// Due to limitations of current replication storage implementation /// it is impossible to rely only on [`Log`] driver to save event log, /// so [`Snapshot`] driver is being used to fill the gap. snapshot: OnceCell<Snapshot<'c>>, } impl<'c> Log<'c> { pub fn new(config: &'c PersistenceConfig) -> Self { Log { config, snapshot: OnceCell::new(), } } /// Make log entry from serializable source /// /// Returns bytes buffer, filled with header (currently only with entry size) and serialized entry, without any offset between each other. 
/// ``` /// +---------+ /// |Entry len| /// +---------+ /// | | /// | Entry | /// | | /// +---------+ /// ``` fn make_log_entry<S>(source: &S) -> Result<Vec<u8>, PersistenceError> where S: Serialize, { let size = serialized_size(source).map_err(PersistenceError::SerializationError)?; debug!("Log entry size: {}", size); let capacity = size_of::<u64>() + TryInto::<usize>::try_into(size).map_err(PersistenceError::LogEntryTooBig)?; let mut buf = Vec::with_capacity(capacity); buf.extend(&size.to_le_bytes()); buf.resize(capacity, 0); serialize_into(&mut buf[size_of::<u64>()..], source) .map_err(PersistenceError::SerializationError)?; Ok(buf) } /// Get buffer of log entries from byte source async fn parse_log<T, S>(source: &mut S) -> Result<Vec<T>, PersistenceError> where T: DeserializeOwned, S: AsyncSeek + AsyncRead + Unpin, { let mut entries = Vec::new(); let source_size = source .seek(SeekFrom::End(0)) .await .map_err(PersistenceError::from)?; debug!("Log source size: {}", source_size); source .seek(SeekFrom::Start(0)) .await .map_err(PersistenceError::from)?; let mut buf = Vec::new(); while source .seek(SeekFrom::Current(0)) .await .map_err(PersistenceError::from)? < source_size
debug!("Log entry size: {}", size); buf.reserve(size.try_into().map_err(PersistenceError::LogEntryTooBig)?); source .take(size) .read_buf(&mut buf) .await .map_err(PersistenceError::from)?; entries.push(deserialize(&buf).map_err(PersistenceError::SerializationError)?); buf.clear(); } Ok(entries) } /// Appends [make_log_entry] result of `source` to `destination` /// /// [make_log_entry]: Log::make_log_entry async fn append<P, S>(&self, source: &S, destination: P) -> Result<(), PersistenceError> where P: AsRef<Path>, S: Serialize, { let path = self.config.path.join(destination); debug!("Appending to {}", path.display()); if let Some(parent) = path.parent() { if!parent.is_dir() { create_dir(&parent).await.map_err(PersistenceError::from)?; } } OpenOptions::new() .create(true) .append(true) .open(path) .await .map_err(PersistenceError::from)? .write_all(&Self::make_log_entry(source)?) .await .map_err(PersistenceError::from) } /// Get log entries from `source` log file using [parse_log] /// /// [parse_log]: Log::parse_log async fn load<S, P>(&self, source: P) -> Result<Vec<S>, PersistenceError> where S: DeserializeOwned, P: AsRef<Path>, { let path = self.config.path.join(source); debug!("Loading from {}", path.display()); let mut file = OpenOptions::new() .read(true) .open(path) .await .map_err(PersistenceError::from)?; Self::parse_log(&mut file).await } /// Append single event to `source` log file (usually queue name) pub async fn persist_event<P>( &self, event: &Event<'_>, source: P, ) -> Result<(), PersistenceError> where P: AsRef<Path>, { self.append(event, source.as_ref().join(QUEUE_FILE)).await } /// Restore database events from `source` log file (usually queue name) /// /// If specified in [`PersistenceConfig`], compaction will be executed after successful loading. 
pub async fn load_queue<P, DB>(&self, source: P) -> Result<Queue<DB>, PersistenceError> where P: AsRef<Path>, DB: EventLog<Vec<Event<'static>>> + Serialize + DeserializeOwned, { let events = match self .load::<Event, _>(source.as_ref().join(QUEUE_FILE)) .await { Ok(events) => events, Err(PersistenceError::FileOpenError(e)) => { error!("Log file not found: {}", e); Vec::new() } Err(e) => return Err(e), }; let database = if self.config.compaction { let compaction_path = source.as_ref().join(QUEUE_COMPACTION_FILE); let inner_db = match self.get_snapshot().load::<DB, _>(&compaction_path).await { Ok(mut database) => { database.apply_log(events); database } Err(PersistenceError::FileOpenError(e)) => { error!("Compaction file not found: {}", e); DB::from_log(events) } Err(e) => return Err(e), }; self.get_snapshot() .persist(&inner_db, &compaction_path) .await?; match self.prune(&source).await { Err(PersistenceError::FileOpenError(_)) | Ok(_) => (), Err(e) => return Err(e), }; inner_db } else { DB::from_log(events) }; cfg_if! 
{ if #[cfg(feature = "replication")] { // Thanks to GC threshold, it's currently impossible to use log driver let replication_storage = match self.get_snapshot().load(source.as_ref().join(SNAPSHOT_REPLICATION_FILE)).await { Ok(storage) => storage, Err(PersistenceError::FileOpenError(e)) => { error!("{}", e); None }, Err(e) => return Err(e) }; let queue = Queue::new(database, replication_storage); } else { let queue = Queue::new(database); } } Ok(queue) } /// Prune `queue` log file async fn prune<P>(&self, queue: P) -> Result<(), PersistenceError> where P: AsRef<Path>, { let path = [&self.config.path, queue.as_ref(), QUEUE_FILE.as_ref()] .iter() .collect::<PathBuf>(); debug!("Pruning {}", path.display()); remove_file(path).await.map_err(PersistenceError::from) } /// Get shared [`Snapshot`] instance fn get_snapshot(&self) -> &Snapshot<'_> { self.snapshot.get_or_init(|| Snapshot::new(self.config)) } } #[cfg(test)] mod tests { use std::{borrow::Cow, io::Cursor}; use maybe_owned::MaybeOwned; use spartan_lib::core::{ db::TreeDatabase, dispatcher::StatusAwareDispatcher, message::{builder::MessageBuilder, Message}, payload::Dispatchable, }; use tempfile::{NamedTempFile, TempDir}; use super::*; use crate::{config::persistence::Persistence, node::DB}; #[tokio::test] async fn test_append_read() { let file = NamedTempFile::new().unwrap(); let config = PersistenceConfig { path: Cow::Borrowed(file.path().parent().unwrap()), ..Default::default() }; let log = Log::new(&config); log.append(&String::from("Hello, world"), file.path()) .await .unwrap(); let entries = log.load::<String, _>(file.path()).await.unwrap(); assert_eq!(entries.len(), 1); assert_eq!(entries.first().unwrap(), &String::from("Hello, world")); } #[tokio::test] async fn test_empty_file_load() { let file = NamedTempFile::new().unwrap(); let config = PersistenceConfig { path: Cow::Borrowed(file.path().parent().unwrap()), ..Default::default() }; let log = Log::new(&config); let entries = log.load::<String, 
_>(file.path()).await.unwrap(); assert!(entries.is_empty()); } #[tokio::test] async fn test_serialize_log_entry() { let entry = Log::make_log_entry(&vec![1u32, 2, 3]).unwrap(); let parsed = Log::parse_log::<Vec<u32>, _>(&mut Cursor::new(entry)) .await .unwrap(); assert_eq!(parsed.len(), 1); assert_eq!(&*parsed.first().unwrap(), &[1, 2, 3]); } #[tokio::test] async fn test_multiple_log_entries() { let mut entries = Vec::new(); entries.append(&mut Log::make_log_entry(&vec![1u32, 2, 3]).unwrap()); entries.append(&mut Log::make_log_entry(&vec![4, 5, 6]).unwrap()); entries.append(&mut Log::make_log_entry(&vec![7, 8, 9]).unwrap()); let parsed = Log::parse_log::<Vec<u32>, _>(&mut Cursor::new(entries)) .await .unwrap(); assert_eq!(parsed.len(), 3); assert_eq!(parsed, vec![vec![1, 2, 3], vec![4, 5, 6], vec![7, 8, 9]]); } #[tokio::test] async fn test_persist_and_restore_from_events() { let tempdir = TempDir::new().expect("Unable to create temporary test directory"); let event = Event::Push(MaybeOwned::Owned( MessageBuilder::default().body("Hello").compose().unwrap(), )); let config = PersistenceConfig { mode: Persistence::Log, path: Cow::Borrowed(tempdir.path()), timer: 0, compaction: false, }; let log = Log::new(&config); log.persist_event(&event, "test").await.unwrap(); let queue: DB = log.load_queue("test").await.unwrap(); assert_eq!(queue.database().await.pop().unwrap().body(), "Hello"); } #[tokio::test] async fn test_compaction() { let tempdir = TempDir::new().expect("Unable to create temporary test directory"); let event = Event::Push(MaybeOwned::Owned( MessageBuilder::default().body("Hello").compose().unwrap(), )); let config = PersistenceConfig { mode: Persistence::Log, path: Cow::Borrowed(tempdir.path()), timer: 0, compaction: true, }; let log = Log::new(&config); log.persist_event(&event, "test").await.unwrap(); let queue: DB = log.load_queue("test").await.unwrap(); assert_eq!(queue.database().await.pop().unwrap().body(), "Hello"); assert!(matches!( 
log.load::<Event, _>(Path::new("test").join(QUEUE_FILE)) .await .unwrap_err(), PersistenceError::FileOpenError(_) )); let snapshot = Snapshot::new(&config); let mut database: TreeDatabase<Message> = snapshot .load(Path::new("test").join(QUEUE_COMPACTION_FILE)) .await .unwrap(); assert_eq!(database.pop().unwrap().body(), "Hello"); } }
{ let size = source.read_u64_le().await.map_err(PersistenceError::from)?;
random_line_split
critical_cliques.rs
cliques.push(clique); } let mut crit_graph = Graph::new(cliques.len()); for c1 in 0..cliques.len() { for c2 in 0..cliques.len() { if c1 == c2 { continue; } if should_be_neighbors(g, &cliques[c1], &cliques[c2]) { crit_graph.set(c1, c2, Weight::ONE); } } } CritCliqueGraph { cliques, graph: crit_graph, } } fn should_be_neighbors(g: &Graph<Weight>, c1: &CritClique, c2: &CritClique) -> bool { for &u in &c1.vertices { for &v in &c2.vertices { if!g.has_edge(u, v) { return false; } } } true } /// Performs a parameter-independent reduction on the graph `g` by constructing the critical clique /// graph and merging all critical cliques into a single vertex. /// This assumes that the input graph is unweighted (i.e. all weights are +1 or -1 exactly). The /// reduced graph will be weighted however. pub fn merge_cliques( g: &Graph<Weight>, imap: &IndexMap, _path_log: &mut String, ) -> (Graph<Weight>, IndexMap) { let mut crit = build_crit_clique_graph(g); let mut crit_imap = IndexMap::empty(crit.graph.size()); for u in 0..crit.graph.size() { for v in (u + 1)..crit.graph.size() { //let uv = crit.graph.get_mut_direct(u, v); let uv = crit.graph.get(u, v); let sign = uv.signum(); let weight = crit.cliques[u].vertices.len() * crit.cliques[v].vertices.len(); crit.graph.set(u, v, (weight as Weight) * sign); } crit_imap.set( u, crit.cliques[u] .vertices .iter() .flat_map(|v| imap[*v].iter().copied()) .collect(), ); if crit_imap[u].len() > 1 { append_path_log_dir!(_path_log, "critcliques, merged {:?}\n", crit_imap[u]); } } (crit.graph, crit_imap) } // This kernel can only straightforwardly be applied to unweighted instances. // However, before even starting the parameter search, we reduce the unweighted graph by converting // it into a weighted one. Thus we cannot use this kernel at the moment. 
/* // Chen and Meng: A 2k Kernel for the Cluster Editing Problem, 2010 pub fn apply_reductions( g: &mut Graph, imap: &mut IndexMap, k: &mut f32, edits: &mut Vec<Edit>, ) -> bool { let mut any_rules_applied = true; while any_rules_applied { any_rules_applied = false; let mut rule5_state = None; let crit = build_crit_clique_graph(g); for (clique_idx, clique) in crit.cliques.iter().enumerate() { let (clique_neighbors, clique_crit_neighbor_count) = get_clique_neighbors(g, clique_idx, &crit); let edit_set = calculate_edits_to_remove_clique_and_neighborhood(g, clique, &clique_neighbors); let clique_len = clique.vertices.len(); let neighbors_len = clique_neighbors.len(); let total_edit_degree = edit_set.total_edit_degree; let rule1_applicable = clique_len as f32 > *k; let rule2_applicable = clique_len >= neighbors_len && clique_len + neighbors_len > total_edit_degree; let mut rule3_applicable = false; let mut rule4_applicable = false; let mut rule4_vertex = None; let mut clique_neighbors2 = None; if!rule1_applicable &&!rule2_applicable { // Only calculate this if the other two aren't already true since it's a bit more work if clique_len < neighbors_len && clique_len + neighbors_len > total_edit_degree { let neighbors2 = get_clique_neighbors2(g, clique_idx, &crit); let threshold = (clique_len + neighbors_len) / 2; for &u in &neighbors2 { let count = count_intersection(g.neighbors(u), &clique_neighbors); if count > threshold { rule4_vertex = Some(u); break; } } if rule5_state.is_none() { rule5_state = Some(( clique.clone(), clique_neighbors.clone(), clique_crit_neighbor_count, neighbors2.clone(), )); } rule3_applicable = rule4_vertex.is_none(); rule4_applicable = rule4_vertex.is_some(); clique_neighbors2 = Some(neighbors2); } } if rule1_applicable || rule2_applicable || rule3_applicable { let has_reduced = make_clique_and_neighborhood_disjoint_and_remove( g, imap, k, edits, edit_set, &clique, &clique_neighbors, ); if *k < 0.0 { return false; } if has_reduced { 
any_rules_applied = true; break; } } if rule4_applicable { let has_reduced = apply_rule4( g, imap, k, edits, &clique_neighbors, &clique_neighbors2.unwrap(), rule4_vertex.unwrap(), ); if *k < 0.0 { return false; } if has_reduced { any_rules_applied = true; break; } } } if!any_rules_applied && rule5_state.is_some() { // If we got here, either no rule was applicable or they did not result in any further // reduction, but we found a case where rule 5 should now be applicable. // The paper claims that the above condition and the fact that the other rules // don#t reduce it further is sufficient to imply this condition. Let's check to be // safe for now :) // TODO: Might remove this check if I'm convinced it's safe. let (clique, clique_neighbors, clique_crit_neighbor_count, clique_neighbors2) = rule5_state.unwrap(); assert!(clique_crit_neighbor_count == 1 && clique_neighbors2.len() == 1); let has_reduced = apply_rule5(g, imap, k, edits, &clique, &clique_neighbors); if!has_reduced { // All the other rules didn't apply, so we got here, and now 5 didn't do anything // either. We're done now. break; } any_rules_applied = true; } let new_count = g.present_node_count(); if new_count == g.size() { continue; } // Construct a new graph and imap with the vertices we marked for removal actually removed. The // new imap still maps from indices into that new graph to the vertices of the original graph // the algorithm got as input. // TODO: Figure out if it's necessary to do this every `while` iteration or if the // reductions are all still valid without it; would also be nice to avoid recomputing the // crit clique graph when it's not necessary. // TODO: Possibly test whether it's faster to just keep the removed_g map around in a larger // scope rather than creating the graph here. 
if new_count == 0 { return true; } let mut new_g = Graph::new(new_count); let mut new_imap = IndexMap::new(new_count); let mut new_vertex = 0; let mut reverse_imap = vec![0; g.size()]; for u in 0..g.size() { if!g.is_present(u) { continue; } for v in g.neighbors(u) { if v > u { continue; } new_g.set_direct(reverse_imap[v], new_vertex, g.get_direct(v, u)); } reverse_imap[u] = new_vertex; new_imap[new_vertex] = imap.take(u); new_vertex += 1; } *g = new_g; *imap = new_imap; } true } // TODO: COOOOMMMEEEENNNNTTTTSSSS!!!! /// Gets all the vertices that are neighbors of the critical clique, but not in the clique /// themselves. No specific order is guaranteed. fn get_clique_neighbors( g: &Graph, clique_idx: usize, crit_graph: &CritCliqueGraph, ) -> (Vec<usize>, usize) { let crit_neighbors = crit_graph.graph.neighbors(clique_idx); let mut count = 0; let neighborhood = crit_neighbors .flat_map(|n| { count += 1; &crit_graph.cliques[n].vertices }) .copied() .filter(|&u| g.is_present(u)) .collect(); (neighborhood, count) } fn get_clique_neighbors2(g: &Graph, clique_idx: usize, crit_graph: &CritCliqueGraph) -> Vec<usize> { let crit_neighbors = crit_graph.graph.neighbors(clique_idx).collect::<Vec<_>>(); crit_neighbors .iter() .flat_map(|&n| { crit_graph .graph .neighbors(n) .filter(|n2|!crit_neighbors.contains(n2)) .flat_map(|n2| &crit_graph.cliques[n2].vertices) }) .copied() .filter(|&u| g.is_present(u)) .collect() } fn count_intersection(n1: impl Iterator<Item = usize>, n2: &[usize]) -> usize { let mut count = 0; for u in n1 { if n2.contains(&u) { count += 1; } } count } struct EditSet { inserts: Vec<(usize, usize)>, deletions: Vec<(usize, usize)>, total_edit_degree: usize, } fn calculate_edits_to_remove_clique_and_neighborhood( g: &Graph, clique: &CritClique, clique_neighbors: &[usize], ) -> EditSet { // Everything in the clique is already connected with the rest of the clique (it's a clique!). 
// All the neighbors are also connected to all the vertices in the clique, because all the // clique vertices have the *same set* of neighbors outside the clique (it's a *critical* // clique!). // So we only need to add edges between the different groups of neighbors. // // The only edges that we need to remove are between the neighbors of the clique to any nodes // that are neither in the neighbors nor the clique itself. (The vertices in the clique // obviously don't have any such neighbors, so there's nothing to remove.) let mut edits = EditSet { inserts: Vec::new(), deletions: Vec::new(), total_edit_degree: 0, }; for i in 0..clique_neighbors.len() { let u = clique_neighbors[i]; if!g.is_present(u) { continue; } // Add edges to other clique neighbors. for j in (i + 1)..clique_neighbors.len() { let v = clique_neighbors[j]; if!g.is_present(v) { continue; } if g.get(u, v) < 0.0 { edits.inserts.push((u, v)); // Increase total degree twice: we only add the (u, v) edge once but it would be // counted in the edit degree for both u and v edits.total_edit_degree += 2; } } // Remove edges to unrelated vertices. // TODO: Try using a BTreeSet for neighbors and vertices, or using some kind of other iteration // strategy to avoid the linear search here. for v in 0..g.size() { if u == v ||!g.is_present(v) { continue; } if clique_neighbors.contains(&v) || clique.vertices.contains(&v) { continue; } if g.get(u, v) > 0.0 { edits.deletions.push((u, v)); // Here the degree is only increased once: it would only count for u, since v isn't // even in the neighborhood and thus not considered. 
edits.total_edit_degree += 1; } } } edits } fn make_clique_and_neighborhood_disjoint_and_remove( g: &mut Graph, imap: &IndexMap, k: &mut f32, edits: &mut Vec<Edit>, edits_to_perform: EditSet, clique: &CritClique, clique_neighbors: &[usize], ) -> bool { for (u, v) in edits_to_perform.inserts { let uv = g.get_mut(u, v); *k += *uv; Edit::insert(edits, &imap, u, v); *uv = f32::INFINITY; } for (u, v) in edits_to_perform.deletions { let uv = g.get_mut(u, v); *k -= *uv; Edit::delete(edits, &imap, u, v); *uv = f32::NEG_INFINITY; } // Now mark the clique and its neighbors as "removed" from the graph, so future reduction and // algorithm steps ignore it. (It is now a disjoint clique, i.e. already done.) for &u in clique_neighbors { g.set_present(u, false); } for &u in &clique.vertices { g.set_present(u, false); } clique_neighbors.len() > 0 || clique.vertices.len() > 0 } fn apply_rule4( g: &mut Graph, imap: &IndexMap, k: &mut f32, edits: &mut Vec<Edit>, clique_neighbors: &[usize], clique_neighbors2: &[usize], u: usize, ) -> bool { // Insert edges in neighborhood to make clique+neighborhood a clique. let mut has_done_edit = false; for i in 0..clique_neighbors.len() { let v = clique_neighbors[i]; // Add edges to other clique neighbors. 
for j in (i + 1)..clique_neighbors.len() { let w = clique_neighbors[j]; let vw = g.get_mut(v, w); if *vw < 0.0 { *k += *vw; Edit::insert(edits, &imap, v, w); *vw = f32::INFINITY; has_done_edit = true; } } } // Remove edges between clique_neighbors and clique_neighbors2-u for &v in clique_neighbors { for &w in clique_neighbors2 { if w == u { continue; } let vw = g.get_mut(v, w); if *vw > 0.0 { *k -= *vw; Edit::delete(edits, &imap, v, w); *vw = f32::NEG_INFINITY; has_done_edit = true; } } } has_done_edit } fn apply_rule5( g: &mut Graph, imap: &IndexMap, k: &mut f32, edits: &mut Vec<Edit>, clique: &CritClique, clique_neighbors: &[usize], ) -> bool { // Can pick any set of |clique| vertices in clique_neighbors, we'll just use the first |clique| // verts. // Then, remove (clique + that set) from G, and set k = k - |clique|. // Note that the modification to k does not actually correspond directly to the edge edits we // do, but this is what the paper has proven to be correct *shrug*. let clique_size = clique.vertices.len(); let to_remove = clique .vertices .iter() .chain(clique_neighbors[..clique_size].iter()) .copied() .collect::<Vec<_>>(); for &u in &to_remove { g.set_present(u, false); for v in 0..g.size() { if!g.is_present(v) { continue; } let uv = g.get_mut(u, v); if *uv > 0.0 { Edit::delete(edits, imap, u, v); *uv = f32::NEG_INFINITY; } } } *k = *k - clique_size as f32; to_remove.len() > 0 } */ #[cfg(test)] mod tests { use super::*; #[test] fn crit_graph()
{ // This is the example from "Guo: A more effective linear kernelization for cluster // editing, 2009", Fig. 1 let mut graph = Graph::new(9); graph.set(0, 1, Weight::ONE); graph.set(0, 2, Weight::ONE); graph.set(1, 2, Weight::ONE); graph.set(2, 3, Weight::ONE); graph.set(2, 4, Weight::ONE); graph.set(3, 4, Weight::ONE); graph.set(3, 5, Weight::ONE); graph.set(3, 6, Weight::ONE); graph.set(4, 5, Weight::ONE); graph.set(4, 6, Weight::ONE); graph.set(5, 6, Weight::ONE); graph.set(5, 7, Weight::ONE); graph.set(5, 8, Weight::ONE); let crit = build_crit_clique_graph(&graph);
identifier_body
critical_cliques.rs
in (u + 1)..self.graph.size() { if self.graph.get(u, v) > Weight::ZERO { pg.add_edge(NodeIndex::new(u), NodeIndex::new(v), 0); } } } pg } } pub fn build_crit_clique_graph(g: &Graph<Weight>) -> CritCliqueGraph { let mut cliques = Vec::new(); // TODO: This looks at least O(n^2) but should apparently be do-able in O(n + m), so have // another look at making this more efficient. let mut visited = vec![false; g.size()]; for u in g.nodes() { if visited[u]
visited[u] = true; let mut clique = CritClique::default(); clique.vertices.push(u); for v in g.nodes() { if visited[v] { continue; } // TODO: Is it maybe worth storing neighbor sets instead of recomputing them? if g.closed_neighbors(u).eq(g.closed_neighbors(v)) { clique.vertices.push(v); visited[v] = true; } } cliques.push(clique); } let mut crit_graph = Graph::new(cliques.len()); for c1 in 0..cliques.len() { for c2 in 0..cliques.len() { if c1 == c2 { continue; } if should_be_neighbors(g, &cliques[c1], &cliques[c2]) { crit_graph.set(c1, c2, Weight::ONE); } } } CritCliqueGraph { cliques, graph: crit_graph, } } fn should_be_neighbors(g: &Graph<Weight>, c1: &CritClique, c2: &CritClique) -> bool { for &u in &c1.vertices { for &v in &c2.vertices { if!g.has_edge(u, v) { return false; } } } true } /// Performs a parameter-independent reduction on the graph `g` by constructing the critical clique /// graph and merging all critical cliques into a single vertex. /// This assumes that the input graph is unweighted (i.e. all weights are +1 or -1 exactly). The /// reduced graph will be weighted however. pub fn merge_cliques( g: &Graph<Weight>, imap: &IndexMap, _path_log: &mut String, ) -> (Graph<Weight>, IndexMap) { let mut crit = build_crit_clique_graph(g); let mut crit_imap = IndexMap::empty(crit.graph.size()); for u in 0..crit.graph.size() { for v in (u + 1)..crit.graph.size() { //let uv = crit.graph.get_mut_direct(u, v); let uv = crit.graph.get(u, v); let sign = uv.signum(); let weight = crit.cliques[u].vertices.len() * crit.cliques[v].vertices.len(); crit.graph.set(u, v, (weight as Weight) * sign); } crit_imap.set( u, crit.cliques[u] .vertices .iter() .flat_map(|v| imap[*v].iter().copied()) .collect(), ); if crit_imap[u].len() > 1 { append_path_log_dir!(_path_log, "critcliques, merged {:?}\n", crit_imap[u]); } } (crit.graph, crit_imap) } // This kernel can only straightforwardly be applied to unweighted instances. 
// However, before even starting the parameter search, we reduce the unweighted graph by converting // it into a weighted one. Thus we cannot use this kernel at the moment. /* // Chen and Meng: A 2k Kernel for the Cluster Editing Problem, 2010 pub fn apply_reductions( g: &mut Graph, imap: &mut IndexMap, k: &mut f32, edits: &mut Vec<Edit>, ) -> bool { let mut any_rules_applied = true; while any_rules_applied { any_rules_applied = false; let mut rule5_state = None; let crit = build_crit_clique_graph(g); for (clique_idx, clique) in crit.cliques.iter().enumerate() { let (clique_neighbors, clique_crit_neighbor_count) = get_clique_neighbors(g, clique_idx, &crit); let edit_set = calculate_edits_to_remove_clique_and_neighborhood(g, clique, &clique_neighbors); let clique_len = clique.vertices.len(); let neighbors_len = clique_neighbors.len(); let total_edit_degree = edit_set.total_edit_degree; let rule1_applicable = clique_len as f32 > *k; let rule2_applicable = clique_len >= neighbors_len && clique_len + neighbors_len > total_edit_degree; let mut rule3_applicable = false; let mut rule4_applicable = false; let mut rule4_vertex = None; let mut clique_neighbors2 = None; if!rule1_applicable &&!rule2_applicable { // Only calculate this if the other two aren't already true since it's a bit more work if clique_len < neighbors_len && clique_len + neighbors_len > total_edit_degree { let neighbors2 = get_clique_neighbors2(g, clique_idx, &crit); let threshold = (clique_len + neighbors_len) / 2; for &u in &neighbors2 { let count = count_intersection(g.neighbors(u), &clique_neighbors); if count > threshold { rule4_vertex = Some(u); break; } } if rule5_state.is_none() { rule5_state = Some(( clique.clone(), clique_neighbors.clone(), clique_crit_neighbor_count, neighbors2.clone(), )); } rule3_applicable = rule4_vertex.is_none(); rule4_applicable = rule4_vertex.is_some(); clique_neighbors2 = Some(neighbors2); } } if rule1_applicable || rule2_applicable || rule3_applicable { let has_reduced 
= make_clique_and_neighborhood_disjoint_and_remove( g, imap, k, edits, edit_set, &clique, &clique_neighbors, ); if *k < 0.0 { return false; } if has_reduced { any_rules_applied = true; break; } } if rule4_applicable { let has_reduced = apply_rule4( g, imap, k, edits, &clique_neighbors, &clique_neighbors2.unwrap(), rule4_vertex.unwrap(), ); if *k < 0.0 { return false; } if has_reduced { any_rules_applied = true; break; } } } if!any_rules_applied && rule5_state.is_some() { // If we got here, either no rule was applicable or they did not result in any further // reduction, but we found a case where rule 5 should now be applicable. // The paper claims that the above condition and the fact that the other rules // don#t reduce it further is sufficient to imply this condition. Let's check to be // safe for now :) // TODO: Might remove this check if I'm convinced it's safe. let (clique, clique_neighbors, clique_crit_neighbor_count, clique_neighbors2) = rule5_state.unwrap(); assert!(clique_crit_neighbor_count == 1 && clique_neighbors2.len() == 1); let has_reduced = apply_rule5(g, imap, k, edits, &clique, &clique_neighbors); if!has_reduced { // All the other rules didn't apply, so we got here, and now 5 didn't do anything // either. We're done now. break; } any_rules_applied = true; } let new_count = g.present_node_count(); if new_count == g.size() { continue; } // Construct a new graph and imap with the vertices we marked for removal actually removed. The // new imap still maps from indices into that new graph to the vertices of the original graph // the algorithm got as input. // TODO: Figure out if it's necessary to do this every `while` iteration or if the // reductions are all still valid without it; would also be nice to avoid recomputing the // crit clique graph when it's not necessary. // TODO: Possibly test whether it's faster to just keep the removed_g map around in a larger // scope rather than creating the graph here. 
if new_count == 0 { return true; } let mut new_g = Graph::new(new_count); let mut new_imap = IndexMap::new(new_count); let mut new_vertex = 0; let mut reverse_imap = vec![0; g.size()]; for u in 0..g.size() { if!g.is_present(u) { continue; } for v in g.neighbors(u) { if v > u { continue; } new_g.set_direct(reverse_imap[v], new_vertex, g.get_direct(v, u)); } reverse_imap[u] = new_vertex; new_imap[new_vertex] = imap.take(u); new_vertex += 1; } *g = new_g; *imap = new_imap; } true } // TODO: COOOOMMMEEEENNNNTTTTSSSS!!!! /// Gets all the vertices that are neighbors of the critical clique, but not in the clique /// themselves. No specific order is guaranteed. fn get_clique_neighbors( g: &Graph, clique_idx: usize, crit_graph: &CritCliqueGraph, ) -> (Vec<usize>, usize) { let crit_neighbors = crit_graph.graph.neighbors(clique_idx); let mut count = 0; let neighborhood = crit_neighbors .flat_map(|n| { count += 1; &crit_graph.cliques[n].vertices }) .copied() .filter(|&u| g.is_present(u)) .collect(); (neighborhood, count) } fn get_clique_neighbors2(g: &Graph, clique_idx: usize, crit_graph: &CritCliqueGraph) -> Vec<usize> { let crit_neighbors = crit_graph.graph.neighbors(clique_idx).collect::<Vec<_>>(); crit_neighbors .iter() .flat_map(|&n| { crit_graph .graph .neighbors(n) .filter(|n2|!crit_neighbors.contains(n2)) .flat_map(|n2| &crit_graph.cliques[n2].vertices) }) .copied() .filter(|&u| g.is_present(u)) .collect() } fn count_intersection(n1: impl Iterator<Item = usize>, n2: &[usize]) -> usize { let mut count = 0; for u in n1 { if n2.contains(&u) { count += 1; } } count } struct EditSet { inserts: Vec<(usize, usize)>, deletions: Vec<(usize, usize)>, total_edit_degree: usize, } fn calculate_edits_to_remove_clique_and_neighborhood( g: &Graph, clique: &CritClique, clique_neighbors: &[usize], ) -> EditSet { // Everything in the clique is already connected with the rest of the clique (it's a clique!). 
// All the neighbors are also connected to all the vertices in the clique, because all the // clique vertices have the *same set* of neighbors outside the clique (it's a *critical* // clique!). // So we only need to add edges between the different groups of neighbors. // // The only edges that we need to remove are between the neighbors of the clique to any nodes // that are neither in the neighbors nor the clique itself. (The vertices in the clique // obviously don't have any such neighbors, so there's nothing to remove.) let mut edits = EditSet { inserts: Vec::new(), deletions: Vec::new(), total_edit_degree: 0, }; for i in 0..clique_neighbors.len() { let u = clique_neighbors[i]; if!g.is_present(u) { continue; } // Add edges to other clique neighbors. for j in (i + 1)..clique_neighbors.len() { let v = clique_neighbors[j]; if!g.is_present(v) { continue; } if g.get(u, v) < 0.0 { edits.inserts.push((u, v)); // Increase total degree twice: we only add the (u, v) edge once but it would be // counted in the edit degree for both u and v edits.total_edit_degree += 2; } } // Remove edges to unrelated vertices. // TODO: Try using a BTreeSet for neighbors and vertices, or using some kind of other iteration // strategy to avoid the linear search here. for v in 0..g.size() { if u == v ||!g.is_present(v) { continue; } if clique_neighbors.contains(&v) || clique.vertices.contains(&v) { continue; } if g.get(u, v) > 0.0 { edits.deletions.push((u, v)); // Here the degree is only increased once: it would only count for u, since v isn't // even in the neighborhood and thus not considered. 
edits.total_edit_degree += 1; } } } edits } fn make_clique_and_neighborhood_disjoint_and_remove( g: &mut Graph, imap: &IndexMap, k: &mut f32, edits: &mut Vec<Edit>, edits_to_perform: EditSet, clique: &CritClique, clique_neighbors: &[usize], ) -> bool { for (u, v) in edits_to_perform.inserts { let uv = g.get_mut(u, v); *k += *uv; Edit::insert(edits, &imap, u, v); *uv = f32::INFINITY; } for (u, v) in edits_to_perform.deletions { let uv = g.get_mut(u, v); *k -= *uv; Edit::delete(edits, &imap, u, v); *uv = f32::NEG_INFINITY; } // Now mark the clique and its neighbors as "removed" from the graph, so future reduction and // algorithm steps ignore it. (It is now a disjoint clique, i.e. already done.) for &u in clique_neighbors { g.set_present(u, false); } for &u in &clique.vertices { g.set_present(u, false); } clique_neighbors.len() > 0 || clique.vertices.len() > 0 } fn apply_rule4( g: &mut Graph, imap: &IndexMap, k: &mut f32, edits: &mut Vec<Edit>, clique_neighbors: &[usize], clique_neighbors2: &[usize], u: usize, ) -> bool { // Insert edges in neighborhood to make clique+neighborhood a clique. let mut has_done_edit = false; for i in 0..clique_neighbors.len() { let v = clique_neighbors[i]; // Add edges to other clique neighbors. 
for j in (i + 1)..clique_neighbors.len() { let w = clique_neighbors[j]; let vw = g.get_mut(v, w); if *vw < 0.0 { *k += *vw; Edit::insert(edits, &imap, v, w); *vw = f32::INFINITY; has_done_edit = true; } } } // Remove edges between clique_neighbors and clique_neighbors2-u for &v in clique_neighbors { for &w in clique_neighbors2 { if w == u { continue; } let vw = g.get_mut(v, w); if *vw > 0.0 { *k -= *vw; Edit::delete(edits, &imap, v, w); *vw = f32::NEG_INFINITY; has_done_edit = true; } } } has_done_edit } fn apply_rule5( g: &mut Graph, imap: &IndexMap, k: &mut f32, edits: &mut Vec<Edit>, clique: &CritClique, clique_neighbors: &[usize], ) -> bool { // Can pick any set of |clique| vertices in clique_neighbors, we'll just use the first |clique| // verts. // Then, remove (clique + that set) from G, and set k = k - |clique|. // Note that the modification to k does not actually correspond directly to the edge edits we // do, but this is what the paper has proven to be correct *shrug*. let clique_size = clique.vertices.len(); let to_remove = clique .vertices .iter() .chain(clique_neighbors[..clique_size].iter()) .copied() .collect::<Vec<_>>(); for &u in &to_remove { g.set_present(u, false); for v in 0..g.size() { if!g.is_present(v) { continue; }
{ continue; }
conditional_block
critical_cliques.rs
(&self) -> petgraph::Graph<String, u8, petgraph::Undirected, u32> { use petgraph::prelude::NodeIndex; let mut pg = petgraph::Graph::with_capacity(self.graph.size(), 0); for u in 0..self.graph.size() { pg.add_node( self.cliques[u] .vertices .iter() .map(|i| i.to_string()) .collect::<Vec<_>>() .join(", "), ); } for u in 0..self.graph.size() { for v in (u + 1)..self.graph.size() { if self.graph.get(u, v) > Weight::ZERO { pg.add_edge(NodeIndex::new(u), NodeIndex::new(v), 0); } } } pg } } pub fn build_crit_clique_graph(g: &Graph<Weight>) -> CritCliqueGraph { let mut cliques = Vec::new(); // TODO: This looks at least O(n^2) but should apparently be do-able in O(n + m), so have // another look at making this more efficient. let mut visited = vec![false; g.size()]; for u in g.nodes() { if visited[u] { continue; } visited[u] = true; let mut clique = CritClique::default(); clique.vertices.push(u); for v in g.nodes() { if visited[v] { continue; } // TODO: Is it maybe worth storing neighbor sets instead of recomputing them? if g.closed_neighbors(u).eq(g.closed_neighbors(v)) { clique.vertices.push(v); visited[v] = true; } } cliques.push(clique); } let mut crit_graph = Graph::new(cliques.len()); for c1 in 0..cliques.len() { for c2 in 0..cliques.len() { if c1 == c2 { continue; } if should_be_neighbors(g, &cliques[c1], &cliques[c2]) { crit_graph.set(c1, c2, Weight::ONE); } } } CritCliqueGraph { cliques, graph: crit_graph, } } fn should_be_neighbors(g: &Graph<Weight>, c1: &CritClique, c2: &CritClique) -> bool { for &u in &c1.vertices { for &v in &c2.vertices { if!g.has_edge(u, v) { return false; } } } true } /// Performs a parameter-independent reduction on the graph `g` by constructing the critical clique /// graph and merging all critical cliques into a single vertex. /// This assumes that the input graph is unweighted (i.e. all weights are +1 or -1 exactly). The /// reduced graph will be weighted however. 
pub fn merge_cliques( g: &Graph<Weight>, imap: &IndexMap, _path_log: &mut String, ) -> (Graph<Weight>, IndexMap) { let mut crit = build_crit_clique_graph(g); let mut crit_imap = IndexMap::empty(crit.graph.size()); for u in 0..crit.graph.size() { for v in (u + 1)..crit.graph.size() { //let uv = crit.graph.get_mut_direct(u, v); let uv = crit.graph.get(u, v); let sign = uv.signum(); let weight = crit.cliques[u].vertices.len() * crit.cliques[v].vertices.len(); crit.graph.set(u, v, (weight as Weight) * sign); } crit_imap.set( u, crit.cliques[u] .vertices .iter() .flat_map(|v| imap[*v].iter().copied()) .collect(), ); if crit_imap[u].len() > 1 { append_path_log_dir!(_path_log, "critcliques, merged {:?}\n", crit_imap[u]); } } (crit.graph, crit_imap) } // This kernel can only straightforwardly be applied to unweighted instances. // However, before even starting the parameter search, we reduce the unweighted graph by converting // it into a weighted one. Thus we cannot use this kernel at the moment. 
/* // Chen and Meng: A 2k Kernel for the Cluster Editing Problem, 2010 pub fn apply_reductions( g: &mut Graph, imap: &mut IndexMap, k: &mut f32, edits: &mut Vec<Edit>, ) -> bool { let mut any_rules_applied = true; while any_rules_applied { any_rules_applied = false; let mut rule5_state = None; let crit = build_crit_clique_graph(g); for (clique_idx, clique) in crit.cliques.iter().enumerate() { let (clique_neighbors, clique_crit_neighbor_count) = get_clique_neighbors(g, clique_idx, &crit); let edit_set = calculate_edits_to_remove_clique_and_neighborhood(g, clique, &clique_neighbors); let clique_len = clique.vertices.len(); let neighbors_len = clique_neighbors.len(); let total_edit_degree = edit_set.total_edit_degree; let rule1_applicable = clique_len as f32 > *k; let rule2_applicable = clique_len >= neighbors_len && clique_len + neighbors_len > total_edit_degree; let mut rule3_applicable = false; let mut rule4_applicable = false; let mut rule4_vertex = None; let mut clique_neighbors2 = None; if!rule1_applicable &&!rule2_applicable { // Only calculate this if the other two aren't already true since it's a bit more work if clique_len < neighbors_len && clique_len + neighbors_len > total_edit_degree { let neighbors2 = get_clique_neighbors2(g, clique_idx, &crit); let threshold = (clique_len + neighbors_len) / 2; for &u in &neighbors2 { let count = count_intersection(g.neighbors(u), &clique_neighbors); if count > threshold { rule4_vertex = Some(u); break; } } if rule5_state.is_none() { rule5_state = Some(( clique.clone(), clique_neighbors.clone(), clique_crit_neighbor_count, neighbors2.clone(), )); } rule3_applicable = rule4_vertex.is_none(); rule4_applicable = rule4_vertex.is_some(); clique_neighbors2 = Some(neighbors2); } } if rule1_applicable || rule2_applicable || rule3_applicable { let has_reduced = make_clique_and_neighborhood_disjoint_and_remove( g, imap, k, edits, edit_set, &clique, &clique_neighbors, ); if *k < 0.0 { return false; } if has_reduced { 
any_rules_applied = true; break; } } if rule4_applicable { let has_reduced = apply_rule4( g, imap, k, edits, &clique_neighbors, &clique_neighbors2.unwrap(), rule4_vertex.unwrap(), ); if *k < 0.0 { return false; } if has_reduced { any_rules_applied = true; break; } } } if!any_rules_applied && rule5_state.is_some() { // If we got here, either no rule was applicable or they did not result in any further // reduction, but we found a case where rule 5 should now be applicable. // The paper claims that the above condition and the fact that the other rules // don#t reduce it further is sufficient to imply this condition. Let's check to be // safe for now :) // TODO: Might remove this check if I'm convinced it's safe. let (clique, clique_neighbors, clique_crit_neighbor_count, clique_neighbors2) = rule5_state.unwrap(); assert!(clique_crit_neighbor_count == 1 && clique_neighbors2.len() == 1); let has_reduced = apply_rule5(g, imap, k, edits, &clique, &clique_neighbors); if!has_reduced { // All the other rules didn't apply, so we got here, and now 5 didn't do anything // either. We're done now. break; } any_rules_applied = true; } let new_count = g.present_node_count(); if new_count == g.size() { continue; } // Construct a new graph and imap with the vertices we marked for removal actually removed. The // new imap still maps from indices into that new graph to the vertices of the original graph // the algorithm got as input. // TODO: Figure out if it's necessary to do this every `while` iteration or if the // reductions are all still valid without it; would also be nice to avoid recomputing the // crit clique graph when it's not necessary. // TODO: Possibly test whether it's faster to just keep the removed_g map around in a larger // scope rather than creating the graph here. 
if new_count == 0 { return true; } let mut new_g = Graph::new(new_count); let mut new_imap = IndexMap::new(new_count); let mut new_vertex = 0; let mut reverse_imap = vec![0; g.size()]; for u in 0..g.size() { if!g.is_present(u) { continue; } for v in g.neighbors(u) { if v > u { continue; } new_g.set_direct(reverse_imap[v], new_vertex, g.get_direct(v, u)); } reverse_imap[u] = new_vertex; new_imap[new_vertex] = imap.take(u); new_vertex += 1; } *g = new_g; *imap = new_imap; } true } // TODO: COOOOMMMEEEENNNNTTTTSSSS!!!! /// Gets all the vertices that are neighbors of the critical clique, but not in the clique /// themselves. No specific order is guaranteed. fn get_clique_neighbors( g: &Graph, clique_idx: usize, crit_graph: &CritCliqueGraph, ) -> (Vec<usize>, usize) { let crit_neighbors = crit_graph.graph.neighbors(clique_idx); let mut count = 0; let neighborhood = crit_neighbors .flat_map(|n| { count += 1; &crit_graph.cliques[n].vertices }) .copied() .filter(|&u| g.is_present(u)) .collect(); (neighborhood, count) } fn get_clique_neighbors2(g: &Graph, clique_idx: usize, crit_graph: &CritCliqueGraph) -> Vec<usize> { let crit_neighbors = crit_graph.graph.neighbors(clique_idx).collect::<Vec<_>>(); crit_neighbors .iter() .flat_map(|&n| { crit_graph .graph .neighbors(n) .filter(|n2|!crit_neighbors.contains(n2)) .flat_map(|n2| &crit_graph.cliques[n2].vertices) }) .copied() .filter(|&u| g.is_present(u)) .collect() } fn count_intersection(n1: impl Iterator<Item = usize>, n2: &[usize]) -> usize { let mut count = 0; for u in n1 { if n2.contains(&u) { count += 1; } } count } struct EditSet { inserts: Vec<(usize, usize)>, deletions: Vec<(usize, usize)>, total_edit_degree: usize, } fn calculate_edits_to_remove_clique_and_neighborhood( g: &Graph, clique: &CritClique, clique_neighbors: &[usize], ) -> EditSet { // Everything in the clique is already connected with the rest of the clique (it's a clique!). 
// All the neighbors are also connected to all the vertices in the clique, because all the // clique vertices have the *same set* of neighbors outside the clique (it's a *critical* // clique!). // So we only need to add edges between the different groups of neighbors. // // The only edges that we need to remove are between the neighbors of the clique to any nodes // that are neither in the neighbors nor the clique itself. (The vertices in the clique // obviously don't have any such neighbors, so there's nothing to remove.) let mut edits = EditSet { inserts: Vec::new(), deletions: Vec::new(), total_edit_degree: 0, }; for i in 0..clique_neighbors.len() { let u = clique_neighbors[i]; if!g.is_present(u) { continue; } // Add edges to other clique neighbors. for j in (i + 1)..clique_neighbors.len() { let v = clique_neighbors[j]; if!g.is_present(v) { continue; } if g.get(u, v) < 0.0 { edits.inserts.push((u, v)); // Increase total degree twice: we only add the (u, v) edge once but it would be // counted in the edit degree for both u and v edits.total_edit_degree += 2; } } // Remove edges to unrelated vertices. // TODO: Try using a BTreeSet for neighbors and vertices, or using some kind of other iteration // strategy to avoid the linear search here. for v in 0..g.size() { if u == v ||!g.is_present(v) { continue; } if clique_neighbors.contains(&v) || clique.vertices.contains(&v) { continue; } if g.get(u, v) > 0.0 { edits.deletions.push((u, v)); // Here the degree is only increased once: it would only count for u, since v isn't // even in the neighborhood and thus not considered. 
edits.total_edit_degree += 1; } } } edits } fn make_clique_and_neighborhood_disjoint_and_remove( g: &mut Graph, imap: &IndexMap, k: &mut f32, edits: &mut Vec<Edit>, edits_to_perform: EditSet, clique: &CritClique, clique_neighbors: &[usize], ) -> bool { for (u, v) in edits_to_perform.inserts { let uv = g.get_mut(u, v); *k += *uv; Edit::insert(edits, &imap, u, v); *uv = f32::INFINITY; } for (u, v) in edits_to_perform.deletions { let uv = g.get_mut(u, v); *k -= *uv; Edit::delete(edits, &imap, u, v); *uv = f32::NEG_INFINITY; } // Now mark the clique and its neighbors as "removed" from the graph, so future reduction and // algorithm steps ignore it. (It is now a disjoint clique, i.e. already done.) for &u in clique_neighbors { g.set_present(u, false); } for &u in &clique.vertices { g.set_present(u, false); } clique_neighbors.len() > 0 || clique.vertices.len() > 0 } fn apply_rule4( g: &mut Graph, imap: &IndexMap, k: &mut f32, edits: &mut Vec<Edit>, clique_neighbors: &[usize], clique_neighbors2: &[usize], u: usize, ) -> bool { // Insert edges in neighborhood to make clique+neighborhood a clique. let mut has_done_edit = false; for i in 0..clique_neighbors.len() { let v = clique_neighbors[i]; // Add edges to other clique neighbors. for j in (i + 1)..clique_neighbors.len() { let w = clique_neighbors[j]; let vw = g.get_mut(v, w); if *vw < 0.0 { *k += *vw; Edit::insert(edits, &imap, v, w); *vw = f32::INFINITY; has_done_edit = true; } } } // Remove edges between clique_neighbors and clique_neighbors2-u for &v in clique_neighbors { for &w in clique_neighbors2 { if w == u { continue; } let vw = g.get_mut(v, w); if *vw > 0.0 { *k -= *vw; Edit::delete(edits, &imap, v, w); *vw = f32::NEG_INFINITY; has_done_edit = true; } } } has_done_edit } fn apply_rule5( g: &mut Graph, imap: &IndexMap, k: &mut f32, edits: &mut Vec<Edit>, clique: &CritClique, clique_neighbors: &[usize], ) -> bool {
to_petgraph
identifier_name
critical_cliques.rs
v in (u + 1)..self.graph.size() { if self.graph.get(u, v) > Weight::ZERO { pg.add_edge(NodeIndex::new(u), NodeIndex::new(v), 0); } } } pg } } pub fn build_crit_clique_graph(g: &Graph<Weight>) -> CritCliqueGraph { let mut cliques = Vec::new(); // TODO: This looks at least O(n^2) but should apparently be do-able in O(n + m), so have // another look at making this more efficient. let mut visited = vec![false; g.size()]; for u in g.nodes() { if visited[u] { continue; } visited[u] = true; let mut clique = CritClique::default(); clique.vertices.push(u); for v in g.nodes() { if visited[v] { continue; } // TODO: Is it maybe worth storing neighbor sets instead of recomputing them? if g.closed_neighbors(u).eq(g.closed_neighbors(v)) { clique.vertices.push(v); visited[v] = true; } } cliques.push(clique); } let mut crit_graph = Graph::new(cliques.len()); for c1 in 0..cliques.len() { for c2 in 0..cliques.len() { if c1 == c2 { continue; } if should_be_neighbors(g, &cliques[c1], &cliques[c2]) { crit_graph.set(c1, c2, Weight::ONE); } } } CritCliqueGraph { cliques, graph: crit_graph, } } fn should_be_neighbors(g: &Graph<Weight>, c1: &CritClique, c2: &CritClique) -> bool { for &u in &c1.vertices { for &v in &c2.vertices { if!g.has_edge(u, v) { return false; } } } true } /// Performs a parameter-independent reduction on the graph `g` by constructing the critical clique /// graph and merging all critical cliques into a single vertex. /// This assumes that the input graph is unweighted (i.e. all weights are +1 or -1 exactly). The /// reduced graph will be weighted however. 
pub fn merge_cliques( g: &Graph<Weight>, imap: &IndexMap, _path_log: &mut String, ) -> (Graph<Weight>, IndexMap) { let mut crit = build_crit_clique_graph(g); let mut crit_imap = IndexMap::empty(crit.graph.size()); for u in 0..crit.graph.size() { for v in (u + 1)..crit.graph.size() { //let uv = crit.graph.get_mut_direct(u, v); let uv = crit.graph.get(u, v); let sign = uv.signum(); let weight = crit.cliques[u].vertices.len() * crit.cliques[v].vertices.len(); crit.graph.set(u, v, (weight as Weight) * sign); } crit_imap.set( u, crit.cliques[u] .vertices .iter() .flat_map(|v| imap[*v].iter().copied()) .collect(), ); if crit_imap[u].len() > 1 { append_path_log_dir!(_path_log, "critcliques, merged {:?}\n", crit_imap[u]); } } (crit.graph, crit_imap) } // This kernel can only straightforwardly be applied to unweighted instances. // However, before even starting the parameter search, we reduce the unweighted graph by converting // it into a weighted one. Thus we cannot use this kernel at the moment. 
/* // Chen and Meng: A 2k Kernel for the Cluster Editing Problem, 2010 pub fn apply_reductions( g: &mut Graph, imap: &mut IndexMap, k: &mut f32, edits: &mut Vec<Edit>, ) -> bool { let mut any_rules_applied = true; while any_rules_applied { any_rules_applied = false; let mut rule5_state = None; let crit = build_crit_clique_graph(g); for (clique_idx, clique) in crit.cliques.iter().enumerate() { let (clique_neighbors, clique_crit_neighbor_count) = get_clique_neighbors(g, clique_idx, &crit); let edit_set = calculate_edits_to_remove_clique_and_neighborhood(g, clique, &clique_neighbors); let clique_len = clique.vertices.len(); let neighbors_len = clique_neighbors.len(); let total_edit_degree = edit_set.total_edit_degree; let rule1_applicable = clique_len as f32 > *k; let rule2_applicable = clique_len >= neighbors_len && clique_len + neighbors_len > total_edit_degree; let mut rule3_applicable = false; let mut rule4_applicable = false; let mut rule4_vertex = None; let mut clique_neighbors2 = None; if!rule1_applicable &&!rule2_applicable { // Only calculate this if the other two aren't already true since it's a bit more work if clique_len < neighbors_len && clique_len + neighbors_len > total_edit_degree { let neighbors2 = get_clique_neighbors2(g, clique_idx, &crit); let threshold = (clique_len + neighbors_len) / 2; for &u in &neighbors2 { let count = count_intersection(g.neighbors(u), &clique_neighbors); if count > threshold { rule4_vertex = Some(u); break; } } if rule5_state.is_none() { rule5_state = Some(( clique.clone(), clique_neighbors.clone(), clique_crit_neighbor_count, neighbors2.clone(), )); } rule3_applicable = rule4_vertex.is_none(); rule4_applicable = rule4_vertex.is_some(); clique_neighbors2 = Some(neighbors2); } } if rule1_applicable || rule2_applicable || rule3_applicable { let has_reduced = make_clique_and_neighborhood_disjoint_and_remove( g, imap, k, edits, edit_set, &clique, &clique_neighbors, ); if *k < 0.0 { return false; } if has_reduced { 
any_rules_applied = true; break; } } if rule4_applicable { let has_reduced = apply_rule4( g, imap, k, edits, &clique_neighbors, &clique_neighbors2.unwrap(), rule4_vertex.unwrap(), ); if *k < 0.0 { return false; } if has_reduced { any_rules_applied = true; break; } } } if!any_rules_applied && rule5_state.is_some() { // If we got here, either no rule was applicable or they did not result in any further // reduction, but we found a case where rule 5 should now be applicable. // The paper claims that the above condition and the fact that the other rules // don#t reduce it further is sufficient to imply this condition. Let's check to be // safe for now :) // TODO: Might remove this check if I'm convinced it's safe. let (clique, clique_neighbors, clique_crit_neighbor_count, clique_neighbors2) = rule5_state.unwrap(); assert!(clique_crit_neighbor_count == 1 && clique_neighbors2.len() == 1); let has_reduced = apply_rule5(g, imap, k, edits, &clique, &clique_neighbors); if!has_reduced { // All the other rules didn't apply, so we got here, and now 5 didn't do anything // either. We're done now. break; } any_rules_applied = true; } let new_count = g.present_node_count(); if new_count == g.size() { continue; } // Construct a new graph and imap with the vertices we marked for removal actually removed. The // new imap still maps from indices into that new graph to the vertices of the original graph // the algorithm got as input. // TODO: Figure out if it's necessary to do this every `while` iteration or if the // reductions are all still valid without it; would also be nice to avoid recomputing the // crit clique graph when it's not necessary. // TODO: Possibly test whether it's faster to just keep the removed_g map around in a larger // scope rather than creating the graph here. 
if new_count == 0 { return true; } let mut new_g = Graph::new(new_count); let mut new_imap = IndexMap::new(new_count); let mut new_vertex = 0; let mut reverse_imap = vec![0; g.size()]; for u in 0..g.size() { if!g.is_present(u) { continue; } for v in g.neighbors(u) { if v > u { continue; } new_g.set_direct(reverse_imap[v], new_vertex, g.get_direct(v, u)); } reverse_imap[u] = new_vertex; new_imap[new_vertex] = imap.take(u); new_vertex += 1; } *g = new_g; *imap = new_imap; } true } // TODO: COOOOMMMEEEENNNNTTTTSSSS!!!! /// Gets all the vertices that are neighbors of the critical clique, but not in the clique /// themselves. No specific order is guaranteed. fn get_clique_neighbors( g: &Graph, clique_idx: usize, crit_graph: &CritCliqueGraph, ) -> (Vec<usize>, usize) { let crit_neighbors = crit_graph.graph.neighbors(clique_idx); let mut count = 0; let neighborhood = crit_neighbors .flat_map(|n| { count += 1; &crit_graph.cliques[n].vertices }) .copied() .filter(|&u| g.is_present(u)) .collect(); (neighborhood, count) } fn get_clique_neighbors2(g: &Graph, clique_idx: usize, crit_graph: &CritCliqueGraph) -> Vec<usize> { let crit_neighbors = crit_graph.graph.neighbors(clique_idx).collect::<Vec<_>>(); crit_neighbors .iter() .flat_map(|&n| { crit_graph .graph .neighbors(n) .filter(|n2|!crit_neighbors.contains(n2)) .flat_map(|n2| &crit_graph.cliques[n2].vertices) }) .copied() .filter(|&u| g.is_present(u)) .collect() } fn count_intersection(n1: impl Iterator<Item = usize>, n2: &[usize]) -> usize { let mut count = 0; for u in n1 { if n2.contains(&u) { count += 1; } } count } struct EditSet { inserts: Vec<(usize, usize)>, deletions: Vec<(usize, usize)>, total_edit_degree: usize, } fn calculate_edits_to_remove_clique_and_neighborhood( g: &Graph, clique: &CritClique, clique_neighbors: &[usize], ) -> EditSet { // Everything in the clique is already connected with the rest of the clique (it's a clique!). 
// All the neighbors are also connected to all the vertices in the clique, because all the // clique vertices have the *same set* of neighbors outside the clique (it's a *critical* // clique!). // So we only need to add edges between the different groups of neighbors. // // The only edges that we need to remove are between the neighbors of the clique to any nodes // that are neither in the neighbors nor the clique itself. (The vertices in the clique // obviously don't have any such neighbors, so there's nothing to remove.) let mut edits = EditSet { inserts: Vec::new(), deletions: Vec::new(), total_edit_degree: 0, }; for i in 0..clique_neighbors.len() { let u = clique_neighbors[i]; if!g.is_present(u) { continue; } // Add edges to other clique neighbors. for j in (i + 1)..clique_neighbors.len() { let v = clique_neighbors[j]; if!g.is_present(v) { continue; } if g.get(u, v) < 0.0 { edits.inserts.push((u, v)); // Increase total degree twice: we only add the (u, v) edge once but it would be // counted in the edit degree for both u and v edits.total_edit_degree += 2; } } // Remove edges to unrelated vertices. // TODO: Try using a BTreeSet for neighbors and vertices, or using some kind of other iteration // strategy to avoid the linear search here. for v in 0..g.size() { if u == v ||!g.is_present(v) { continue; } if clique_neighbors.contains(&v) || clique.vertices.contains(&v) { continue; } if g.get(u, v) > 0.0 { edits.deletions.push((u, v)); // Here the degree is only increased once: it would only count for u, since v isn't // even in the neighborhood and thus not considered. 
edits.total_edit_degree += 1; } } } edits } fn make_clique_and_neighborhood_disjoint_and_remove( g: &mut Graph, imap: &IndexMap, k: &mut f32, edits: &mut Vec<Edit>, edits_to_perform: EditSet, clique: &CritClique, clique_neighbors: &[usize], ) -> bool { for (u, v) in edits_to_perform.inserts { let uv = g.get_mut(u, v); *k += *uv; Edit::insert(edits, &imap, u, v); *uv = f32::INFINITY; } for (u, v) in edits_to_perform.deletions { let uv = g.get_mut(u, v);
// Now mark the clique and its neighbors as "removed" from the graph, so future reduction and // algorithm steps ignore it. (It is now a disjoint clique, i.e. already done.) for &u in clique_neighbors { g.set_present(u, false); } for &u in &clique.vertices { g.set_present(u, false); } clique_neighbors.len() > 0 || clique.vertices.len() > 0 } fn apply_rule4( g: &mut Graph, imap: &IndexMap, k: &mut f32, edits: &mut Vec<Edit>, clique_neighbors: &[usize], clique_neighbors2: &[usize], u: usize, ) -> bool { // Insert edges in neighborhood to make clique+neighborhood a clique. let mut has_done_edit = false; for i in 0..clique_neighbors.len() { let v = clique_neighbors[i]; // Add edges to other clique neighbors. for j in (i + 1)..clique_neighbors.len() { let w = clique_neighbors[j]; let vw = g.get_mut(v, w); if *vw < 0.0 { *k += *vw; Edit::insert(edits, &imap, v, w); *vw = f32::INFINITY; has_done_edit = true; } } } // Remove edges between clique_neighbors and clique_neighbors2-u for &v in clique_neighbors { for &w in clique_neighbors2 { if w == u { continue; } let vw = g.get_mut(v, w); if *vw > 0.0 { *k -= *vw; Edit::delete(edits, &imap, v, w); *vw = f32::NEG_INFINITY; has_done_edit = true; } } } has_done_edit } fn apply_rule5( g: &mut Graph, imap: &IndexMap, k: &mut f32, edits: &mut Vec<Edit>, clique: &CritClique, clique_neighbors: &[usize], ) -> bool { // Can pick any set of |clique| vertices in clique_neighbors, we'll just use the first |clique| // verts. // Then, remove (clique + that set) from G, and set k = k - |clique|. // Note that the modification to k does not actually correspond directly to the edge edits we // do, but this is what the paper has proven to be correct *shrug*. let clique_size = clique.vertices.len(); let to_remove = clique .vertices .iter() .chain(clique_neighbors[..clique_size].iter()) .copied() .collect::<Vec<_>>(); for &u in &to_remove { g.set_present(u, false); for v in 0..g.size() { if!g.is_present(v) { continue; }
*k -= *uv; Edit::delete(edits, &imap, u, v); *uv = f32::NEG_INFINITY; }
random_line_split
config_diff.rs
use std::num::NonZeroU32; use merge::Merge; use schemars::JsonSchema; use segment::types::{HnswConfig, ProductQuantization, ScalarQuantization}; use serde::de::DeserializeOwned; use serde::{Deserialize, Serialize}; use serde_json::Value; use validator::{Validate, ValidationErrors}; use crate::config::{CollectionParams, WalConfig}; use crate::operations::types::CollectionResult; use crate::optimizers_builder::OptimizersConfig; // Structures for partial update of collection params // TODO: make auto-generated somehow... pub trait DiffConfig<T: DeserializeOwned + Serialize> { /// Update the given `config` with fields in this diff /// /// This clones, modifies and returns `config`. /// /// This diff has higher priority, meaning that fields specified in this diff will always be in /// the returned object. fn update(self, config: &T) -> CollectionResult<T> where Self: Sized + Serialize + DeserializeOwned + Merge, { update_config(config, self) } fn from_full(full: &T) -> CollectionResult<Self> where Self: Sized + Serialize + DeserializeOwned,
} #[derive( Debug, Default, Deserialize, Serialize, JsonSchema, Validate, Copy, Clone, PartialEq, Eq, Merge, Hash, )] #[serde(rename_all = "snake_case")] pub struct HnswConfigDiff { /// Number of edges per node in the index graph. Larger the value - more accurate the search, more space required. #[serde(skip_serializing_if = "Option::is_none")] pub m: Option<usize>, /// Number of neighbours to consider during the index building. Larger the value - more accurate the search, more time required to build the index. #[validate(range(min = 4))] #[serde(skip_serializing_if = "Option::is_none")] pub ef_construct: Option<usize>, /// Minimal size (in kilobytes) of vectors for additional payload-based indexing. /// If payload chunk is smaller than `full_scan_threshold_kb` additional indexing won't be used - /// in this case full-scan search should be preferred by query planner and additional indexing is not required. /// Note: 1Kb = 1 vector of size 256 #[serde( alias = "full_scan_threshold_kb", default, skip_serializing_if = "Option::is_none" )] #[validate(range(min = 10))] pub full_scan_threshold: Option<usize>, /// Number of parallel threads used for background index building. If 0 - auto selection. #[serde(default, skip_serializing_if = "Option::is_none")] pub max_indexing_threads: Option<usize>, /// Store HNSW index on disk. If set to false, the index will be stored in RAM. Default: false #[serde(default, skip_serializing_if = "Option::is_none")] pub on_disk: Option<bool>, /// Custom M param for additional payload-aware HNSW links. If not set, default M will be used. 
#[serde(default, skip_serializing_if = "Option::is_none")] pub payload_m: Option<usize>, } #[derive( Debug, Deserialize, Serialize, JsonSchema, Validate, Clone, Merge, PartialEq, Eq, Hash, )] pub struct WalConfigDiff { /// Size of a single WAL segment in MB #[validate(range(min = 1))] pub wal_capacity_mb: Option<usize>, /// Number of WAL segments to create ahead of actually used ones pub wal_segments_ahead: Option<usize>, } #[derive(Debug, Deserialize, Serialize, JsonSchema, Clone, Merge, PartialEq, Eq, Hash)] pub struct CollectionParamsDiff { /// Number of replicas for each shard pub replication_factor: Option<NonZeroU32>, /// Minimal number successful responses from replicas to consider operation successful pub write_consistency_factor: Option<NonZeroU32>, /// If true - point's payload will not be stored in memory. /// It will be read from the disk every time it is requested. /// This setting saves RAM by (slightly) increasing the response time. /// Note: those payload values that are involved in filtering and are indexed - remain in RAM. #[serde(default)] pub on_disk_payload: Option<bool>, } #[derive(Debug, Deserialize, Serialize, JsonSchema, Validate, Clone, Merge)] pub struct OptimizersConfigDiff { /// The minimal fraction of deleted vectors in a segment, required to perform segment optimization pub deleted_threshold: Option<f64>, /// The minimal number of vectors in a segment, required to perform segment optimization pub vacuum_min_vector_number: Option<usize>, /// Target amount of segments optimizer will try to keep. 
/// Real amount of segments may vary depending on multiple parameters: /// - Amount of stored points /// - Current write RPS /// /// It is recommended to select default number of segments as a factor of the number of search threads, /// so that each segment would be handled evenly by one of the threads /// If `default_segment_number = 0`, will be automatically selected by the number of available CPUs pub default_segment_number: Option<usize>, /// Do not create segments larger this size (in kilobytes). /// Large segments might require disproportionately long indexation times, /// therefore it makes sense to limit the size of segments. /// /// If indexation speed have more priority for your - make this parameter lower. /// If search speed is more important - make this parameter higher. /// Note: 1Kb = 1 vector of size 256 #[serde(alias = "max_segment_size_kb")] pub max_segment_size: Option<usize>, /// Maximum size (in kilobytes) of vectors to store in-memory per segment. /// Segments larger than this threshold will be stored as read-only memmaped file. /// /// Memmap storage is disabled by default, to enable it, set this threshold to a reasonable value. /// /// To disable memmap storage, set this to `0`. /// /// Note: 1Kb = 1 vector of size 256 #[serde(alias = "memmap_threshold_kb")] pub memmap_threshold: Option<usize>, /// Maximum size (in kilobytes) of vectors allowed for plain index, exceeding this threshold will enable vector indexing /// /// Default value is 20,000, based on <https://github.com/google-research/google-research/blob/master/scann/docs/algorithms.md>. /// /// To disable vector indexing, set to `0`. /// /// Note: 1kB = 1 vector of size 256. #[serde(alias = "indexing_threshold_kb")] pub indexing_threshold: Option<usize>, /// Minimum interval between forced flushes. 
pub flush_interval_sec: Option<u64>, /// Maximum available threads for optimization workers pub max_optimization_threads: Option<usize>, } impl std::hash::Hash for OptimizersConfigDiff { fn hash<H: std::hash::Hasher>(&self, state: &mut H) { self.deleted_threshold.map(f64::to_le_bytes).hash(state); self.vacuum_min_vector_number.hash(state); self.default_segment_number.hash(state); self.max_segment_size.hash(state); self.memmap_threshold.hash(state); self.indexing_threshold.hash(state); self.flush_interval_sec.hash(state); self.max_optimization_threads.hash(state); } } impl PartialEq for OptimizersConfigDiff { fn eq(&self, other: &Self) -> bool { self.deleted_threshold.map(f64::to_le_bytes) == other.deleted_threshold.map(f64::to_le_bytes) && self.vacuum_min_vector_number == other.vacuum_min_vector_number && self.default_segment_number == other.default_segment_number && self.max_segment_size == other.max_segment_size && self.memmap_threshold == other.memmap_threshold && self.indexing_threshold == other.indexing_threshold && self.flush_interval_sec == other.flush_interval_sec && self.max_optimization_threads == other.max_optimization_threads } } impl Eq for OptimizersConfigDiff {} impl DiffConfig<HnswConfig> for HnswConfigDiff {} impl DiffConfig<HnswConfigDiff> for HnswConfigDiff {} impl DiffConfig<OptimizersConfig> for OptimizersConfigDiff {} impl DiffConfig<WalConfig> for WalConfigDiff {} impl DiffConfig<CollectionParams> for CollectionParamsDiff {} impl From<HnswConfig> for HnswConfigDiff { fn from(config: HnswConfig) -> Self { HnswConfigDiff::from_full(&config).unwrap() } } impl From<OptimizersConfig> for OptimizersConfigDiff { fn from(config: OptimizersConfig) -> Self { OptimizersConfigDiff::from_full(&config).unwrap() } } impl From<WalConfig> for WalConfigDiff { fn from(config: WalConfig) -> Self { WalConfigDiff::from_full(&config).unwrap() } } impl From<CollectionParams> for CollectionParamsDiff { fn from(config: CollectionParams) -> Self { 
CollectionParamsDiff::from_full(&config).unwrap() } } pub fn from_full<T: DeserializeOwned + Serialize, Y: DeserializeOwned + Serialize>( full_config: &T, ) -> CollectionResult<Y> { let json = serde_json::to_value(full_config)?; let res = serde_json::from_value(json)?; Ok(res) } /// Merge first level of JSON values, if diff values present explicitly /// /// Example: /// /// base: {"a": 1, "b": 2} /// diff: {"a": 3} /// result: {"a": 3, "b": 2} /// /// base: {"a": 1, "b": 2} /// diff: {"a": null} /// result: {"a": 1, "b": 2} fn merge_level_0(base: &mut Value, diff: Value) { match (base, diff) { (base @ &mut Value::Object(_), Value::Object(diff)) => { let base = base.as_object_mut().unwrap(); for (k, v) in diff { if!v.is_null() { base.insert(k, v); } } } (_base, _diff) => {} } } /// Hacky way to update configuration structures with diff-updates. /// Intended to only be used in non critical for speed places. /// TODO: replace with proc macro pub fn update_config<T: DeserializeOwned + Serialize, Y: DeserializeOwned + Serialize + Merge>( config: &T, update: Y, ) -> CollectionResult<T> { let mut config_values = serde_json::to_value(config)?; let diff_values = serde_json::to_value(&update)?; merge_level_0(&mut config_values, diff_values); let res = serde_json::from_value(config_values)?; Ok(res) } /// Hacky way to figure out if the given configuration is considered empty /// /// The following types are considered empty: /// - Null /// - Empty string /// - Array or object with zero items /// /// Intended to only be used in non critical for speed places. 
pub fn is_empty<T: Serialize>(config: &T) -> CollectionResult<bool> { let config_values = serde_json::to_value(config)?; Ok(match config_values { Value::Null => true, Value::String(value) => value.is_empty(), Value::Array(values) => values.is_empty(), Value::Object(values) => values.is_empty(), Value::Bool(_) | Value::Number(_) => false, }) } #[derive(Debug, Deserialize, Serialize, JsonSchema, Clone, PartialEq, Eq, Hash)] pub enum Disabled { Disabled, } #[derive(Debug, Deserialize, Serialize, JsonSchema, Clone, PartialEq, Eq, Hash)] #[serde(rename_all = "snake_case")] #[serde(untagged)] pub enum QuantizationConfigDiff { Scalar(ScalarQuantization), Product(ProductQuantization), Disabled(Disabled), } impl QuantizationConfigDiff { pub fn new_disabled() -> Self { QuantizationConfigDiff::Disabled(Disabled::Disabled) } } impl Validate for QuantizationConfigDiff { fn validate(&self) -> Result<(), ValidationErrors> { match self { QuantizationConfigDiff::Scalar(scalar) => scalar.validate(), QuantizationConfigDiff::Product(product) => product.validate(), QuantizationConfigDiff::Disabled(_) => Ok(()), } } } #[cfg(test)] mod tests { use std::num::NonZeroU64; use segment::types::{Distance, HnswConfig}; use super::*; use crate::operations::types::VectorParams; use crate::optimizers_builder::OptimizersConfig; #[test] fn test_update_collection_params() { let params = CollectionParams { vectors: VectorParams { size: NonZeroU64::new(128).unwrap(), distance: Distance::Cosine, hnsw_config: None, quantization_config: None, on_disk: None, } .into(), shard_number: NonZeroU32::new(1).unwrap(), replication_factor: NonZeroU32::new(1).unwrap(), write_consistency_factor: NonZeroU32::new(1).unwrap(), on_disk_payload: false, }; let diff = CollectionParamsDiff { replication_factor: None, write_consistency_factor: Some(NonZeroU32::new(2).unwrap()), on_disk_payload: None, }; let new_params = diff.update(&params).unwrap(); assert_eq!(new_params.replication_factor.get(), 1); 
assert_eq!(new_params.write_consistency_factor.get(), 2); assert!(!new_params.on_disk_payload); } #[test] fn test_hnsw_update() { let base_config = HnswConfig::default(); let update: HnswConfigDiff = serde_json::from_str(r#"{ "m": 32 }"#).unwrap(); let new_config = update.update(&base_config).unwrap(); assert_eq!(new_config.m, 32) } #[test] fn test_optimizer_update() { let base_config = OptimizersConfig { deleted_threshold: 0.9, vacuum_min_vector_number: 1000, default_segment_number: 10, max_segment_size: None, memmap_threshold: None, indexing_threshold: Some(50_000), flush_interval_sec: 30, max_optimization_threads: 1, }; let update: OptimizersConfigDiff = serde_json::from_str(r#"{ "indexing_threshold": 10000 }"#).unwrap(); let new_config = update.update(&base_config).unwrap(); assert_eq!(new_config.indexing_threshold, Some(10000)) } #[test] fn test_wal_config() { let base_config = WalConfig::default(); let update: WalConfigDiff = serde_json::from_str(r#"{ "wal_segments_ahead": 2 }"#).unwrap(); let new_config = update.update(&base_config).unwrap(); assert_eq!(new_config.wal_segments_ahead, 2) } }
{ from_full(full) }
identifier_body
config_diff.rs
use std::num::NonZeroU32; use merge::Merge; use schemars::JsonSchema; use segment::types::{HnswConfig, ProductQuantization, ScalarQuantization}; use serde::de::DeserializeOwned; use serde::{Deserialize, Serialize}; use serde_json::Value; use validator::{Validate, ValidationErrors}; use crate::config::{CollectionParams, WalConfig}; use crate::operations::types::CollectionResult; use crate::optimizers_builder::OptimizersConfig; // Structures for partial update of collection params // TODO: make auto-generated somehow... pub trait DiffConfig<T: DeserializeOwned + Serialize> { /// Update the given `config` with fields in this diff /// /// This clones, modifies and returns `config`. /// /// This diff has higher priority, meaning that fields specified in this diff will always be in /// the returned object. fn update(self, config: &T) -> CollectionResult<T> where Self: Sized + Serialize + DeserializeOwned + Merge, { update_config(config, self) } fn from_full(full: &T) -> CollectionResult<Self> where Self: Sized + Serialize + DeserializeOwned, { from_full(full) } } #[derive( Debug, Default, Deserialize, Serialize, JsonSchema, Validate, Copy, Clone, PartialEq, Eq, Merge, Hash, )] #[serde(rename_all = "snake_case")] pub struct HnswConfigDiff { /// Number of edges per node in the index graph. Larger the value - more accurate the search, more space required. #[serde(skip_serializing_if = "Option::is_none")] pub m: Option<usize>, /// Number of neighbours to consider during the index building. Larger the value - more accurate the search, more time required to build the index. #[validate(range(min = 4))] #[serde(skip_serializing_if = "Option::is_none")] pub ef_construct: Option<usize>, /// Minimal size (in kilobytes) of vectors for additional payload-based indexing. /// If payload chunk is smaller than `full_scan_threshold_kb` additional indexing won't be used - /// in this case full-scan search should be preferred by query planner and additional indexing is not required. 
/// Note: 1Kb = 1 vector of size 256 #[serde( alias = "full_scan_threshold_kb", default, skip_serializing_if = "Option::is_none" )] #[validate(range(min = 10))] pub full_scan_threshold: Option<usize>, /// Number of parallel threads used for background index building. If 0 - auto selection. #[serde(default, skip_serializing_if = "Option::is_none")] pub max_indexing_threads: Option<usize>, /// Store HNSW index on disk. If set to false, the index will be stored in RAM. Default: false #[serde(default, skip_serializing_if = "Option::is_none")] pub on_disk: Option<bool>, /// Custom M param for additional payload-aware HNSW links. If not set, default M will be used. #[serde(default, skip_serializing_if = "Option::is_none")] pub payload_m: Option<usize>, } #[derive( Debug, Deserialize, Serialize, JsonSchema, Validate, Clone, Merge, PartialEq, Eq, Hash, )] pub struct WalConfigDiff { /// Size of a single WAL segment in MB #[validate(range(min = 1))] pub wal_capacity_mb: Option<usize>, /// Number of WAL segments to create ahead of actually used ones pub wal_segments_ahead: Option<usize>, } #[derive(Debug, Deserialize, Serialize, JsonSchema, Clone, Merge, PartialEq, Eq, Hash)] pub struct CollectionParamsDiff { /// Number of replicas for each shard pub replication_factor: Option<NonZeroU32>, /// Minimal number successful responses from replicas to consider operation successful pub write_consistency_factor: Option<NonZeroU32>, /// If true - point's payload will not be stored in memory. /// It will be read from the disk every time it is requested. /// This setting saves RAM by (slightly) increasing the response time. /// Note: those payload values that are involved in filtering and are indexed - remain in RAM. 
#[serde(default)] pub on_disk_payload: Option<bool>, } #[derive(Debug, Deserialize, Serialize, JsonSchema, Validate, Clone, Merge)] pub struct OptimizersConfigDiff { /// The minimal fraction of deleted vectors in a segment, required to perform segment optimization pub deleted_threshold: Option<f64>, /// The minimal number of vectors in a segment, required to perform segment optimization pub vacuum_min_vector_number: Option<usize>, /// Target amount of segments optimizer will try to keep. /// Real amount of segments may vary depending on multiple parameters: /// - Amount of stored points /// - Current write RPS /// /// It is recommended to select default number of segments as a factor of the number of search threads, /// so that each segment would be handled evenly by one of the threads /// If `default_segment_number = 0`, will be automatically selected by the number of available CPUs pub default_segment_number: Option<usize>, /// Do not create segments larger this size (in kilobytes). /// Large segments might require disproportionately long indexation times, /// therefore it makes sense to limit the size of segments. /// /// If indexation speed have more priority for your - make this parameter lower. /// If search speed is more important - make this parameter higher. /// Note: 1Kb = 1 vector of size 256 #[serde(alias = "max_segment_size_kb")] pub max_segment_size: Option<usize>, /// Maximum size (in kilobytes) of vectors to store in-memory per segment. /// Segments larger than this threshold will be stored as read-only memmaped file. /// /// Memmap storage is disabled by default, to enable it, set this threshold to a reasonable value. /// /// To disable memmap storage, set this to `0`. 
/// /// Note: 1Kb = 1 vector of size 256 #[serde(alias = "memmap_threshold_kb")] pub memmap_threshold: Option<usize>, /// Maximum size (in kilobytes) of vectors allowed for plain index, exceeding this threshold will enable vector indexing /// /// Default value is 20,000, based on <https://github.com/google-research/google-research/blob/master/scann/docs/algorithms.md>. /// /// To disable vector indexing, set to `0`. /// /// Note: 1kB = 1 vector of size 256. #[serde(alias = "indexing_threshold_kb")] pub indexing_threshold: Option<usize>, /// Minimum interval between forced flushes. pub flush_interval_sec: Option<u64>, /// Maximum available threads for optimization workers pub max_optimization_threads: Option<usize>, } impl std::hash::Hash for OptimizersConfigDiff { fn hash<H: std::hash::Hasher>(&self, state: &mut H) { self.deleted_threshold.map(f64::to_le_bytes).hash(state); self.vacuum_min_vector_number.hash(state); self.default_segment_number.hash(state); self.max_segment_size.hash(state); self.memmap_threshold.hash(state); self.indexing_threshold.hash(state); self.flush_interval_sec.hash(state); self.max_optimization_threads.hash(state); } } impl PartialEq for OptimizersConfigDiff { fn eq(&self, other: &Self) -> bool { self.deleted_threshold.map(f64::to_le_bytes) == other.deleted_threshold.map(f64::to_le_bytes) && self.vacuum_min_vector_number == other.vacuum_min_vector_number && self.default_segment_number == other.default_segment_number && self.max_segment_size == other.max_segment_size && self.memmap_threshold == other.memmap_threshold && self.indexing_threshold == other.indexing_threshold && self.flush_interval_sec == other.flush_interval_sec && self.max_optimization_threads == other.max_optimization_threads } } impl Eq for OptimizersConfigDiff {} impl DiffConfig<HnswConfig> for HnswConfigDiff {} impl DiffConfig<HnswConfigDiff> for HnswConfigDiff {} impl DiffConfig<OptimizersConfig> for OptimizersConfigDiff {} impl DiffConfig<WalConfig> for WalConfigDiff {} 
impl DiffConfig<CollectionParams> for CollectionParamsDiff {} impl From<HnswConfig> for HnswConfigDiff { fn from(config: HnswConfig) -> Self { HnswConfigDiff::from_full(&config).unwrap() } } impl From<OptimizersConfig> for OptimizersConfigDiff { fn from(config: OptimizersConfig) -> Self { OptimizersConfigDiff::from_full(&config).unwrap() } } impl From<WalConfig> for WalConfigDiff { fn from(config: WalConfig) -> Self { WalConfigDiff::from_full(&config).unwrap() } } impl From<CollectionParams> for CollectionParamsDiff { fn from(config: CollectionParams) -> Self { CollectionParamsDiff::from_full(&config).unwrap() } } pub fn from_full<T: DeserializeOwned + Serialize, Y: DeserializeOwned + Serialize>( full_config: &T, ) -> CollectionResult<Y> { let json = serde_json::to_value(full_config)?; let res = serde_json::from_value(json)?; Ok(res) } /// Merge first level of JSON values, if diff values present explicitly /// /// Example: /// /// base: {"a": 1, "b": 2} /// diff: {"a": 3} /// result: {"a": 3, "b": 2} /// /// base: {"a": 1, "b": 2}
/// result: {"a": 1, "b": 2} fn merge_level_0(base: &mut Value, diff: Value) { match (base, diff) { (base @ &mut Value::Object(_), Value::Object(diff)) => { let base = base.as_object_mut().unwrap(); for (k, v) in diff { if!v.is_null() { base.insert(k, v); } } } (_base, _diff) => {} } } /// Hacky way to update configuration structures with diff-updates. /// Intended to only be used in non critical for speed places. /// TODO: replace with proc macro pub fn update_config<T: DeserializeOwned + Serialize, Y: DeserializeOwned + Serialize + Merge>( config: &T, update: Y, ) -> CollectionResult<T> { let mut config_values = serde_json::to_value(config)?; let diff_values = serde_json::to_value(&update)?; merge_level_0(&mut config_values, diff_values); let res = serde_json::from_value(config_values)?; Ok(res) } /// Hacky way to figure out if the given configuration is considered empty /// /// The following types are considered empty: /// - Null /// - Empty string /// - Array or object with zero items /// /// Intended to only be used in non critical for speed places. 
pub fn is_empty<T: Serialize>(config: &T) -> CollectionResult<bool> { let config_values = serde_json::to_value(config)?; Ok(match config_values { Value::Null => true, Value::String(value) => value.is_empty(), Value::Array(values) => values.is_empty(), Value::Object(values) => values.is_empty(), Value::Bool(_) | Value::Number(_) => false, }) } #[derive(Debug, Deserialize, Serialize, JsonSchema, Clone, PartialEq, Eq, Hash)] pub enum Disabled { Disabled, } #[derive(Debug, Deserialize, Serialize, JsonSchema, Clone, PartialEq, Eq, Hash)] #[serde(rename_all = "snake_case")] #[serde(untagged)] pub enum QuantizationConfigDiff { Scalar(ScalarQuantization), Product(ProductQuantization), Disabled(Disabled), } impl QuantizationConfigDiff { pub fn new_disabled() -> Self { QuantizationConfigDiff::Disabled(Disabled::Disabled) } } impl Validate for QuantizationConfigDiff { fn validate(&self) -> Result<(), ValidationErrors> { match self { QuantizationConfigDiff::Scalar(scalar) => scalar.validate(), QuantizationConfigDiff::Product(product) => product.validate(), QuantizationConfigDiff::Disabled(_) => Ok(()), } } } #[cfg(test)] mod tests { use std::num::NonZeroU64; use segment::types::{Distance, HnswConfig}; use super::*; use crate::operations::types::VectorParams; use crate::optimizers_builder::OptimizersConfig; #[test] fn test_update_collection_params() { let params = CollectionParams { vectors: VectorParams { size: NonZeroU64::new(128).unwrap(), distance: Distance::Cosine, hnsw_config: None, quantization_config: None, on_disk: None, } .into(), shard_number: NonZeroU32::new(1).unwrap(), replication_factor: NonZeroU32::new(1).unwrap(), write_consistency_factor: NonZeroU32::new(1).unwrap(), on_disk_payload: false, }; let diff = CollectionParamsDiff { replication_factor: None, write_consistency_factor: Some(NonZeroU32::new(2).unwrap()), on_disk_payload: None, }; let new_params = diff.update(&params).unwrap(); assert_eq!(new_params.replication_factor.get(), 1); 
assert_eq!(new_params.write_consistency_factor.get(), 2); assert!(!new_params.on_disk_payload); } #[test] fn test_hnsw_update() { let base_config = HnswConfig::default(); let update: HnswConfigDiff = serde_json::from_str(r#"{ "m": 32 }"#).unwrap(); let new_config = update.update(&base_config).unwrap(); assert_eq!(new_config.m, 32) } #[test] fn test_optimizer_update() { let base_config = OptimizersConfig { deleted_threshold: 0.9, vacuum_min_vector_number: 1000, default_segment_number: 10, max_segment_size: None, memmap_threshold: None, indexing_threshold: Some(50_000), flush_interval_sec: 30, max_optimization_threads: 1, }; let update: OptimizersConfigDiff = serde_json::from_str(r#"{ "indexing_threshold": 10000 }"#).unwrap(); let new_config = update.update(&base_config).unwrap(); assert_eq!(new_config.indexing_threshold, Some(10000)) } #[test] fn test_wal_config() { let base_config = WalConfig::default(); let update: WalConfigDiff = serde_json::from_str(r#"{ "wal_segments_ahead": 2 }"#).unwrap(); let new_config = update.update(&base_config).unwrap(); assert_eq!(new_config.wal_segments_ahead, 2) } }
/// diff: {"a": null}
random_line_split
config_diff.rs
use std::num::NonZeroU32; use merge::Merge; use schemars::JsonSchema; use segment::types::{HnswConfig, ProductQuantization, ScalarQuantization}; use serde::de::DeserializeOwned; use serde::{Deserialize, Serialize}; use serde_json::Value; use validator::{Validate, ValidationErrors}; use crate::config::{CollectionParams, WalConfig}; use crate::operations::types::CollectionResult; use crate::optimizers_builder::OptimizersConfig; // Structures for partial update of collection params // TODO: make auto-generated somehow... pub trait DiffConfig<T: DeserializeOwned + Serialize> { /// Update the given `config` with fields in this diff /// /// This clones, modifies and returns `config`. /// /// This diff has higher priority, meaning that fields specified in this diff will always be in /// the returned object. fn update(self, config: &T) -> CollectionResult<T> where Self: Sized + Serialize + DeserializeOwned + Merge, { update_config(config, self) } fn from_full(full: &T) -> CollectionResult<Self> where Self: Sized + Serialize + DeserializeOwned, { from_full(full) } } #[derive( Debug, Default, Deserialize, Serialize, JsonSchema, Validate, Copy, Clone, PartialEq, Eq, Merge, Hash, )] #[serde(rename_all = "snake_case")] pub struct HnswConfigDiff { /// Number of edges per node in the index graph. Larger the value - more accurate the search, more space required. #[serde(skip_serializing_if = "Option::is_none")] pub m: Option<usize>, /// Number of neighbours to consider during the index building. Larger the value - more accurate the search, more time required to build the index. #[validate(range(min = 4))] #[serde(skip_serializing_if = "Option::is_none")] pub ef_construct: Option<usize>, /// Minimal size (in kilobytes) of vectors for additional payload-based indexing. /// If payload chunk is smaller than `full_scan_threshold_kb` additional indexing won't be used - /// in this case full-scan search should be preferred by query planner and additional indexing is not required. 
/// Note: 1Kb = 1 vector of size 256 #[serde( alias = "full_scan_threshold_kb", default, skip_serializing_if = "Option::is_none" )] #[validate(range(min = 10))] pub full_scan_threshold: Option<usize>, /// Number of parallel threads used for background index building. If 0 - auto selection. #[serde(default, skip_serializing_if = "Option::is_none")] pub max_indexing_threads: Option<usize>, /// Store HNSW index on disk. If set to false, the index will be stored in RAM. Default: false #[serde(default, skip_serializing_if = "Option::is_none")] pub on_disk: Option<bool>, /// Custom M param for additional payload-aware HNSW links. If not set, default M will be used. #[serde(default, skip_serializing_if = "Option::is_none")] pub payload_m: Option<usize>, } #[derive( Debug, Deserialize, Serialize, JsonSchema, Validate, Clone, Merge, PartialEq, Eq, Hash, )] pub struct WalConfigDiff { /// Size of a single WAL segment in MB #[validate(range(min = 1))] pub wal_capacity_mb: Option<usize>, /// Number of WAL segments to create ahead of actually used ones pub wal_segments_ahead: Option<usize>, } #[derive(Debug, Deserialize, Serialize, JsonSchema, Clone, Merge, PartialEq, Eq, Hash)] pub struct CollectionParamsDiff { /// Number of replicas for each shard pub replication_factor: Option<NonZeroU32>, /// Minimal number successful responses from replicas to consider operation successful pub write_consistency_factor: Option<NonZeroU32>, /// If true - point's payload will not be stored in memory. /// It will be read from the disk every time it is requested. /// This setting saves RAM by (slightly) increasing the response time. /// Note: those payload values that are involved in filtering and are indexed - remain in RAM. 
#[serde(default)] pub on_disk_payload: Option<bool>, } #[derive(Debug, Deserialize, Serialize, JsonSchema, Validate, Clone, Merge)] pub struct OptimizersConfigDiff { /// The minimal fraction of deleted vectors in a segment, required to perform segment optimization pub deleted_threshold: Option<f64>, /// The minimal number of vectors in a segment, required to perform segment optimization pub vacuum_min_vector_number: Option<usize>, /// Target amount of segments optimizer will try to keep. /// Real amount of segments may vary depending on multiple parameters: /// - Amount of stored points /// - Current write RPS /// /// It is recommended to select default number of segments as a factor of the number of search threads, /// so that each segment would be handled evenly by one of the threads /// If `default_segment_number = 0`, will be automatically selected by the number of available CPUs pub default_segment_number: Option<usize>, /// Do not create segments larger this size (in kilobytes). /// Large segments might require disproportionately long indexation times, /// therefore it makes sense to limit the size of segments. /// /// If indexation speed have more priority for your - make this parameter lower. /// If search speed is more important - make this parameter higher. /// Note: 1Kb = 1 vector of size 256 #[serde(alias = "max_segment_size_kb")] pub max_segment_size: Option<usize>, /// Maximum size (in kilobytes) of vectors to store in-memory per segment. /// Segments larger than this threshold will be stored as read-only memmaped file. /// /// Memmap storage is disabled by default, to enable it, set this threshold to a reasonable value. /// /// To disable memmap storage, set this to `0`. 
/// /// Note: 1Kb = 1 vector of size 256 #[serde(alias = "memmap_threshold_kb")] pub memmap_threshold: Option<usize>, /// Maximum size (in kilobytes) of vectors allowed for plain index, exceeding this threshold will enable vector indexing /// /// Default value is 20,000, based on <https://github.com/google-research/google-research/blob/master/scann/docs/algorithms.md>. /// /// To disable vector indexing, set to `0`. /// /// Note: 1kB = 1 vector of size 256. #[serde(alias = "indexing_threshold_kb")] pub indexing_threshold: Option<usize>, /// Minimum interval between forced flushes. pub flush_interval_sec: Option<u64>, /// Maximum available threads for optimization workers pub max_optimization_threads: Option<usize>, } impl std::hash::Hash for OptimizersConfigDiff { fn hash<H: std::hash::Hasher>(&self, state: &mut H) { self.deleted_threshold.map(f64::to_le_bytes).hash(state); self.vacuum_min_vector_number.hash(state); self.default_segment_number.hash(state); self.max_segment_size.hash(state); self.memmap_threshold.hash(state); self.indexing_threshold.hash(state); self.flush_interval_sec.hash(state); self.max_optimization_threads.hash(state); } } impl PartialEq for OptimizersConfigDiff { fn eq(&self, other: &Self) -> bool { self.deleted_threshold.map(f64::to_le_bytes) == other.deleted_threshold.map(f64::to_le_bytes) && self.vacuum_min_vector_number == other.vacuum_min_vector_number && self.default_segment_number == other.default_segment_number && self.max_segment_size == other.max_segment_size && self.memmap_threshold == other.memmap_threshold && self.indexing_threshold == other.indexing_threshold && self.flush_interval_sec == other.flush_interval_sec && self.max_optimization_threads == other.max_optimization_threads } } impl Eq for OptimizersConfigDiff {} impl DiffConfig<HnswConfig> for HnswConfigDiff {} impl DiffConfig<HnswConfigDiff> for HnswConfigDiff {} impl DiffConfig<OptimizersConfig> for OptimizersConfigDiff {} impl DiffConfig<WalConfig> for WalConfigDiff {} 
impl DiffConfig<CollectionParams> for CollectionParamsDiff {} impl From<HnswConfig> for HnswConfigDiff { fn from(config: HnswConfig) -> Self { HnswConfigDiff::from_full(&config).unwrap() } } impl From<OptimizersConfig> for OptimizersConfigDiff { fn from(config: OptimizersConfig) -> Self { OptimizersConfigDiff::from_full(&config).unwrap() } } impl From<WalConfig> for WalConfigDiff { fn from(config: WalConfig) -> Self { WalConfigDiff::from_full(&config).unwrap() } } impl From<CollectionParams> for CollectionParamsDiff { fn from(config: CollectionParams) -> Self { CollectionParamsDiff::from_full(&config).unwrap() } } pub fn from_full<T: DeserializeOwned + Serialize, Y: DeserializeOwned + Serialize>( full_config: &T, ) -> CollectionResult<Y> { let json = serde_json::to_value(full_config)?; let res = serde_json::from_value(json)?; Ok(res) } /// Merge first level of JSON values, if diff values present explicitly /// /// Example: /// /// base: {"a": 1, "b": 2} /// diff: {"a": 3} /// result: {"a": 3, "b": 2} /// /// base: {"a": 1, "b": 2} /// diff: {"a": null} /// result: {"a": 1, "b": 2} fn merge_level_0(base: &mut Value, diff: Value) { match (base, diff) { (base @ &mut Value::Object(_), Value::Object(diff)) => { let base = base.as_object_mut().unwrap(); for (k, v) in diff { if!v.is_null() { base.insert(k, v); } } } (_base, _diff) => {} } } /// Hacky way to update configuration structures with diff-updates. /// Intended to only be used in non critical for speed places. 
/// TODO: replace with proc macro pub fn update_config<T: DeserializeOwned + Serialize, Y: DeserializeOwned + Serialize + Merge>( config: &T, update: Y, ) -> CollectionResult<T> { let mut config_values = serde_json::to_value(config)?; let diff_values = serde_json::to_value(&update)?; merge_level_0(&mut config_values, diff_values); let res = serde_json::from_value(config_values)?; Ok(res) } /// Hacky way to figure out if the given configuration is considered empty /// /// The following types are considered empty: /// - Null /// - Empty string /// - Array or object with zero items /// /// Intended to only be used in non critical for speed places. pub fn is_empty<T: Serialize>(config: &T) -> CollectionResult<bool> { let config_values = serde_json::to_value(config)?; Ok(match config_values { Value::Null => true, Value::String(value) => value.is_empty(), Value::Array(values) => values.is_empty(), Value::Object(values) => values.is_empty(), Value::Bool(_) | Value::Number(_) => false, }) } #[derive(Debug, Deserialize, Serialize, JsonSchema, Clone, PartialEq, Eq, Hash)] pub enum Disabled { Disabled, } #[derive(Debug, Deserialize, Serialize, JsonSchema, Clone, PartialEq, Eq, Hash)] #[serde(rename_all = "snake_case")] #[serde(untagged)] pub enum QuantizationConfigDiff { Scalar(ScalarQuantization), Product(ProductQuantization), Disabled(Disabled), } impl QuantizationConfigDiff { pub fn
() -> Self { QuantizationConfigDiff::Disabled(Disabled::Disabled) } } impl Validate for QuantizationConfigDiff { fn validate(&self) -> Result<(), ValidationErrors> { match self { QuantizationConfigDiff::Scalar(scalar) => scalar.validate(), QuantizationConfigDiff::Product(product) => product.validate(), QuantizationConfigDiff::Disabled(_) => Ok(()), } } } #[cfg(test)] mod tests { use std::num::NonZeroU64; use segment::types::{Distance, HnswConfig}; use super::*; use crate::operations::types::VectorParams; use crate::optimizers_builder::OptimizersConfig; #[test] fn test_update_collection_params() { let params = CollectionParams { vectors: VectorParams { size: NonZeroU64::new(128).unwrap(), distance: Distance::Cosine, hnsw_config: None, quantization_config: None, on_disk: None, } .into(), shard_number: NonZeroU32::new(1).unwrap(), replication_factor: NonZeroU32::new(1).unwrap(), write_consistency_factor: NonZeroU32::new(1).unwrap(), on_disk_payload: false, }; let diff = CollectionParamsDiff { replication_factor: None, write_consistency_factor: Some(NonZeroU32::new(2).unwrap()), on_disk_payload: None, }; let new_params = diff.update(&params).unwrap(); assert_eq!(new_params.replication_factor.get(), 1); assert_eq!(new_params.write_consistency_factor.get(), 2); assert!(!new_params.on_disk_payload); } #[test] fn test_hnsw_update() { let base_config = HnswConfig::default(); let update: HnswConfigDiff = serde_json::from_str(r#"{ "m": 32 }"#).unwrap(); let new_config = update.update(&base_config).unwrap(); assert_eq!(new_config.m, 32) } #[test] fn test_optimizer_update() { let base_config = OptimizersConfig { deleted_threshold: 0.9, vacuum_min_vector_number: 1000, default_segment_number: 10, max_segment_size: None, memmap_threshold: None, indexing_threshold: Some(50_000), flush_interval_sec: 30, max_optimization_threads: 1, }; let update: OptimizersConfigDiff = serde_json::from_str(r#"{ "indexing_threshold": 10000 }"#).unwrap(); let new_config = 
update.update(&base_config).unwrap(); assert_eq!(new_config.indexing_threshold, Some(10000)) } #[test] fn test_wal_config() { let base_config = WalConfig::default(); let update: WalConfigDiff = serde_json::from_str(r#"{ "wal_segments_ahead": 2 }"#).unwrap(); let new_config = update.update(&base_config).unwrap(); assert_eq!(new_config.wal_segments_ahead, 2) } }
new_disabled
identifier_name
runtime.rs
last_changed_revision(&self, d: Durability) -> Revision { self.shared_state.revisions[d.index()].load() } /// Read current value of the revision counter. #[inline] pub(crate) fn pending_revision(&self) -> Revision { self.shared_state.pending_revision.load() } #[cold] pub(crate) fn unwind_cancelled(&self) { self.report_untracked_read(); Cancelled::PendingWrite.throw(); } /// Acquires the **global query write lock** (ensuring that no queries are /// executing) and then increments the current revision counter; invokes /// `op` with the global query write lock still held. /// /// While we wait to acquire the global query write lock, this method will /// also increment `pending_revision_increments`, thus signalling to queries /// that their results are "cancelled" and they should abort as expeditiously /// as possible. /// /// The `op` closure should actually perform the writes needed. It is given /// the new revision as an argument, and its return value indicates whether /// any pre-existing value was modified: /// /// - returning `None` means that no pre-existing value was modified (this /// could occur e.g. when setting some key on an input that was never set /// before) /// - returning `Some(d)` indicates that a pre-existing value was modified /// and it had the durability `d`. This will update the records for when /// values with each durability were modified. /// /// Note that, given our writer model, we can assume that only one thread is /// attempting to increment the global revision at a time. pub(crate) fn with_incremented_revision<F>(&mut self, op: F) where F: FnOnce(Revision) -> Option<Durability>, { log::debug!("increment_revision()"); if!self.permits_increment() { panic!("increment_revision invoked during a query computation"); } // Set the `pending_revision` field so that people // know current revision is cancelled. let current_revision = self.shared_state.pending_revision.fetch_then_increment(); // To modify the revision, we need the lock. 
let shared_state = self.shared_state.clone(); let _lock = shared_state.query_lock.write(); let old_revision = self.shared_state.revisions[0].fetch_then_increment(); assert_eq!(current_revision, old_revision); let new_revision = current_revision.next(); debug!("increment_revision: incremented to {:?}", new_revision); if let Some(d) = op(new_revision) { for rev in &self.shared_state.revisions[1..=d.index()] { rev.store(new_revision); } } } pub(crate) fn permits_increment(&self) -> bool { self.revision_guard.is_none() &&!self.local_state.query_in_progress() } #[inline] pub(crate) fn push_query(&self, database_key_index: DatabaseKeyIndex) -> ActiveQueryGuard<'_> { self.local_state.push_query(database_key_index) } /// Reports that the currently active query read the result from /// another query. /// /// Also checks whether the "cycle participant" flag is set on /// the current stack frame -- if so, panics with `CycleParticipant` /// value, which should be caught by the code executing the query. /// /// # Parameters /// /// - `database_key`: the query whose result was read /// - `changed_revision`: the last revision in which the result of that /// query had changed pub(crate) fn report_query_read_and_unwind_if_cycle_resulted( &self, input: DatabaseKeyIndex, durability: Durability, changed_at: Revision, ) { self.local_state .report_query_read_and_unwind_if_cycle_resulted(input, durability, changed_at); } /// Reports that the query depends on some state unknown to salsa. /// /// Queries which report untracked reads will be re-executed in the next /// revision. pub fn report_untracked_read(&self) { self.local_state .report_untracked_read(self.current_revision()); } /// Acts as though the current query had read an input with the given durability; this will force the current query's durability to be at most `durability`. 
/// /// This is mostly useful to control the durability level for [on-demand inputs](https://salsa-rs.github.io/salsa/common_patterns/on_demand_inputs.html). pub fn report_synthetic_read(&self, durability: Durability) { let changed_at = self.last_changed_revision(durability); self.local_state .report_synthetic_read(durability, changed_at); } /// Handles a cycle in the dependency graph that was detected when the /// current thread tried to block on `database_key_index` which is being /// executed by `to_id`. If this function returns, then `to_id` no longer /// depends on the current thread, and so we should continue executing /// as normal. Otherwise, the function will throw a `Cycle` which is expected /// to be caught by some frame on our stack. This occurs either if there is /// a frame on our stack with cycle recovery (possibly the top one!) or if there /// is no cycle recovery at all. fn unblock_cycle_and_maybe_throw( &self, db: &dyn Database, dg: &mut DependencyGraph, database_key_index: DatabaseKeyIndex, to_id: RuntimeId, ) { debug!( "unblock_cycle_and_maybe_throw(database_key={:?})", database_key_index ); let mut from_stack = self.local_state.take_query_stack(); let from_id = self.id(); // Make a "dummy stack frame". As we iterate through the cycle, we will collect the // inputs from each participant. Then, if we are participating in cycle recovery, we // will propagate those results to all participants. let mut cycle_query = ActiveQuery::new(database_key_index); // Identify the cycle participants: let cycle = { let mut v = vec![]; dg.for_each_cycle_participant( from_id, &mut from_stack, database_key_index, to_id, |aqs| { aqs.iter_mut().for_each(|aq| { cycle_query.add_from(aq); v.push(aq.database_key_index); }); }, ); // We want to give the participants in a deterministic order // (at least for this execution, not necessarily across executions), // no matter where it started on the stack. Find the minimum // key and rotate it to the front. 
let min = v.iter().min().unwrap(); let index = v.iter().position(|p| p == min).unwrap(); v.rotate_left(index); // No need to store extra memory. v.shrink_to_fit(); Cycle::new(Arc::new(v)) }; debug!( "cycle {:?}, cycle_query {:#?}", cycle.debug(db), cycle_query, ); // We can remove the cycle participants from the list of dependencies; // they are a strongly connected component (SCC) and we only care about // dependencies to things outside the SCC that control whether it will // form again. cycle_query.remove_cycle_participants(&cycle); // Mark each cycle participant that has recovery set, along with // any frames that come after them on the same thread. Those frames // are going to be unwound so that fallback can occur. dg.for_each_cycle_participant(from_id, &mut from_stack, database_key_index, to_id, |aqs| { aqs.iter_mut() .skip_while( |aq| match db.cycle_recovery_strategy(aq.database_key_index) { CycleRecoveryStrategy::Panic => true, CycleRecoveryStrategy::Fallback => false, }, ) .for_each(|aq| { debug!("marking {:?} for fallback", aq.database_key_index.debug(db)); aq.take_inputs_from(&cycle_query); assert!(aq.cycle.is_none()); aq.cycle = Some(cycle.clone()); }); }); // Unblock every thread that has cycle recovery with a `WaitResult::Cycle`. // They will throw the cycle, which will be caught by the frame that has // cycle recovery so that it can execute that recovery. let (me_recovered, others_recovered) = dg.maybe_unblock_runtimes_in_cycle(from_id, &from_stack, database_key_index, to_id); self.local_state.restore_query_stack(from_stack); if me_recovered { // If the current thread has recovery, we want to throw // so that it can begin. cycle.throw() } else if others_recovered { // If other threads have recovery but we didn't: return and we will block on them. } else { // if nobody has recover, then we panic panic_any(cycle); } } /// Block until `other_id` completes executing `database_key`; /// panic or unwind in the case of a cycle. 
/// /// `query_mutex_guard` is the guard for the current query's state; /// it will be dropped after we have successfully registered the /// dependency. /// /// # Propagating panics /// /// If the thread `other_id` panics, then our thread is considered /// cancelled, so this function will panic with a `Cancelled` value. /// /// # Cycle handling /// /// If the thread `other_id` already depends on the current thread, /// and hence there is a cycle in the query graph, then this function /// will unwind instead of returning normally. The method of unwinding /// depends on the [`Self::mutual_cycle_recovery_strategy`] /// of the cycle participants: /// /// * [`CycleRecoveryStrategy::Panic`]: panic with the [`Cycle`] as the value. /// * [`CycleRecoveryStrategy::Fallback`]: initiate unwinding with [`CycleParticipant::unwind`]. pub(crate) fn block_on_or_unwind<QueryMutexGuard>( &self, db: &dyn Database, database_key: DatabaseKeyIndex, other_id: RuntimeId, query_mutex_guard: QueryMutexGuard, ) { let mut dg = self.shared_state.dependency_graph.lock(); if dg.depends_on(other_id, self.id()) { self.unblock_cycle_and_maybe_throw(db, &mut dg, database_key, other_id); // If the above fn returns, then (via cycle recovery) it has unblocked the // cycle, so we can continue. assert!(!dg.depends_on(other_id, self.id())); } db.salsa_event(Event { runtime_id: self.id(), kind: EventKind::WillBlockOn { other_runtime_id: other_id, database_key, }, }); let stack = self.local_state.take_query_stack(); let (stack, result) = DependencyGraph::block_on( dg, self.id(), database_key, other_id, stack, query_mutex_guard, ); self.local_state.restore_query_stack(stack); match result { WaitResult::Completed => (), // If the other thread panicked, then we consider this thread // cancelled. The assumption is that the panic will be detected // by the other thread and responded to appropriately. 
WaitResult::Panicked => Cancelled::PropagatedPanic.throw(), WaitResult::Cycle(c) => c.throw(), } } /// Invoked when this runtime completed computing `database_key` with /// the given result `wait_result` (`wait_result` should be `None` if /// computing `database_key` panicked and could not complete). /// This function unblocks any dependent queries and allows them /// to continue executing. pub(crate) fn unblock_queries_blocked_on( &self, database_key: DatabaseKeyIndex, wait_result: WaitResult, ) { self.shared_state .dependency_graph .lock() .unblock_runtimes_blocked_on(database_key, wait_result); } } /// State that will be common to all threads (when we support multiple threads) struct SharedState { /// Stores the next id to use for a snapshotted runtime (starts at 1). next_id: AtomicUsize, /// Whenever derived queries are executing, they acquire this lock /// in read mode. Mutating inputs (and thus creating a new /// revision) requires a write lock (thus guaranteeing that no /// derived queries are in progress). Note that this is not needed /// to prevent **race conditions** -- the revision counter itself /// is stored in an `AtomicUsize` so it can be cheaply read /// without acquiring the lock. Rather, the `query_lock` is used /// to ensure a higher-level consistency property. query_lock: RwLock<()>, /// This is typically equal to `revision` -- set to `revision+1` /// when a new revision is pending (which implies that the current /// revision is cancelled). pending_revision: AtomicRevision, /// Stores the "last change" revision for values of each duration. /// This vector is always of length at least 1 (for Durability 0) /// but its total length depends on the number of durations. The /// element at index 0 is special as it represents the "current /// revision". In general, we have the invariant that revisions /// in here are *declining* -- that is, `revisions[i] >= /// revisions[i + 1]`, for all `i`. 
This is because when you /// modify a value with durability D, that implies that values /// with durability less than D may have changed too. revisions: Vec<AtomicRevision>, /// The dependency graph tracks which runtimes are blocked on one /// another, waiting for queries to terminate. dependency_graph: Mutex<DependencyGraph>, } impl SharedState { fn with_durabilities(durabilities: usize) -> Self { SharedState { next_id: AtomicUsize::new(1), query_lock: Default::default(), revisions: (0..durabilities).map(|_| AtomicRevision::start()).collect(), pending_revision: AtomicRevision::start(), dependency_graph: Default::default(), } } } impl std::panic::RefUnwindSafe for SharedState {} impl Default for SharedState { fn default() -> Self { Self::with_durabilities(Durability::LEN) } } impl std::fmt::Debug for SharedState { fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let query_lock = if self.query_lock.try_write().is_some() { "<unlocked>" } else if self.query_lock.try_read().is_some() { "<rlocked>" } else { "<wlocked>" }; fmt.debug_struct("SharedState") .field("query_lock", &query_lock) .field("revisions", &self.revisions) .field("pending_revision", &self.pending_revision) .finish() } } #[derive(Debug)] struct ActiveQuery { /// What query is executing database_key_index: DatabaseKeyIndex, /// Minimum durability of inputs observed so far. durability: Durability, /// Maximum revision of all inputs observed. If we observe an /// untracked read, this will be set to the most recent revision. changed_at: Revision, /// Set of subqueries that were accessed thus far, or `None` if /// there was an untracked the read. dependencies: Option<FxIndexSet<DatabaseKeyIndex>>, /// Stores the entire cycle, if one is found and this query is part of it. 
cycle: Option<Cycle>, } impl ActiveQuery { fn new(database_key_index: DatabaseKeyIndex) -> Self { ActiveQuery { database_key_index, durability: Durability::MAX, changed_at: Revision::start(), dependencies: Some(FxIndexSet::default()), cycle: None, } } fn add_read(&mut self, input: DatabaseKeyIndex, durability: Durability, revision: Revision) { if let Some(set) = &mut self.dependencies { set.insert(input); } self.durability = self.durability.min(durability); self.changed_at = self.changed_at.max(revision); } fn add_untracked_read(&mut self, changed_at: Revision) { self.dependencies = None; self.durability = Durability::LOW; self.changed_at = changed_at; } fn add_synthetic_read(&mut self, durability: Durability, revision: Revision) { self.dependencies = None; self.durability = self.durability.min(durability); self.changed_at = self.changed_at.max(revision); } pub(crate) fn revisions(&self) -> QueryRevisions { let inputs = match &self.dependencies { None => QueryInputs::Untracked, Some(dependencies) => { if dependencies.is_empty() { QueryInputs::NoInputs } else { QueryInputs::Tracked { inputs: dependencies.iter().copied().collect(), } } } }; QueryRevisions { changed_at: self.changed_at, inputs, durability: self.durability, } } /// Adds any dependencies from `other` into `self`. /// Used during cycle recovery, see [`Runtime::create_cycle_error`]. fn add_from(&mut self, other: &ActiveQuery) { self.changed_at = self.changed_at.max(other.changed_at); self.durability = self.durability.min(other.durability); if let Some(other_dependencies) = &other.dependencies { if let Some(my_dependencies) = &mut self.dependencies { my_dependencies.extend(other_dependencies.iter().copied()); } } else { self.dependencies = None; } } /// Removes the participants in `cycle` from my dependencies. /// Used during cycle recovery, see [`Runtime::create_cycle_error`]. 
fn remove_cycle_participants(&mut self, cycle: &Cycle) { if let Some(my_dependencies) = &mut self.dependencies { for p in cycle.participant_keys() { my_dependencies.remove(&p); } } } /// Copy the changed-at, durability, and dependencies from `cycle_query`. /// Used during cycle recovery, see [`Runtime::create_cycle_error`]. pub(crate) fn take_inputs_from(&mut self, cycle_query: &ActiveQuery) { self.changed_at = cycle_query.changed_at; self.durability = cycle_query.durability; self.dependencies = cycle_query.dependencies.clone(); } } /// A unique identifier for a particular runtime. Each time you create /// a snapshot, a fresh `RuntimeId` is generated. Once a snapshot is /// complete, its `RuntimeId` may potentially be re-used. #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)] pub struct RuntimeId { counter: usize, } #[derive(Clone, Debug)] pub(crate) struct StampedValue<V> { pub(crate) value: V, pub(crate) durability: Durability, pub(crate) changed_at: Revision, } struct RevisionGuard { shared_state: Arc<SharedState>, } impl RevisionGuard { fn new(shared_state: &Arc<SharedState>) -> Self { // Subtle: we use a "recursive" lock here so that it is not an // error to acquire a read-lock when one is already held (this // happens when a query uses `snapshot` to spawn off parallel // workers, for example). // // This has the side-effect that we are responsible to ensure // that people contending for the write lock do not starve, // but this is what we achieve via the cancellation mechanism. // // (In particular, since we only ever have one "mutating // handle" to the database, the only contention for the global // query lock occurs when there are "futures" evaluating // queries in parallel, and those futures hold a read-lock // already, so the starvation problem is more about them bring // themselves to a close, versus preventing other people from // *starting* work). 
unsafe { shared_state.query_lock.raw().lock_shared_recursive(); } Self { shared_state: shared_state.clone(), } } } impl Drop for RevisionGuard { fn drop(&mut self) { // Release our read-lock without using RAII. As documented in // `Snapshot::new` above, this requires the unsafe keyword. unsafe { self.shared_state.query_lock.raw().unlock_shared();
} }
random_line_split
runtime.rs
Runtime { id: RuntimeId { counter: 0 }, revision_guard: None, shared_state: Default::default(), local_state: Default::default(), } } } impl std::fmt::Debug for Runtime { fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { fmt.debug_struct("Runtime") .field("id", &self.id()) .field("forked", &self.revision_guard.is_some()) .field("shared_state", &self.shared_state) .finish() } } impl Runtime { /// Create a new runtime; equivalent to `Self::default`. This is /// used when creating a new database. pub fn new() -> Self { Self::default() } /// See [`crate::storage::Storage::snapshot`]. pub(crate) fn snapshot(&self) -> Self { if self.local_state.query_in_progress() { panic!("it is not legal to `snapshot` during a query (see salsa-rs/salsa#80)"); } let revision_guard = RevisionGuard::new(&self.shared_state); let id = RuntimeId { counter: self.shared_state.next_id.fetch_add(1, Ordering::SeqCst), }; Runtime { id, revision_guard: Some(revision_guard), shared_state: self.shared_state.clone(), local_state: Default::default(), } } /// A "synthetic write" causes the system to act *as though* some /// input of durability `durability` has changed. This is mostly /// useful for profiling scenarios. /// /// **WARNING:** Just like an ordinary write, this method triggers /// cancellation. If you invoke it while a snapshot exists, it /// will block until that snapshot is dropped -- if that snapshot /// is owned by the current thread, this could trigger deadlock. pub fn synthetic_write(&mut self, durability: Durability) { self.with_incremented_revision(|_next_revision| Some(durability)); } /// The unique identifier attached to this `SalsaRuntime`. Each /// snapshotted runtime has a distinct identifier. #[inline] pub fn id(&self) -> RuntimeId { self.id } /// Returns the database-key for the query that this thread is /// actively executing (if any). 
pub fn active_query(&self) -> Option<DatabaseKeyIndex> { self.local_state.active_query() } /// Read current value of the revision counter. #[inline] pub(crate) fn current_revision(&self) -> Revision { self.shared_state.revisions[0].load() } /// The revision in which values with durability `d` may have last /// changed. For D0, this is just the current revision. But for /// higher levels of durability, this value may lag behind the /// current revision. If we encounter a value of durability Di, /// then, we can check this function to get a "bound" on when the /// value may have changed, which allows us to skip walking its /// dependencies. #[inline] pub(crate) fn last_changed_revision(&self, d: Durability) -> Revision { self.shared_state.revisions[d.index()].load() } /// Read current value of the revision counter. #[inline] pub(crate) fn pending_revision(&self) -> Revision { self.shared_state.pending_revision.load() } #[cold] pub(crate) fn unwind_cancelled(&self) { self.report_untracked_read(); Cancelled::PendingWrite.throw(); } /// Acquires the **global query write lock** (ensuring that no queries are /// executing) and then increments the current revision counter; invokes /// `op` with the global query write lock still held. /// /// While we wait to acquire the global query write lock, this method will /// also increment `pending_revision_increments`, thus signalling to queries /// that their results are "cancelled" and they should abort as expeditiously /// as possible. /// /// The `op` closure should actually perform the writes needed. It is given /// the new revision as an argument, and its return value indicates whether /// any pre-existing value was modified: /// /// - returning `None` means that no pre-existing value was modified (this /// could occur e.g. when setting some key on an input that was never set /// before) /// - returning `Some(d)` indicates that a pre-existing value was modified /// and it had the durability `d`. 
This will update the records for when /// values with each durability were modified. /// /// Note that, given our writer model, we can assume that only one thread is /// attempting to increment the global revision at a time. pub(crate) fn with_incremented_revision<F>(&mut self, op: F) where F: FnOnce(Revision) -> Option<Durability>, { log::debug!("increment_revision()"); if!self.permits_increment() { panic!("increment_revision invoked during a query computation"); } // Set the `pending_revision` field so that people // know current revision is cancelled. let current_revision = self.shared_state.pending_revision.fetch_then_increment(); // To modify the revision, we need the lock. let shared_state = self.shared_state.clone(); let _lock = shared_state.query_lock.write(); let old_revision = self.shared_state.revisions[0].fetch_then_increment(); assert_eq!(current_revision, old_revision); let new_revision = current_revision.next(); debug!("increment_revision: incremented to {:?}", new_revision); if let Some(d) = op(new_revision) { for rev in &self.shared_state.revisions[1..=d.index()] { rev.store(new_revision); } } } pub(crate) fn permits_increment(&self) -> bool { self.revision_guard.is_none() &&!self.local_state.query_in_progress() } #[inline] pub(crate) fn push_query(&self, database_key_index: DatabaseKeyIndex) -> ActiveQueryGuard<'_> { self.local_state.push_query(database_key_index) } /// Reports that the currently active query read the result from /// another query. /// /// Also checks whether the "cycle participant" flag is set on /// the current stack frame -- if so, panics with `CycleParticipant` /// value, which should be caught by the code executing the query. 
/// /// # Parameters /// /// - `database_key`: the query whose result was read /// - `changed_revision`: the last revision in which the result of that /// query had changed pub(crate) fn report_query_read_and_unwind_if_cycle_resulted( &self, input: DatabaseKeyIndex, durability: Durability, changed_at: Revision, ) { self.local_state .report_query_read_and_unwind_if_cycle_resulted(input, durability, changed_at); } /// Reports that the query depends on some state unknown to salsa. /// /// Queries which report untracked reads will be re-executed in the next /// revision. pub fn
(&self) { self.local_state .report_untracked_read(self.current_revision()); } /// Acts as though the current query had read an input with the given durability; this will force the current query's durability to be at most `durability`. /// /// This is mostly useful to control the durability level for [on-demand inputs](https://salsa-rs.github.io/salsa/common_patterns/on_demand_inputs.html). pub fn report_synthetic_read(&self, durability: Durability) { let changed_at = self.last_changed_revision(durability); self.local_state .report_synthetic_read(durability, changed_at); } /// Handles a cycle in the dependency graph that was detected when the /// current thread tried to block on `database_key_index` which is being /// executed by `to_id`. If this function returns, then `to_id` no longer /// depends on the current thread, and so we should continue executing /// as normal. Otherwise, the function will throw a `Cycle` which is expected /// to be caught by some frame on our stack. This occurs either if there is /// a frame on our stack with cycle recovery (possibly the top one!) or if there /// is no cycle recovery at all. fn unblock_cycle_and_maybe_throw( &self, db: &dyn Database, dg: &mut DependencyGraph, database_key_index: DatabaseKeyIndex, to_id: RuntimeId, ) { debug!( "unblock_cycle_and_maybe_throw(database_key={:?})", database_key_index ); let mut from_stack = self.local_state.take_query_stack(); let from_id = self.id(); // Make a "dummy stack frame". As we iterate through the cycle, we will collect the // inputs from each participant. Then, if we are participating in cycle recovery, we // will propagate those results to all participants. 
let mut cycle_query = ActiveQuery::new(database_key_index); // Identify the cycle participants: let cycle = { let mut v = vec![]; dg.for_each_cycle_participant( from_id, &mut from_stack, database_key_index, to_id, |aqs| { aqs.iter_mut().for_each(|aq| { cycle_query.add_from(aq); v.push(aq.database_key_index); }); }, ); // We want to give the participants in a deterministic order // (at least for this execution, not necessarily across executions), // no matter where it started on the stack. Find the minimum // key and rotate it to the front. let min = v.iter().min().unwrap(); let index = v.iter().position(|p| p == min).unwrap(); v.rotate_left(index); // No need to store extra memory. v.shrink_to_fit(); Cycle::new(Arc::new(v)) }; debug!( "cycle {:?}, cycle_query {:#?}", cycle.debug(db), cycle_query, ); // We can remove the cycle participants from the list of dependencies; // they are a strongly connected component (SCC) and we only care about // dependencies to things outside the SCC that control whether it will // form again. cycle_query.remove_cycle_participants(&cycle); // Mark each cycle participant that has recovery set, along with // any frames that come after them on the same thread. Those frames // are going to be unwound so that fallback can occur. dg.for_each_cycle_participant(from_id, &mut from_stack, database_key_index, to_id, |aqs| { aqs.iter_mut() .skip_while( |aq| match db.cycle_recovery_strategy(aq.database_key_index) { CycleRecoveryStrategy::Panic => true, CycleRecoveryStrategy::Fallback => false, }, ) .for_each(|aq| { debug!("marking {:?} for fallback", aq.database_key_index.debug(db)); aq.take_inputs_from(&cycle_query); assert!(aq.cycle.is_none()); aq.cycle = Some(cycle.clone()); }); }); // Unblock every thread that has cycle recovery with a `WaitResult::Cycle`. // They will throw the cycle, which will be caught by the frame that has // cycle recovery so that it can execute that recovery. 
let (me_recovered, others_recovered) = dg.maybe_unblock_runtimes_in_cycle(from_id, &from_stack, database_key_index, to_id); self.local_state.restore_query_stack(from_stack); if me_recovered { // If the current thread has recovery, we want to throw // so that it can begin. cycle.throw() } else if others_recovered { // If other threads have recovery but we didn't: return and we will block on them. } else { // if nobody has recover, then we panic panic_any(cycle); } } /// Block until `other_id` completes executing `database_key`; /// panic or unwind in the case of a cycle. /// /// `query_mutex_guard` is the guard for the current query's state; /// it will be dropped after we have successfully registered the /// dependency. /// /// # Propagating panics /// /// If the thread `other_id` panics, then our thread is considered /// cancelled, so this function will panic with a `Cancelled` value. /// /// # Cycle handling /// /// If the thread `other_id` already depends on the current thread, /// and hence there is a cycle in the query graph, then this function /// will unwind instead of returning normally. The method of unwinding /// depends on the [`Self::mutual_cycle_recovery_strategy`] /// of the cycle participants: /// /// * [`CycleRecoveryStrategy::Panic`]: panic with the [`Cycle`] as the value. /// * [`CycleRecoveryStrategy::Fallback`]: initiate unwinding with [`CycleParticipant::unwind`]. pub(crate) fn block_on_or_unwind<QueryMutexGuard>( &self, db: &dyn Database, database_key: DatabaseKeyIndex, other_id: RuntimeId, query_mutex_guard: QueryMutexGuard, ) { let mut dg = self.shared_state.dependency_graph.lock(); if dg.depends_on(other_id, self.id()) { self.unblock_cycle_and_maybe_throw(db, &mut dg, database_key, other_id); // If the above fn returns, then (via cycle recovery) it has unblocked the // cycle, so we can continue. 
assert!(!dg.depends_on(other_id, self.id())); } db.salsa_event(Event { runtime_id: self.id(), kind: EventKind::WillBlockOn { other_runtime_id: other_id, database_key, }, }); let stack = self.local_state.take_query_stack(); let (stack, result) = DependencyGraph::block_on( dg, self.id(), database_key, other_id, stack, query_mutex_guard, ); self.local_state.restore_query_stack(stack); match result { WaitResult::Completed => (), // If the other thread panicked, then we consider this thread // cancelled. The assumption is that the panic will be detected // by the other thread and responded to appropriately. WaitResult::Panicked => Cancelled::PropagatedPanic.throw(), WaitResult::Cycle(c) => c.throw(), } } /// Invoked when this runtime completed computing `database_key` with /// the given result `wait_result` (`wait_result` should be `None` if /// computing `database_key` panicked and could not complete). /// This function unblocks any dependent queries and allows them /// to continue executing. pub(crate) fn unblock_queries_blocked_on( &self, database_key: DatabaseKeyIndex, wait_result: WaitResult, ) { self.shared_state .dependency_graph .lock() .unblock_runtimes_blocked_on(database_key, wait_result); } } /// State that will be common to all threads (when we support multiple threads) struct SharedState { /// Stores the next id to use for a snapshotted runtime (starts at 1). next_id: AtomicUsize, /// Whenever derived queries are executing, they acquire this lock /// in read mode. Mutating inputs (and thus creating a new /// revision) requires a write lock (thus guaranteeing that no /// derived queries are in progress). Note that this is not needed /// to prevent **race conditions** -- the revision counter itself /// is stored in an `AtomicUsize` so it can be cheaply read /// without acquiring the lock. Rather, the `query_lock` is used /// to ensure a higher-level consistency property. 
query_lock: RwLock<()>, /// This is typically equal to `revision` -- set to `revision+1` /// when a new revision is pending (which implies that the current /// revision is cancelled). pending_revision: AtomicRevision, /// Stores the "last change" revision for values of each duration. /// This vector is always of length at least 1 (for Durability 0) /// but its total length depends on the number of durations. The /// element at index 0 is special as it represents the "current /// revision". In general, we have the invariant that revisions /// in here are *declining* -- that is, `revisions[i] >= /// revisions[i + 1]`, for all `i`. This is because when you /// modify a value with durability D, that implies that values /// with durability less than D may have changed too. revisions: Vec<AtomicRevision>, /// The dependency graph tracks which runtimes are blocked on one /// another, waiting for queries to terminate. dependency_graph: Mutex<DependencyGraph>, } impl SharedState { fn with_durabilities(durabilities: usize) -> Self { SharedState { next_id: AtomicUsize::new(1), query_lock: Default::default(), revisions: (0..durabilities).map(|_| AtomicRevision::start()).collect(), pending_revision: AtomicRevision::start(), dependency_graph: Default::default(), } } } impl std::panic::RefUnwindSafe for SharedState {} impl Default for SharedState { fn default() -> Self { Self::with_durabilities(Durability::LEN) } } impl std::fmt::Debug for SharedState { fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let query_lock = if self.query_lock.try_write().is_some() { "<unlocked>" } else if self.query_lock.try_read().is_some() { "<rlocked>" } else { "<wlocked>" }; fmt.debug_struct("SharedState") .field("query_lock", &query_lock) .field("revisions", &self.revisions) .field("pending_revision", &self.pending_revision) .finish() } } #[derive(Debug)] struct ActiveQuery { /// What query is executing database_key_index: DatabaseKeyIndex, /// Minimum durability of inputs 
observed so far. durability: Durability, /// Maximum revision of all inputs observed. If we observe an /// untracked read, this will be set to the most recent revision. changed_at: Revision, /// Set of subqueries that were accessed thus far, or `None` if /// there was an untracked the read. dependencies: Option<FxIndexSet<DatabaseKeyIndex>>, /// Stores the entire cycle, if one is found and this query is part of it. cycle: Option<Cycle>, } impl ActiveQuery { fn new(database_key_index: DatabaseKeyIndex) -> Self { ActiveQuery { database_key_index, durability: Durability::MAX, changed_at: Revision::start(), dependencies: Some(FxIndexSet::default()), cycle: None, } } fn add_read(&mut self, input: DatabaseKeyIndex, durability: Durability, revision: Revision) { if let Some(set) = &mut self.dependencies { set.insert(input); } self.durability = self.durability.min(durability); self.changed_at = self.changed_at.max(revision); } fn add_untracked_read(&mut self, changed_at: Revision) { self.dependencies = None; self.durability = Durability::LOW; self.changed_at = changed_at; } fn add_synthetic_read(&mut self, durability: Durability, revision: Revision) { self.dependencies = None; self.durability = self.durability.min(durability); self.changed_at = self.changed_at.max(revision); } pub(crate) fn revisions(&self) -> QueryRevisions { let inputs = match &self.dependencies { None => QueryInputs::Untracked, Some(dependencies) => { if dependencies.is_empty() { QueryInputs::NoInputs } else { QueryInputs::Tracked { inputs: dependencies.iter().copied().collect(), } } } }; QueryRevisions { changed_at: self.changed_at, inputs, durability: self.durability, } } /// Adds any dependencies from `other` into `self`. /// Used during cycle recovery, see [`Runtime::create_cycle_error`]. fn add_from(&mut self, other: &ActiveQuery) { self.changed_at = self.changed_at.max(other.changed_at); self.durability = self.durability.min(other.durability); if let Some(other_dependencies) = &
report_untracked_read
identifier_name
runtime.rs
Runtime { id: RuntimeId { counter: 0 }, revision_guard: None, shared_state: Default::default(), local_state: Default::default(), } } } impl std::fmt::Debug for Runtime { fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { fmt.debug_struct("Runtime") .field("id", &self.id()) .field("forked", &self.revision_guard.is_some()) .field("shared_state", &self.shared_state) .finish() } } impl Runtime { /// Create a new runtime; equivalent to `Self::default`. This is /// used when creating a new database. pub fn new() -> Self { Self::default() } /// See [`crate::storage::Storage::snapshot`]. pub(crate) fn snapshot(&self) -> Self { if self.local_state.query_in_progress() { panic!("it is not legal to `snapshot` during a query (see salsa-rs/salsa#80)"); } let revision_guard = RevisionGuard::new(&self.shared_state); let id = RuntimeId { counter: self.shared_state.next_id.fetch_add(1, Ordering::SeqCst), }; Runtime { id, revision_guard: Some(revision_guard), shared_state: self.shared_state.clone(), local_state: Default::default(), } } /// A "synthetic write" causes the system to act *as though* some /// input of durability `durability` has changed. This is mostly /// useful for profiling scenarios. /// /// **WARNING:** Just like an ordinary write, this method triggers /// cancellation. If you invoke it while a snapshot exists, it /// will block until that snapshot is dropped -- if that snapshot /// is owned by the current thread, this could trigger deadlock. pub fn synthetic_write(&mut self, durability: Durability) { self.with_incremented_revision(|_next_revision| Some(durability)); } /// The unique identifier attached to this `SalsaRuntime`. Each /// snapshotted runtime has a distinct identifier. #[inline] pub fn id(&self) -> RuntimeId { self.id } /// Returns the database-key for the query that this thread is /// actively executing (if any). 
pub fn active_query(&self) -> Option<DatabaseKeyIndex> { self.local_state.active_query() } /// Read current value of the revision counter. #[inline] pub(crate) fn current_revision(&self) -> Revision { self.shared_state.revisions[0].load() } /// The revision in which values with durability `d` may have last /// changed. For D0, this is just the current revision. But for /// higher levels of durability, this value may lag behind the /// current revision. If we encounter a value of durability Di, /// then, we can check this function to get a "bound" on when the /// value may have changed, which allows us to skip walking its /// dependencies. #[inline] pub(crate) fn last_changed_revision(&self, d: Durability) -> Revision { self.shared_state.revisions[d.index()].load() } /// Read current value of the revision counter. #[inline] pub(crate) fn pending_revision(&self) -> Revision { self.shared_state.pending_revision.load() } #[cold] pub(crate) fn unwind_cancelled(&self) { self.report_untracked_read(); Cancelled::PendingWrite.throw(); } /// Acquires the **global query write lock** (ensuring that no queries are /// executing) and then increments the current revision counter; invokes /// `op` with the global query write lock still held. /// /// While we wait to acquire the global query write lock, this method will /// also increment `pending_revision_increments`, thus signalling to queries /// that their results are "cancelled" and they should abort as expeditiously /// as possible. /// /// The `op` closure should actually perform the writes needed. It is given /// the new revision as an argument, and its return value indicates whether /// any pre-existing value was modified: /// /// - returning `None` means that no pre-existing value was modified (this /// could occur e.g. when setting some key on an input that was never set /// before) /// - returning `Some(d)` indicates that a pre-existing value was modified /// and it had the durability `d`. 
This will update the records for when /// values with each durability were modified. /// /// Note that, given our writer model, we can assume that only one thread is /// attempting to increment the global revision at a time. pub(crate) fn with_incremented_revision<F>(&mut self, op: F) where F: FnOnce(Revision) -> Option<Durability>, { log::debug!("increment_revision()"); if!self.permits_increment() { panic!("increment_revision invoked during a query computation"); } // Set the `pending_revision` field so that people // know current revision is cancelled. let current_revision = self.shared_state.pending_revision.fetch_then_increment(); // To modify the revision, we need the lock. let shared_state = self.shared_state.clone(); let _lock = shared_state.query_lock.write(); let old_revision = self.shared_state.revisions[0].fetch_then_increment(); assert_eq!(current_revision, old_revision); let new_revision = current_revision.next(); debug!("increment_revision: incremented to {:?}", new_revision); if let Some(d) = op(new_revision)
} pub(crate) fn permits_increment(&self) -> bool { self.revision_guard.is_none() &&!self.local_state.query_in_progress() } #[inline] pub(crate) fn push_query(&self, database_key_index: DatabaseKeyIndex) -> ActiveQueryGuard<'_> { self.local_state.push_query(database_key_index) } /// Reports that the currently active query read the result from /// another query. /// /// Also checks whether the "cycle participant" flag is set on /// the current stack frame -- if so, panics with `CycleParticipant` /// value, which should be caught by the code executing the query. /// /// # Parameters /// /// - `database_key`: the query whose result was read /// - `changed_revision`: the last revision in which the result of that /// query had changed pub(crate) fn report_query_read_and_unwind_if_cycle_resulted( &self, input: DatabaseKeyIndex, durability: Durability, changed_at: Revision, ) { self.local_state .report_query_read_and_unwind_if_cycle_resulted(input, durability, changed_at); } /// Reports that the query depends on some state unknown to salsa. /// /// Queries which report untracked reads will be re-executed in the next /// revision. pub fn report_untracked_read(&self) { self.local_state .report_untracked_read(self.current_revision()); } /// Acts as though the current query had read an input with the given durability; this will force the current query's durability to be at most `durability`. /// /// This is mostly useful to control the durability level for [on-demand inputs](https://salsa-rs.github.io/salsa/common_patterns/on_demand_inputs.html). pub fn report_synthetic_read(&self, durability: Durability) { let changed_at = self.last_changed_revision(durability); self.local_state .report_synthetic_read(durability, changed_at); } /// Handles a cycle in the dependency graph that was detected when the /// current thread tried to block on `database_key_index` which is being /// executed by `to_id`. 
If this function returns, then `to_id` no longer /// depends on the current thread, and so we should continue executing /// as normal. Otherwise, the function will throw a `Cycle` which is expected /// to be caught by some frame on our stack. This occurs either if there is /// a frame on our stack with cycle recovery (possibly the top one!) or if there /// is no cycle recovery at all. fn unblock_cycle_and_maybe_throw( &self, db: &dyn Database, dg: &mut DependencyGraph, database_key_index: DatabaseKeyIndex, to_id: RuntimeId, ) { debug!( "unblock_cycle_and_maybe_throw(database_key={:?})", database_key_index ); let mut from_stack = self.local_state.take_query_stack(); let from_id = self.id(); // Make a "dummy stack frame". As we iterate through the cycle, we will collect the // inputs from each participant. Then, if we are participating in cycle recovery, we // will propagate those results to all participants. let mut cycle_query = ActiveQuery::new(database_key_index); // Identify the cycle participants: let cycle = { let mut v = vec![]; dg.for_each_cycle_participant( from_id, &mut from_stack, database_key_index, to_id, |aqs| { aqs.iter_mut().for_each(|aq| { cycle_query.add_from(aq); v.push(aq.database_key_index); }); }, ); // We want to give the participants in a deterministic order // (at least for this execution, not necessarily across executions), // no matter where it started on the stack. Find the minimum // key and rotate it to the front. let min = v.iter().min().unwrap(); let index = v.iter().position(|p| p == min).unwrap(); v.rotate_left(index); // No need to store extra memory. v.shrink_to_fit(); Cycle::new(Arc::new(v)) }; debug!( "cycle {:?}, cycle_query {:#?}", cycle.debug(db), cycle_query, ); // We can remove the cycle participants from the list of dependencies; // they are a strongly connected component (SCC) and we only care about // dependencies to things outside the SCC that control whether it will // form again. 
cycle_query.remove_cycle_participants(&cycle); // Mark each cycle participant that has recovery set, along with // any frames that come after them on the same thread. Those frames // are going to be unwound so that fallback can occur. dg.for_each_cycle_participant(from_id, &mut from_stack, database_key_index, to_id, |aqs| { aqs.iter_mut() .skip_while( |aq| match db.cycle_recovery_strategy(aq.database_key_index) { CycleRecoveryStrategy::Panic => true, CycleRecoveryStrategy::Fallback => false, }, ) .for_each(|aq| { debug!("marking {:?} for fallback", aq.database_key_index.debug(db)); aq.take_inputs_from(&cycle_query); assert!(aq.cycle.is_none()); aq.cycle = Some(cycle.clone()); }); }); // Unblock every thread that has cycle recovery with a `WaitResult::Cycle`. // They will throw the cycle, which will be caught by the frame that has // cycle recovery so that it can execute that recovery. let (me_recovered, others_recovered) = dg.maybe_unblock_runtimes_in_cycle(from_id, &from_stack, database_key_index, to_id); self.local_state.restore_query_stack(from_stack); if me_recovered { // If the current thread has recovery, we want to throw // so that it can begin. cycle.throw() } else if others_recovered { // If other threads have recovery but we didn't: return and we will block on them. } else { // if nobody has recover, then we panic panic_any(cycle); } } /// Block until `other_id` completes executing `database_key`; /// panic or unwind in the case of a cycle. /// /// `query_mutex_guard` is the guard for the current query's state; /// it will be dropped after we have successfully registered the /// dependency. /// /// # Propagating panics /// /// If the thread `other_id` panics, then our thread is considered /// cancelled, so this function will panic with a `Cancelled` value. 
/// /// # Cycle handling /// /// If the thread `other_id` already depends on the current thread, /// and hence there is a cycle in the query graph, then this function /// will unwind instead of returning normally. The method of unwinding /// depends on the [`Self::mutual_cycle_recovery_strategy`] /// of the cycle participants: /// /// * [`CycleRecoveryStrategy::Panic`]: panic with the [`Cycle`] as the value. /// * [`CycleRecoveryStrategy::Fallback`]: initiate unwinding with [`CycleParticipant::unwind`]. pub(crate) fn block_on_or_unwind<QueryMutexGuard>( &self, db: &dyn Database, database_key: DatabaseKeyIndex, other_id: RuntimeId, query_mutex_guard: QueryMutexGuard, ) { let mut dg = self.shared_state.dependency_graph.lock(); if dg.depends_on(other_id, self.id()) { self.unblock_cycle_and_maybe_throw(db, &mut dg, database_key, other_id); // If the above fn returns, then (via cycle recovery) it has unblocked the // cycle, so we can continue. assert!(!dg.depends_on(other_id, self.id())); } db.salsa_event(Event { runtime_id: self.id(), kind: EventKind::WillBlockOn { other_runtime_id: other_id, database_key, }, }); let stack = self.local_state.take_query_stack(); let (stack, result) = DependencyGraph::block_on( dg, self.id(), database_key, other_id, stack, query_mutex_guard, ); self.local_state.restore_query_stack(stack); match result { WaitResult::Completed => (), // If the other thread panicked, then we consider this thread // cancelled. The assumption is that the panic will be detected // by the other thread and responded to appropriately. WaitResult::Panicked => Cancelled::PropagatedPanic.throw(), WaitResult::Cycle(c) => c.throw(), } } /// Invoked when this runtime completed computing `database_key` with /// the given result `wait_result` (`wait_result` should be `None` if /// computing `database_key` panicked and could not complete). /// This function unblocks any dependent queries and allows them /// to continue executing. 
pub(crate) fn unblock_queries_blocked_on( &self, database_key: DatabaseKeyIndex, wait_result: WaitResult, ) { self.shared_state .dependency_graph .lock() .unblock_runtimes_blocked_on(database_key, wait_result); } } /// State that will be common to all threads (when we support multiple threads) struct SharedState { /// Stores the next id to use for a snapshotted runtime (starts at 1). next_id: AtomicUsize, /// Whenever derived queries are executing, they acquire this lock /// in read mode. Mutating inputs (and thus creating a new /// revision) requires a write lock (thus guaranteeing that no /// derived queries are in progress). Note that this is not needed /// to prevent **race conditions** -- the revision counter itself /// is stored in an `AtomicUsize` so it can be cheaply read /// without acquiring the lock. Rather, the `query_lock` is used /// to ensure a higher-level consistency property. query_lock: RwLock<()>, /// This is typically equal to `revision` -- set to `revision+1` /// when a new revision is pending (which implies that the current /// revision is cancelled). pending_revision: AtomicRevision, /// Stores the "last change" revision for values of each duration. /// This vector is always of length at least 1 (for Durability 0) /// but its total length depends on the number of durations. The /// element at index 0 is special as it represents the "current /// revision". In general, we have the invariant that revisions /// in here are *declining* -- that is, `revisions[i] >= /// revisions[i + 1]`, for all `i`. This is because when you /// modify a value with durability D, that implies that values /// with durability less than D may have changed too. revisions: Vec<AtomicRevision>, /// The dependency graph tracks which runtimes are blocked on one /// another, waiting for queries to terminate. 
dependency_graph: Mutex<DependencyGraph>, } impl SharedState { fn with_durabilities(durabilities: usize) -> Self { SharedState { next_id: AtomicUsize::new(1), query_lock: Default::default(), revisions: (0..durabilities).map(|_| AtomicRevision::start()).collect(), pending_revision: AtomicRevision::start(), dependency_graph: Default::default(), } } } impl std::panic::RefUnwindSafe for SharedState {} impl Default for SharedState { fn default() -> Self { Self::with_durabilities(Durability::LEN) } } impl std::fmt::Debug for SharedState { fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let query_lock = if self.query_lock.try_write().is_some() { "<unlocked>" } else if self.query_lock.try_read().is_some() { "<rlocked>" } else { "<wlocked>" }; fmt.debug_struct("SharedState") .field("query_lock", &query_lock) .field("revisions", &self.revisions) .field("pending_revision", &self.pending_revision) .finish() } } #[derive(Debug)] struct ActiveQuery { /// What query is executing database_key_index: DatabaseKeyIndex, /// Minimum durability of inputs observed so far. durability: Durability, /// Maximum revision of all inputs observed. If we observe an /// untracked read, this will be set to the most recent revision. changed_at: Revision, /// Set of subqueries that were accessed thus far, or `None` if /// there was an untracked the read. dependencies: Option<FxIndexSet<DatabaseKeyIndex>>, /// Stores the entire cycle, if one is found and this query is part of it. 
cycle: Option<Cycle>, } impl ActiveQuery { fn new(database_key_index: DatabaseKeyIndex) -> Self { ActiveQuery { database_key_index, durability: Durability::MAX, changed_at: Revision::start(), dependencies: Some(FxIndexSet::default()), cycle: None, } } fn add_read(&mut self, input: DatabaseKeyIndex, durability: Durability, revision: Revision) { if let Some(set) = &mut self.dependencies { set.insert(input); } self.durability = self.durability.min(durability); self.changed_at = self.changed_at.max(revision); } fn add_untracked_read(&mut self, changed_at: Revision) { self.dependencies = None; self.durability = Durability::LOW; self.changed_at = changed_at; } fn add_synthetic_read(&mut self, durability: Durability, revision: Revision) { self.dependencies = None; self.durability = self.durability.min(durability); self.changed_at = self.changed_at.max(revision); } pub(crate) fn revisions(&self) -> QueryRevisions { let inputs = match &self.dependencies { None => QueryInputs::Untracked, Some(dependencies) => { if dependencies.is_empty() { QueryInputs::NoInputs } else { QueryInputs::Tracked { inputs: dependencies.iter().copied().collect(), } } } }; QueryRevisions { changed_at: self.changed_at, inputs, durability: self.durability, } } /// Adds any dependencies from `other` into `self`. /// Used during cycle recovery, see [`Runtime::create_cycle_error`]. fn add_from(&mut self, other: &ActiveQuery) { self.changed_at = self.changed_at.max(other.changed_at); self.durability = self.durability.min(other.durability); if let Some(other_dependencies) = &
{ for rev in &self.shared_state.revisions[1..=d.index()] { rev.store(new_revision); } }
conditional_block
runtime.rs
Runtime { id: RuntimeId { counter: 0 }, revision_guard: None, shared_state: Default::default(), local_state: Default::default(), } } } impl std::fmt::Debug for Runtime { fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { fmt.debug_struct("Runtime") .field("id", &self.id()) .field("forked", &self.revision_guard.is_some()) .field("shared_state", &self.shared_state) .finish() } } impl Runtime { /// Create a new runtime; equivalent to `Self::default`. This is /// used when creating a new database. pub fn new() -> Self { Self::default() } /// See [`crate::storage::Storage::snapshot`]. pub(crate) fn snapshot(&self) -> Self { if self.local_state.query_in_progress() { panic!("it is not legal to `snapshot` during a query (see salsa-rs/salsa#80)"); } let revision_guard = RevisionGuard::new(&self.shared_state); let id = RuntimeId { counter: self.shared_state.next_id.fetch_add(1, Ordering::SeqCst), }; Runtime { id, revision_guard: Some(revision_guard), shared_state: self.shared_state.clone(), local_state: Default::default(), } } /// A "synthetic write" causes the system to act *as though* some /// input of durability `durability` has changed. This is mostly /// useful for profiling scenarios. /// /// **WARNING:** Just like an ordinary write, this method triggers /// cancellation. If you invoke it while a snapshot exists, it /// will block until that snapshot is dropped -- if that snapshot /// is owned by the current thread, this could trigger deadlock. pub fn synthetic_write(&mut self, durability: Durability) { self.with_incremented_revision(|_next_revision| Some(durability)); } /// The unique identifier attached to this `SalsaRuntime`. Each /// snapshotted runtime has a distinct identifier. #[inline] pub fn id(&self) -> RuntimeId { self.id } /// Returns the database-key for the query that this thread is /// actively executing (if any). 
pub fn active_query(&self) -> Option<DatabaseKeyIndex> { self.local_state.active_query() } /// Read current value of the revision counter. #[inline] pub(crate) fn current_revision(&self) -> Revision { self.shared_state.revisions[0].load() } /// The revision in which values with durability `d` may have last /// changed. For D0, this is just the current revision. But for /// higher levels of durability, this value may lag behind the /// current revision. If we encounter a value of durability Di, /// then, we can check this function to get a "bound" on when the /// value may have changed, which allows us to skip walking its /// dependencies. #[inline] pub(crate) fn last_changed_revision(&self, d: Durability) -> Revision { self.shared_state.revisions[d.index()].load() } /// Read current value of the revision counter. #[inline] pub(crate) fn pending_revision(&self) -> Revision { self.shared_state.pending_revision.load() } #[cold] pub(crate) fn unwind_cancelled(&self) { self.report_untracked_read(); Cancelled::PendingWrite.throw(); } /// Acquires the **global query write lock** (ensuring that no queries are /// executing) and then increments the current revision counter; invokes /// `op` with the global query write lock still held. /// /// While we wait to acquire the global query write lock, this method will /// also increment `pending_revision_increments`, thus signalling to queries /// that their results are "cancelled" and they should abort as expeditiously /// as possible. /// /// The `op` closure should actually perform the writes needed. It is given /// the new revision as an argument, and its return value indicates whether /// any pre-existing value was modified: /// /// - returning `None` means that no pre-existing value was modified (this /// could occur e.g. when setting some key on an input that was never set /// before) /// - returning `Some(d)` indicates that a pre-existing value was modified /// and it had the durability `d`. 
This will update the records for when /// values with each durability were modified. /// /// Note that, given our writer model, we can assume that only one thread is /// attempting to increment the global revision at a time. pub(crate) fn with_incremented_revision<F>(&mut self, op: F) where F: FnOnce(Revision) -> Option<Durability>, { log::debug!("increment_revision()"); if!self.permits_increment() { panic!("increment_revision invoked during a query computation"); } // Set the `pending_revision` field so that people // know current revision is cancelled. let current_revision = self.shared_state.pending_revision.fetch_then_increment(); // To modify the revision, we need the lock. let shared_state = self.shared_state.clone(); let _lock = shared_state.query_lock.write(); let old_revision = self.shared_state.revisions[0].fetch_then_increment(); assert_eq!(current_revision, old_revision); let new_revision = current_revision.next(); debug!("increment_revision: incremented to {:?}", new_revision); if let Some(d) = op(new_revision) { for rev in &self.shared_state.revisions[1..=d.index()] { rev.store(new_revision); } } } pub(crate) fn permits_increment(&self) -> bool { self.revision_guard.is_none() &&!self.local_state.query_in_progress() } #[inline] pub(crate) fn push_query(&self, database_key_index: DatabaseKeyIndex) -> ActiveQueryGuard<'_> { self.local_state.push_query(database_key_index) } /// Reports that the currently active query read the result from /// another query. /// /// Also checks whether the "cycle participant" flag is set on /// the current stack frame -- if so, panics with `CycleParticipant` /// value, which should be caught by the code executing the query. 
/// /// # Parameters /// /// - `database_key`: the query whose result was read /// - `changed_revision`: the last revision in which the result of that /// query had changed pub(crate) fn report_query_read_and_unwind_if_cycle_resulted( &self, input: DatabaseKeyIndex, durability: Durability, changed_at: Revision, )
/// Reports that the query depends on some state unknown to salsa. /// /// Queries which report untracked reads will be re-executed in the next /// revision. pub fn report_untracked_read(&self) { self.local_state .report_untracked_read(self.current_revision()); } /// Acts as though the current query had read an input with the given durability; this will force the current query's durability to be at most `durability`. /// /// This is mostly useful to control the durability level for [on-demand inputs](https://salsa-rs.github.io/salsa/common_patterns/on_demand_inputs.html). pub fn report_synthetic_read(&self, durability: Durability) { let changed_at = self.last_changed_revision(durability); self.local_state .report_synthetic_read(durability, changed_at); } /// Handles a cycle in the dependency graph that was detected when the /// current thread tried to block on `database_key_index` which is being /// executed by `to_id`. If this function returns, then `to_id` no longer /// depends on the current thread, and so we should continue executing /// as normal. Otherwise, the function will throw a `Cycle` which is expected /// to be caught by some frame on our stack. This occurs either if there is /// a frame on our stack with cycle recovery (possibly the top one!) or if there /// is no cycle recovery at all. fn unblock_cycle_and_maybe_throw( &self, db: &dyn Database, dg: &mut DependencyGraph, database_key_index: DatabaseKeyIndex, to_id: RuntimeId, ) { debug!( "unblock_cycle_and_maybe_throw(database_key={:?})", database_key_index ); let mut from_stack = self.local_state.take_query_stack(); let from_id = self.id(); // Make a "dummy stack frame". As we iterate through the cycle, we will collect the // inputs from each participant. Then, if we are participating in cycle recovery, we // will propagate those results to all participants. 
let mut cycle_query = ActiveQuery::new(database_key_index); // Identify the cycle participants: let cycle = { let mut v = vec![]; dg.for_each_cycle_participant( from_id, &mut from_stack, database_key_index, to_id, |aqs| { aqs.iter_mut().for_each(|aq| { cycle_query.add_from(aq); v.push(aq.database_key_index); }); }, ); // We want to give the participants in a deterministic order // (at least for this execution, not necessarily across executions), // no matter where it started on the stack. Find the minimum // key and rotate it to the front. let min = v.iter().min().unwrap(); let index = v.iter().position(|p| p == min).unwrap(); v.rotate_left(index); // No need to store extra memory. v.shrink_to_fit(); Cycle::new(Arc::new(v)) }; debug!( "cycle {:?}, cycle_query {:#?}", cycle.debug(db), cycle_query, ); // We can remove the cycle participants from the list of dependencies; // they are a strongly connected component (SCC) and we only care about // dependencies to things outside the SCC that control whether it will // form again. cycle_query.remove_cycle_participants(&cycle); // Mark each cycle participant that has recovery set, along with // any frames that come after them on the same thread. Those frames // are going to be unwound so that fallback can occur. dg.for_each_cycle_participant(from_id, &mut from_stack, database_key_index, to_id, |aqs| { aqs.iter_mut() .skip_while( |aq| match db.cycle_recovery_strategy(aq.database_key_index) { CycleRecoveryStrategy::Panic => true, CycleRecoveryStrategy::Fallback => false, }, ) .for_each(|aq| { debug!("marking {:?} for fallback", aq.database_key_index.debug(db)); aq.take_inputs_from(&cycle_query); assert!(aq.cycle.is_none()); aq.cycle = Some(cycle.clone()); }); }); // Unblock every thread that has cycle recovery with a `WaitResult::Cycle`. // They will throw the cycle, which will be caught by the frame that has // cycle recovery so that it can execute that recovery. 
let (me_recovered, others_recovered) = dg.maybe_unblock_runtimes_in_cycle(from_id, &from_stack, database_key_index, to_id); self.local_state.restore_query_stack(from_stack); if me_recovered { // If the current thread has recovery, we want to throw // so that it can begin. cycle.throw() } else if others_recovered { // If other threads have recovery but we didn't: return and we will block on them. } else { // if nobody has recover, then we panic panic_any(cycle); } } /// Block until `other_id` completes executing `database_key`; /// panic or unwind in the case of a cycle. /// /// `query_mutex_guard` is the guard for the current query's state; /// it will be dropped after we have successfully registered the /// dependency. /// /// # Propagating panics /// /// If the thread `other_id` panics, then our thread is considered /// cancelled, so this function will panic with a `Cancelled` value. /// /// # Cycle handling /// /// If the thread `other_id` already depends on the current thread, /// and hence there is a cycle in the query graph, then this function /// will unwind instead of returning normally. The method of unwinding /// depends on the [`Self::mutual_cycle_recovery_strategy`] /// of the cycle participants: /// /// * [`CycleRecoveryStrategy::Panic`]: panic with the [`Cycle`] as the value. /// * [`CycleRecoveryStrategy::Fallback`]: initiate unwinding with [`CycleParticipant::unwind`]. pub(crate) fn block_on_or_unwind<QueryMutexGuard>( &self, db: &dyn Database, database_key: DatabaseKeyIndex, other_id: RuntimeId, query_mutex_guard: QueryMutexGuard, ) { let mut dg = self.shared_state.dependency_graph.lock(); if dg.depends_on(other_id, self.id()) { self.unblock_cycle_and_maybe_throw(db, &mut dg, database_key, other_id); // If the above fn returns, then (via cycle recovery) it has unblocked the // cycle, so we can continue. 
assert!(!dg.depends_on(other_id, self.id())); } db.salsa_event(Event { runtime_id: self.id(), kind: EventKind::WillBlockOn { other_runtime_id: other_id, database_key, }, }); let stack = self.local_state.take_query_stack(); let (stack, result) = DependencyGraph::block_on( dg, self.id(), database_key, other_id, stack, query_mutex_guard, ); self.local_state.restore_query_stack(stack); match result { WaitResult::Completed => (), // If the other thread panicked, then we consider this thread // cancelled. The assumption is that the panic will be detected // by the other thread and responded to appropriately. WaitResult::Panicked => Cancelled::PropagatedPanic.throw(), WaitResult::Cycle(c) => c.throw(), } } /// Invoked when this runtime completed computing `database_key` with /// the given result `wait_result` (`wait_result` should be `None` if /// computing `database_key` panicked and could not complete). /// This function unblocks any dependent queries and allows them /// to continue executing. pub(crate) fn unblock_queries_blocked_on( &self, database_key: DatabaseKeyIndex, wait_result: WaitResult, ) { self.shared_state .dependency_graph .lock() .unblock_runtimes_blocked_on(database_key, wait_result); } } /// State that will be common to all threads (when we support multiple threads) struct SharedState { /// Stores the next id to use for a snapshotted runtime (starts at 1). next_id: AtomicUsize, /// Whenever derived queries are executing, they acquire this lock /// in read mode. Mutating inputs (and thus creating a new /// revision) requires a write lock (thus guaranteeing that no /// derived queries are in progress). Note that this is not needed /// to prevent **race conditions** -- the revision counter itself /// is stored in an `AtomicUsize` so it can be cheaply read /// without acquiring the lock. Rather, the `query_lock` is used /// to ensure a higher-level consistency property. 
query_lock: RwLock<()>, /// This is typically equal to `revision` -- set to `revision+1` /// when a new revision is pending (which implies that the current /// revision is cancelled). pending_revision: AtomicRevision, /// Stores the "last change" revision for values of each duration. /// This vector is always of length at least 1 (for Durability 0) /// but its total length depends on the number of durations. The /// element at index 0 is special as it represents the "current /// revision". In general, we have the invariant that revisions /// in here are *declining* -- that is, `revisions[i] >= /// revisions[i + 1]`, for all `i`. This is because when you /// modify a value with durability D, that implies that values /// with durability less than D may have changed too. revisions: Vec<AtomicRevision>, /// The dependency graph tracks which runtimes are blocked on one /// another, waiting for queries to terminate. dependency_graph: Mutex<DependencyGraph>, } impl SharedState { fn with_durabilities(durabilities: usize) -> Self { SharedState { next_id: AtomicUsize::new(1), query_lock: Default::default(), revisions: (0..durabilities).map(|_| AtomicRevision::start()).collect(), pending_revision: AtomicRevision::start(), dependency_graph: Default::default(), } } } impl std::panic::RefUnwindSafe for SharedState {} impl Default for SharedState { fn default() -> Self { Self::with_durabilities(Durability::LEN) } } impl std::fmt::Debug for SharedState { fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let query_lock = if self.query_lock.try_write().is_some() { "<unlocked>" } else if self.query_lock.try_read().is_some() { "<rlocked>" } else { "<wlocked>" }; fmt.debug_struct("SharedState") .field("query_lock", &query_lock) .field("revisions", &self.revisions) .field("pending_revision", &self.pending_revision) .finish() } } #[derive(Debug)] struct ActiveQuery { /// What query is executing database_key_index: DatabaseKeyIndex, /// Minimum durability of inputs 
observed so far. durability: Durability, /// Maximum revision of all inputs observed. If we observe an /// untracked read, this will be set to the most recent revision. changed_at: Revision, /// Set of subqueries that were accessed thus far, or `None` if /// there was an untracked the read. dependencies: Option<FxIndexSet<DatabaseKeyIndex>>, /// Stores the entire cycle, if one is found and this query is part of it. cycle: Option<Cycle>, } impl ActiveQuery { fn new(database_key_index: DatabaseKeyIndex) -> Self { ActiveQuery { database_key_index, durability: Durability::MAX, changed_at: Revision::start(), dependencies: Some(FxIndexSet::default()), cycle: None, } } fn add_read(&mut self, input: DatabaseKeyIndex, durability: Durability, revision: Revision) { if let Some(set) = &mut self.dependencies { set.insert(input); } self.durability = self.durability.min(durability); self.changed_at = self.changed_at.max(revision); } fn add_untracked_read(&mut self, changed_at: Revision) { self.dependencies = None; self.durability = Durability::LOW; self.changed_at = changed_at; } fn add_synthetic_read(&mut self, durability: Durability, revision: Revision) { self.dependencies = None; self.durability = self.durability.min(durability); self.changed_at = self.changed_at.max(revision); } pub(crate) fn revisions(&self) -> QueryRevisions { let inputs = match &self.dependencies { None => QueryInputs::Untracked, Some(dependencies) => { if dependencies.is_empty() { QueryInputs::NoInputs } else { QueryInputs::Tracked { inputs: dependencies.iter().copied().collect(), } } } }; QueryRevisions { changed_at: self.changed_at, inputs, durability: self.durability, } } /// Adds any dependencies from `other` into `self`. /// Used during cycle recovery, see [`Runtime::create_cycle_error`]. fn add_from(&mut self, other: &ActiveQuery) { self.changed_at = self.changed_at.max(other.changed_at); self.durability = self.durability.min(other.durability); if let Some(other_dependencies) = &
{ self.local_state .report_query_read_and_unwind_if_cycle_resulted(input, durability, changed_at); }
identifier_body
scanner.rs
use crate::file::{FileContent, FileSet}; use crate::metadata::Metadata; use std::cell::RefCell; use std::cmp; use std::collections::btree_map::Entry as BTreeEntry; use std::collections::hash_map::Entry as HashEntry; use std::collections::BTreeMap; use std::collections::BinaryHeap; use std::collections::HashMap; use std::collections::HashSet; use std::ffi::OsString; use std::fmt::Debug; use std::fs; use std::io; use std::os::unix::fs::MetadataExt; use std::path::Path; use std::rc::Rc; use std::sync::atomic::AtomicU32; use std::sync::atomic::Ordering; use std::time::{Duration, Instant}; #[derive(Debug, Copy, Clone, Eq, PartialEq)] pub enum RunMode { /// Merges paths in memory, but not on disk. Gives realistic UI output. DryRun, /// Like dry run, but completely skips deduping, with no UI for dupes. DryRunNoMerging, Hardlink, } #[derive(Debug)] pub struct Settings { /// Ignore files smaller than a filesystem block. /// Deduping of such files is unlikely to save space. pub ignore_small: bool, pub run_mode: RunMode, // If 1, go to flush. If > 1, abort immediately. 
pub break_on: Option<&'static AtomicU32>, } impl Settings { pub fn breaks(&self) -> u32 { if let Some(break_on) = self.break_on { break_on.load(Ordering::SeqCst) } else { 0 } } } #[derive(Debug, Default, Copy, Clone)] #[cfg_attr(feature = "json", derive(serde_derive::Serialize))] pub struct Stats { pub added: usize, pub skipped: usize, pub dupes: usize, pub bytes_deduplicated: usize, pub hardlinks: usize, pub bytes_saved_by_hardlinks: usize, } pub trait ScanListener: Debug { fn file_scanned(&mut self, path: &Path, stats: &Stats); fn scan_over(&self, scanner: &Scanner, stats: &Stats, scan_duration: Duration); fn hardlinked(&mut self, src: &Path, dst: &Path); fn duplicate_found(&mut self, src: &Path, dst: &Path); } #[derive(Debug)] struct SilentListener; impl ScanListener for SilentListener { fn file_scanned(&mut self, _: &Path, _: &Stats) {} fn scan_over(&self, _: &Scanner, _: &Stats, _: Duration) {} fn hardlinked(&mut self, _: &Path, _: &Path) {} fn duplicate_found(&mut self, _: &Path, _: &Path) {} } type RcFileSet = Rc<RefCell<FileSet>>; #[derive(Debug)] pub struct Scanner { /// All hardlinks of the same inode have to be treated as the same file by_inode: HashMap<(u64, u64), RcFileSet>, /// See Hasher for explanation by_content: BTreeMap<FileContent, Vec<RcFileSet>>, /// Directories left to scan. Sorted by inode number. /// I'm assuming scanning in this order is faster, since inode is related to file's age, /// which is related to its physical position on disk, which makes the scan more sequential. 
to_scan: BinaryHeap<(u64, Box<Path>)>, scan_listener: Box<dyn ScanListener>, stats: Stats, exclude: HashSet<OsString>, pub settings: Settings, deferred_count: usize, next_deferred_count: usize, } impl Scanner { pub fn new() -> Self { Scanner { settings: Settings { ignore_small: true, run_mode: RunMode::Hardlink, break_on: None, }, by_inode: HashMap::new(), by_content: BTreeMap::new(), to_scan: BinaryHeap::new(), scan_listener: Box::new(SilentListener), stats: Stats::default(), exclude: HashSet::new(), deferred_count: 0, next_deferred_count: 4096, } } pub fn exclude(&mut self, exclude: Vec<String>) { self.exclude = exclude.into_iter().map(From::from).collect(); } /// Set the scan listener. Caution: This overrides previously set listeners! /// Use a multiplexing listener if multiple listeners are required. pub fn set_listener(&mut self, listener: Box<dyn ScanListener>) { self.scan_listener = listener; } /// Scan any file or directory for dupes. /// Dedupe is done within the path as well as against all previously added paths. 
pub fn scan(&mut self, path: impl AsRef<Path>) -> io::Result<()> { self.enqueue(path)?; self.flush()?; Ok(()) } pub fn enqueue(&mut self, path: impl AsRef<Path>) -> io::Result<()> { let path = fs::canonicalize(path)?.into_boxed_path(); let metadata = fs::symlink_metadata(&path)?; self.add(path, &metadata)?; Ok(()) } /// Drains the queue of directories to scan pub fn flush(&mut self) -> io::Result<()> { let start_time = Instant::now(); while let Some((_, path)) = self.to_scan.pop() { if let Err(err) = self.scan_dir(&path) { eprintln!("Error scanning {}: {}", path.display(), err); self.stats.skipped += 1; } if self.settings.breaks() > 0 { eprintln!("Stopping scan"); break; } } self.flush_deferred(); let scan_duration = Instant::now().duration_since(start_time); self.scan_listener.scan_over(self, &self.stats, scan_duration); Ok(()) } fn scan_dir(&mut self, path: &Path) -> io::Result<()> { // Errors are ignored here, since it's super common to find permission denied and unreadable symlinks, // and it'd be annoying if that aborted the whole operation. // FIXME: store the errors somehow to report them in a controlled manner for entry in fs::read_dir(path)?.filter_map(|p| p.ok()) { if self.settings.breaks() > 0 { break; } let path = entry.path(); if let Some(file_name) = path.file_name() { if self.exclude.contains(file_name) { self.stats.skipped += 1; continue; } } if let Err(err) = self.add(path.into_boxed_path(), &entry.metadata()?) { eprintln!("{}: {}", entry.path().display(), err); } } Ok(()) } fn add(&mut self, path: Box<Path>, metadata: &fs::Metadata) -> io::Result<()> { self.scan_listener.file_scanned(&path, &self.stats); let ty = metadata.file_type(); if ty.is_dir() { // Inode is truncated to group scanning of roughly close inodes together, // But still preserve some directory traversal order. // Negation to scan from the highest (assuming latest) first. 
let order_key =!(metadata.ino() >> 8); self.to_scan.push((order_key, path)); return Ok(()); } else if ty.is_symlink() ||!ty.is_file() { // Support for traversing symlinks would require preventing loops // Deduping /dev/ would be funny self.stats.skipped += 1; return Ok(()); } // APFS reports 4*MB* block size let small_size = cmp::min(16 * 1024, metadata.blksize()); if metadata.size() == 0 || (self.settings.ignore_small && metadata.size() < small_size) { self.stats.skipped += 1; return Ok(()); }
self.stats.added += 1; if let Some(fileset) = self.new_fileset(&path, metadata) { self.dedupe_by_content(fileset, path, metadata)?; } else { self.stats.hardlinks += 1; self.stats.bytes_saved_by_hardlinks += metadata.size() as usize; } Ok(()) } /// Creates a new fileset if it's a new file. /// Returns None if it's a hardlink of a file already seen. fn new_fileset(&mut self, path: &Path, metadata: &fs::Metadata) -> Option<RcFileSet> { let path: Box<Path> = path.into(); let device_inode = (metadata.dev(), metadata.ino()); match self.by_inode.entry(device_inode) { HashEntry::Vacant(e) => { let fileset = Rc::new(RefCell::new(FileSet::new(path, metadata.nlink()))); e.insert(Rc::clone(&fileset)); // clone just bumps a refcount here Some(fileset) }, HashEntry::Occupied(mut e) => { // This case may require a deferred deduping later, // if the new link belongs to an old fileset that has already been deduped. let mut t = e.get_mut().borrow_mut(); t.push(path); None }, } } /// Here's where all the magic happens fn dedupe_by_content(&mut self, fileset: RcFileSet, path: Box<Path>, metadata: &fs::Metadata) -> io::Result<()> { let mut deferred = false; match self.by_content.entry(FileContent::new(path, Metadata::new(metadata))) { BTreeEntry::Vacant(e) => { // Seems unique so far e.insert(vec![fileset]); }, BTreeEntry::Occupied(mut e) => { // Found a dupe! self.stats.dupes += 1; self.stats.bytes_deduplicated += metadata.size() as usize; let filesets = e.get_mut(); filesets.push(fileset); // Deduping can either be done immediately or later. Immediate is more cache-friendly and interactive, // but for files that already have hardlinks it can cause unnecessary re-linking. So if there are // hardlinks in the set, wait until the end to dedupe when all hardlinks are known. 
if filesets.iter().all(|set| set.borrow().links() == 1) { Self::dedupe(filesets, self.settings.run_mode, &mut *self.scan_listener)?; } else { deferred = true; } }, } // Periodically flush deferred files to avoid building a huge queue // (the growing limit is a compromise between responsiveness // and potential to hit a pathological case of hardlinking with wrong hardlink groups) if deferred { self.deferred_count += 1; if self.deferred_count >= self.next_deferred_count { self.next_deferred_count *= 2; self.deferred_count = 0; self.flush_deferred(); } } Ok(()) } fn flush_deferred(&mut self) { for filesets in self.by_content.values_mut() { if self.settings.breaks() > 1 { eprintln!("Aborting"); break; } if let Err(err) = Self::dedupe(filesets, self.settings.run_mode, &mut *self.scan_listener) { eprintln!("{}", err); } } } fn dedupe(filesets: &mut [RcFileSet], run_mode: RunMode, scan_listener: &mut dyn ScanListener) -> io::Result<()> { if run_mode == RunMode::DryRunNoMerging { return Ok(()); } // Find file with the largest number of hardlinks, since it's less work to merge a small group into a large group let mut largest_idx = 0; let mut largest_links = 0; let mut nonempty_filesets = 0; for (idx, fileset) in filesets.iter().enumerate() { let fileset = fileset.borrow(); if!fileset.paths.is_empty() { // Only actual paths we can merge matter here nonempty_filesets += 1; } let links = fileset.links(); if links > largest_links { largest_idx = idx; largest_links = links; } } if nonempty_filesets == 0 { return Ok(()); // Already merged } // The set is still going to be in use! 
So everything has to be updated to make sense for the next call let merged_paths = &mut { filesets[largest_idx].borrow_mut() }.paths; let source_path = merged_paths[0].clone(); for (i, set) in filesets.iter().enumerate() { // We don't want to merge the set with itself if i == largest_idx { continue; } let paths = &mut set.borrow_mut().paths; // dest_path will be "lost" on error, but that's fine, since we don't want to dedupe it if it causes errors for dest_path in paths.drain(..) { assert_ne!(&source_path, &dest_path); debug_assert_ne!(fs::symlink_metadata(&source_path)?.ino(), fs::symlink_metadata(&dest_path)?.ino()); if run_mode == RunMode::DryRun { scan_listener.duplicate_found(&dest_path, &source_path); merged_paths.push(dest_path); continue; } let temp_path = dest_path.with_file_name(".tmp-dupe-e1iIQcBFn5pC4MUSm-xkcd-221"); debug_assert!(!temp_path.exists()); debug_assert!(source_path.exists()); debug_assert!(dest_path.exists()); // In posix link guarantees not to overwrite, and mv guarantes to move atomically // so this two-step replacement is pretty robust if let Err(err) = fs::hard_link(&source_path, &temp_path) { eprintln!("unable to hardlink {} {} due to {}", source_path.display(), temp_path.display(), err); let _ = fs::remove_file(temp_path); return Err(err); } if let Err(err) = fs::rename(&temp_path, &dest_path) { eprintln!("unable to rename {} {} due to {}", temp_path.display(), dest_path.display(), err); let _ = fs::remove_file(temp_path); return Err(err); } debug_assert!(!temp_path.exists()); debug_assert!(source_path.exists()); debug_assert!(dest_path.exists()); scan_listener.hardlinked(&dest_path, &source_path); merged_paths.push(dest_path); } } Ok(()) } pub fn dupes(&self) -> Vec<Vec<FileSet>> { self.by_content.values().map(|filesets| { filesets.iter().map(|d|{ let tmp = d.borrow(); (*tmp).clone() }).collect() }).collect() } }
random_line_split
scanner.rs
use crate::file::{FileContent, FileSet}; use crate::metadata::Metadata; use std::cell::RefCell; use std::cmp; use std::collections::btree_map::Entry as BTreeEntry; use std::collections::hash_map::Entry as HashEntry; use std::collections::BTreeMap; use std::collections::BinaryHeap; use std::collections::HashMap; use std::collections::HashSet; use std::ffi::OsString; use std::fmt::Debug; use std::fs; use std::io; use std::os::unix::fs::MetadataExt; use std::path::Path; use std::rc::Rc; use std::sync::atomic::AtomicU32; use std::sync::atomic::Ordering; use std::time::{Duration, Instant}; #[derive(Debug, Copy, Clone, Eq, PartialEq)] pub enum RunMode { /// Merges paths in memory, but not on disk. Gives realistic UI output. DryRun, /// Like dry run, but completely skips deduping, with no UI for dupes. DryRunNoMerging, Hardlink, } #[derive(Debug)] pub struct Settings { /// Ignore files smaller than a filesystem block. /// Deduping of such files is unlikely to save space. pub ignore_small: bool, pub run_mode: RunMode, // If 1, go to flush. If > 1, abort immediately. 
pub break_on: Option<&'static AtomicU32>, } impl Settings { pub fn breaks(&self) -> u32 { if let Some(break_on) = self.break_on { break_on.load(Ordering::SeqCst) } else { 0 } } } #[derive(Debug, Default, Copy, Clone)] #[cfg_attr(feature = "json", derive(serde_derive::Serialize))] pub struct Stats { pub added: usize, pub skipped: usize, pub dupes: usize, pub bytes_deduplicated: usize, pub hardlinks: usize, pub bytes_saved_by_hardlinks: usize, } pub trait ScanListener: Debug { fn file_scanned(&mut self, path: &Path, stats: &Stats); fn scan_over(&self, scanner: &Scanner, stats: &Stats, scan_duration: Duration); fn hardlinked(&mut self, src: &Path, dst: &Path); fn duplicate_found(&mut self, src: &Path, dst: &Path); } #[derive(Debug)] struct SilentListener; impl ScanListener for SilentListener { fn file_scanned(&mut self, _: &Path, _: &Stats) {} fn scan_over(&self, _: &Scanner, _: &Stats, _: Duration) {} fn hardlinked(&mut self, _: &Path, _: &Path) {} fn duplicate_found(&mut self, _: &Path, _: &Path) {} } type RcFileSet = Rc<RefCell<FileSet>>; #[derive(Debug)] pub struct Scanner { /// All hardlinks of the same inode have to be treated as the same file by_inode: HashMap<(u64, u64), RcFileSet>, /// See Hasher for explanation by_content: BTreeMap<FileContent, Vec<RcFileSet>>, /// Directories left to scan. Sorted by inode number. /// I'm assuming scanning in this order is faster, since inode is related to file's age, /// which is related to its physical position on disk, which makes the scan more sequential. 
to_scan: BinaryHeap<(u64, Box<Path>)>, scan_listener: Box<dyn ScanListener>, stats: Stats, exclude: HashSet<OsString>, pub settings: Settings, deferred_count: usize, next_deferred_count: usize, } impl Scanner { pub fn new() -> Self { Scanner { settings: Settings { ignore_small: true, run_mode: RunMode::Hardlink, break_on: None, }, by_inode: HashMap::new(), by_content: BTreeMap::new(), to_scan: BinaryHeap::new(), scan_listener: Box::new(SilentListener), stats: Stats::default(), exclude: HashSet::new(), deferred_count: 0, next_deferred_count: 4096, } } pub fn exclude(&mut self, exclude: Vec<String>) { self.exclude = exclude.into_iter().map(From::from).collect(); } /// Set the scan listener. Caution: This overrides previously set listeners! /// Use a multiplexing listener if multiple listeners are required. pub fn set_listener(&mut self, listener: Box<dyn ScanListener>) { self.scan_listener = listener; } /// Scan any file or directory for dupes. /// Dedupe is done within the path as well as against all previously added paths. 
pub fn scan(&mut self, path: impl AsRef<Path>) -> io::Result<()> { self.enqueue(path)?; self.flush()?; Ok(()) } pub fn enqueue(&mut self, path: impl AsRef<Path>) -> io::Result<()> { let path = fs::canonicalize(path)?.into_boxed_path(); let metadata = fs::symlink_metadata(&path)?; self.add(path, &metadata)?; Ok(()) } /// Drains the queue of directories to scan pub fn flush(&mut self) -> io::Result<()> { let start_time = Instant::now(); while let Some((_, path)) = self.to_scan.pop() { if let Err(err) = self.scan_dir(&path) { eprintln!("Error scanning {}: {}", path.display(), err); self.stats.skipped += 1; } if self.settings.breaks() > 0 { eprintln!("Stopping scan"); break; } } self.flush_deferred(); let scan_duration = Instant::now().duration_since(start_time); self.scan_listener.scan_over(self, &self.stats, scan_duration); Ok(()) } fn scan_dir(&mut self, path: &Path) -> io::Result<()> { // Errors are ignored here, since it's super common to find permission denied and unreadable symlinks, // and it'd be annoying if that aborted the whole operation. // FIXME: store the errors somehow to report them in a controlled manner for entry in fs::read_dir(path)?.filter_map(|p| p.ok()) { if self.settings.breaks() > 0 { break; } let path = entry.path(); if let Some(file_name) = path.file_name() { if self.exclude.contains(file_name) { self.stats.skipped += 1; continue; } } if let Err(err) = self.add(path.into_boxed_path(), &entry.metadata()?) { eprintln!("{}: {}", entry.path().display(), err); } } Ok(()) } fn add(&mut self, path: Box<Path>, metadata: &fs::Metadata) -> io::Result<()> { self.scan_listener.file_scanned(&path, &self.stats); let ty = metadata.file_type(); if ty.is_dir() { // Inode is truncated to group scanning of roughly close inodes together, // But still preserve some directory traversal order. // Negation to scan from the highest (assuming latest) first. 
let order_key =!(metadata.ino() >> 8); self.to_scan.push((order_key, path)); return Ok(()); } else if ty.is_symlink() ||!ty.is_file() { // Support for traversing symlinks would require preventing loops // Deduping /dev/ would be funny self.stats.skipped += 1; return Ok(()); } // APFS reports 4*MB* block size let small_size = cmp::min(16 * 1024, metadata.blksize()); if metadata.size() == 0 || (self.settings.ignore_small && metadata.size() < small_size) { self.stats.skipped += 1; return Ok(()); } self.stats.added += 1; if let Some(fileset) = self.new_fileset(&path, metadata) { self.dedupe_by_content(fileset, path, metadata)?; } else { self.stats.hardlinks += 1; self.stats.bytes_saved_by_hardlinks += metadata.size() as usize; } Ok(()) } /// Creates a new fileset if it's a new file. /// Returns None if it's a hardlink of a file already seen. fn new_fileset(&mut self, path: &Path, metadata: &fs::Metadata) -> Option<RcFileSet> { let path: Box<Path> = path.into(); let device_inode = (metadata.dev(), metadata.ino()); match self.by_inode.entry(device_inode) { HashEntry::Vacant(e) =>
, HashEntry::Occupied(mut e) => { // This case may require a deferred deduping later, // if the new link belongs to an old fileset that has already been deduped. let mut t = e.get_mut().borrow_mut(); t.push(path); None }, } } /// Here's where all the magic happens fn dedupe_by_content(&mut self, fileset: RcFileSet, path: Box<Path>, metadata: &fs::Metadata) -> io::Result<()> { let mut deferred = false; match self.by_content.entry(FileContent::new(path, Metadata::new(metadata))) { BTreeEntry::Vacant(e) => { // Seems unique so far e.insert(vec![fileset]); }, BTreeEntry::Occupied(mut e) => { // Found a dupe! self.stats.dupes += 1; self.stats.bytes_deduplicated += metadata.size() as usize; let filesets = e.get_mut(); filesets.push(fileset); // Deduping can either be done immediately or later. Immediate is more cache-friendly and interactive, // but for files that already have hardlinks it can cause unnecessary re-linking. So if there are // hardlinks in the set, wait until the end to dedupe when all hardlinks are known. 
if filesets.iter().all(|set| set.borrow().links() == 1) { Self::dedupe(filesets, self.settings.run_mode, &mut *self.scan_listener)?; } else { deferred = true; } }, } // Periodically flush deferred files to avoid building a huge queue // (the growing limit is a compromise between responsiveness // and potential to hit a pathological case of hardlinking with wrong hardlink groups) if deferred { self.deferred_count += 1; if self.deferred_count >= self.next_deferred_count { self.next_deferred_count *= 2; self.deferred_count = 0; self.flush_deferred(); } } Ok(()) } fn flush_deferred(&mut self) { for filesets in self.by_content.values_mut() { if self.settings.breaks() > 1 { eprintln!("Aborting"); break; } if let Err(err) = Self::dedupe(filesets, self.settings.run_mode, &mut *self.scan_listener) { eprintln!("{}", err); } } } fn dedupe(filesets: &mut [RcFileSet], run_mode: RunMode, scan_listener: &mut dyn ScanListener) -> io::Result<()> { if run_mode == RunMode::DryRunNoMerging { return Ok(()); } // Find file with the largest number of hardlinks, since it's less work to merge a small group into a large group let mut largest_idx = 0; let mut largest_links = 0; let mut nonempty_filesets = 0; for (idx, fileset) in filesets.iter().enumerate() { let fileset = fileset.borrow(); if!fileset.paths.is_empty() { // Only actual paths we can merge matter here nonempty_filesets += 1; } let links = fileset.links(); if links > largest_links { largest_idx = idx; largest_links = links; } } if nonempty_filesets == 0 { return Ok(()); // Already merged } // The set is still going to be in use! 
So everything has to be updated to make sense for the next call let merged_paths = &mut { filesets[largest_idx].borrow_mut() }.paths; let source_path = merged_paths[0].clone(); for (i, set) in filesets.iter().enumerate() { // We don't want to merge the set with itself if i == largest_idx { continue; } let paths = &mut set.borrow_mut().paths; // dest_path will be "lost" on error, but that's fine, since we don't want to dedupe it if it causes errors for dest_path in paths.drain(..) { assert_ne!(&source_path, &dest_path); debug_assert_ne!(fs::symlink_metadata(&source_path)?.ino(), fs::symlink_metadata(&dest_path)?.ino()); if run_mode == RunMode::DryRun { scan_listener.duplicate_found(&dest_path, &source_path); merged_paths.push(dest_path); continue; } let temp_path = dest_path.with_file_name(".tmp-dupe-e1iIQcBFn5pC4MUSm-xkcd-221"); debug_assert!(!temp_path.exists()); debug_assert!(source_path.exists()); debug_assert!(dest_path.exists()); // In posix link guarantees not to overwrite, and mv guarantes to move atomically // so this two-step replacement is pretty robust if let Err(err) = fs::hard_link(&source_path, &temp_path) { eprintln!("unable to hardlink {} {} due to {}", source_path.display(), temp_path.display(), err); let _ = fs::remove_file(temp_path); return Err(err); } if let Err(err) = fs::rename(&temp_path, &dest_path) { eprintln!("unable to rename {} {} due to {}", temp_path.display(), dest_path.display(), err); let _ = fs::remove_file(temp_path); return Err(err); } debug_assert!(!temp_path.exists()); debug_assert!(source_path.exists()); debug_assert!(dest_path.exists()); scan_listener.hardlinked(&dest_path, &source_path); merged_paths.push(dest_path); } } Ok(()) } pub fn dupes(&self) -> Vec<Vec<FileSet>> { self.by_content.values().map(|filesets| { filesets.iter().map(|d|{ let tmp = d.borrow(); (*tmp).clone() }).collect() }).collect() } }
{ let fileset = Rc::new(RefCell::new(FileSet::new(path, metadata.nlink()))); e.insert(Rc::clone(&fileset)); // clone just bumps a refcount here Some(fileset) }
conditional_block
scanner.rs
use crate::file::{FileContent, FileSet}; use crate::metadata::Metadata; use std::cell::RefCell; use std::cmp; use std::collections::btree_map::Entry as BTreeEntry; use std::collections::hash_map::Entry as HashEntry; use std::collections::BTreeMap; use std::collections::BinaryHeap; use std::collections::HashMap; use std::collections::HashSet; use std::ffi::OsString; use std::fmt::Debug; use std::fs; use std::io; use std::os::unix::fs::MetadataExt; use std::path::Path; use std::rc::Rc; use std::sync::atomic::AtomicU32; use std::sync::atomic::Ordering; use std::time::{Duration, Instant}; #[derive(Debug, Copy, Clone, Eq, PartialEq)] pub enum RunMode { /// Merges paths in memory, but not on disk. Gives realistic UI output. DryRun, /// Like dry run, but completely skips deduping, with no UI for dupes. DryRunNoMerging, Hardlink, } #[derive(Debug)] pub struct Settings { /// Ignore files smaller than a filesystem block. /// Deduping of such files is unlikely to save space. pub ignore_small: bool, pub run_mode: RunMode, // If 1, go to flush. If > 1, abort immediately. 
pub break_on: Option<&'static AtomicU32>, } impl Settings { pub fn breaks(&self) -> u32 { if let Some(break_on) = self.break_on { break_on.load(Ordering::SeqCst) } else { 0 } } } #[derive(Debug, Default, Copy, Clone)] #[cfg_attr(feature = "json", derive(serde_derive::Serialize))] pub struct Stats { pub added: usize, pub skipped: usize, pub dupes: usize, pub bytes_deduplicated: usize, pub hardlinks: usize, pub bytes_saved_by_hardlinks: usize, } pub trait ScanListener: Debug { fn file_scanned(&mut self, path: &Path, stats: &Stats); fn scan_over(&self, scanner: &Scanner, stats: &Stats, scan_duration: Duration); fn hardlinked(&mut self, src: &Path, dst: &Path); fn duplicate_found(&mut self, src: &Path, dst: &Path); } #[derive(Debug)] struct SilentListener; impl ScanListener for SilentListener { fn file_scanned(&mut self, _: &Path, _: &Stats) {} fn scan_over(&self, _: &Scanner, _: &Stats, _: Duration) {} fn hardlinked(&mut self, _: &Path, _: &Path) {} fn duplicate_found(&mut self, _: &Path, _: &Path) {} } type RcFileSet = Rc<RefCell<FileSet>>; #[derive(Debug)] pub struct Scanner { /// All hardlinks of the same inode have to be treated as the same file by_inode: HashMap<(u64, u64), RcFileSet>, /// See Hasher for explanation by_content: BTreeMap<FileContent, Vec<RcFileSet>>, /// Directories left to scan. Sorted by inode number. /// I'm assuming scanning in this order is faster, since inode is related to file's age, /// which is related to its physical position on disk, which makes the scan more sequential. 
to_scan: BinaryHeap<(u64, Box<Path>)>, scan_listener: Box<dyn ScanListener>, stats: Stats, exclude: HashSet<OsString>, pub settings: Settings, deferred_count: usize, next_deferred_count: usize, } impl Scanner { pub fn new() -> Self { Scanner { settings: Settings { ignore_small: true, run_mode: RunMode::Hardlink, break_on: None, }, by_inode: HashMap::new(), by_content: BTreeMap::new(), to_scan: BinaryHeap::new(), scan_listener: Box::new(SilentListener), stats: Stats::default(), exclude: HashSet::new(), deferred_count: 0, next_deferred_count: 4096, } } pub fn exclude(&mut self, exclude: Vec<String>) { self.exclude = exclude.into_iter().map(From::from).collect(); } /// Set the scan listener. Caution: This overrides previously set listeners! /// Use a multiplexing listener if multiple listeners are required. pub fn set_listener(&mut self, listener: Box<dyn ScanListener>) { self.scan_listener = listener; } /// Scan any file or directory for dupes. /// Dedupe is done within the path as well as against all previously added paths. pub fn scan(&mut self, path: impl AsRef<Path>) -> io::Result<()> { self.enqueue(path)?; self.flush()?; Ok(()) } pub fn enqueue(&mut self, path: impl AsRef<Path>) -> io::Result<()> { let path = fs::canonicalize(path)?.into_boxed_path(); let metadata = fs::symlink_metadata(&path)?; self.add(path, &metadata)?; Ok(()) } /// Drains the queue of directories to scan pub fn flush(&mut self) -> io::Result<()> { let start_time = Instant::now(); while let Some((_, path)) = self.to_scan.pop() { if let Err(err) = self.scan_dir(&path) { eprintln!("Error scanning {}: {}", path.display(), err); self.stats.skipped += 1; } if self.settings.breaks() > 0 { eprintln!("Stopping scan"); break; } } self.flush_deferred(); let scan_duration = Instant::now().duration_since(start_time); self.scan_listener.scan_over(self, &self.stats, scan_duration); Ok(()) } fn scan_dir(&mut self, path: &Path) -> io::Result<()>
Ok(()) } fn add(&mut self, path: Box<Path>, metadata: &fs::Metadata) -> io::Result<()> { self.scan_listener.file_scanned(&path, &self.stats); let ty = metadata.file_type(); if ty.is_dir() { // Inode is truncated to group scanning of roughly close inodes together, // But still preserve some directory traversal order. // Negation to scan from the highest (assuming latest) first. let order_key =!(metadata.ino() >> 8); self.to_scan.push((order_key, path)); return Ok(()); } else if ty.is_symlink() ||!ty.is_file() { // Support for traversing symlinks would require preventing loops // Deduping /dev/ would be funny self.stats.skipped += 1; return Ok(()); } // APFS reports 4*MB* block size let small_size = cmp::min(16 * 1024, metadata.blksize()); if metadata.size() == 0 || (self.settings.ignore_small && metadata.size() < small_size) { self.stats.skipped += 1; return Ok(()); } self.stats.added += 1; if let Some(fileset) = self.new_fileset(&path, metadata) { self.dedupe_by_content(fileset, path, metadata)?; } else { self.stats.hardlinks += 1; self.stats.bytes_saved_by_hardlinks += metadata.size() as usize; } Ok(()) } /// Creates a new fileset if it's a new file. /// Returns None if it's a hardlink of a file already seen. fn new_fileset(&mut self, path: &Path, metadata: &fs::Metadata) -> Option<RcFileSet> { let path: Box<Path> = path.into(); let device_inode = (metadata.dev(), metadata.ino()); match self.by_inode.entry(device_inode) { HashEntry::Vacant(e) => { let fileset = Rc::new(RefCell::new(FileSet::new(path, metadata.nlink()))); e.insert(Rc::clone(&fileset)); // clone just bumps a refcount here Some(fileset) }, HashEntry::Occupied(mut e) => { // This case may require a deferred deduping later, // if the new link belongs to an old fileset that has already been deduped. 
let mut t = e.get_mut().borrow_mut(); t.push(path); None }, } } /// Here's where all the magic happens fn dedupe_by_content(&mut self, fileset: RcFileSet, path: Box<Path>, metadata: &fs::Metadata) -> io::Result<()> { let mut deferred = false; match self.by_content.entry(FileContent::new(path, Metadata::new(metadata))) { BTreeEntry::Vacant(e) => { // Seems unique so far e.insert(vec![fileset]); }, BTreeEntry::Occupied(mut e) => { // Found a dupe! self.stats.dupes += 1; self.stats.bytes_deduplicated += metadata.size() as usize; let filesets = e.get_mut(); filesets.push(fileset); // Deduping can either be done immediately or later. Immediate is more cache-friendly and interactive, // but for files that already have hardlinks it can cause unnecessary re-linking. So if there are // hardlinks in the set, wait until the end to dedupe when all hardlinks are known. if filesets.iter().all(|set| set.borrow().links() == 1) { Self::dedupe(filesets, self.settings.run_mode, &mut *self.scan_listener)?; } else { deferred = true; } }, } // Periodically flush deferred files to avoid building a huge queue // (the growing limit is a compromise between responsiveness // and potential to hit a pathological case of hardlinking with wrong hardlink groups) if deferred { self.deferred_count += 1; if self.deferred_count >= self.next_deferred_count { self.next_deferred_count *= 2; self.deferred_count = 0; self.flush_deferred(); } } Ok(()) } fn flush_deferred(&mut self) { for filesets in self.by_content.values_mut() { if self.settings.breaks() > 1 { eprintln!("Aborting"); break; } if let Err(err) = Self::dedupe(filesets, self.settings.run_mode, &mut *self.scan_listener) { eprintln!("{}", err); } } } fn dedupe(filesets: &mut [RcFileSet], run_mode: RunMode, scan_listener: &mut dyn ScanListener) -> io::Result<()> { if run_mode == RunMode::DryRunNoMerging { return Ok(()); } // Find file with the largest number of hardlinks, since it's less work to merge a small group into a large group let mut 
largest_idx = 0; let mut largest_links = 0; let mut nonempty_filesets = 0; for (idx, fileset) in filesets.iter().enumerate() { let fileset = fileset.borrow(); if!fileset.paths.is_empty() { // Only actual paths we can merge matter here nonempty_filesets += 1; } let links = fileset.links(); if links > largest_links { largest_idx = idx; largest_links = links; } } if nonempty_filesets == 0 { return Ok(()); // Already merged } // The set is still going to be in use! So everything has to be updated to make sense for the next call let merged_paths = &mut { filesets[largest_idx].borrow_mut() }.paths; let source_path = merged_paths[0].clone(); for (i, set) in filesets.iter().enumerate() { // We don't want to merge the set with itself if i == largest_idx { continue; } let paths = &mut set.borrow_mut().paths; // dest_path will be "lost" on error, but that's fine, since we don't want to dedupe it if it causes errors for dest_path in paths.drain(..) { assert_ne!(&source_path, &dest_path); debug_assert_ne!(fs::symlink_metadata(&source_path)?.ino(), fs::symlink_metadata(&dest_path)?.ino()); if run_mode == RunMode::DryRun { scan_listener.duplicate_found(&dest_path, &source_path); merged_paths.push(dest_path); continue; } let temp_path = dest_path.with_file_name(".tmp-dupe-e1iIQcBFn5pC4MUSm-xkcd-221"); debug_assert!(!temp_path.exists()); debug_assert!(source_path.exists()); debug_assert!(dest_path.exists()); // In posix link guarantees not to overwrite, and mv guarantes to move atomically // so this two-step replacement is pretty robust if let Err(err) = fs::hard_link(&source_path, &temp_path) { eprintln!("unable to hardlink {} {} due to {}", source_path.display(), temp_path.display(), err); let _ = fs::remove_file(temp_path); return Err(err); } if let Err(err) = fs::rename(&temp_path, &dest_path) { eprintln!("unable to rename {} {} due to {}", temp_path.display(), dest_path.display(), err); let _ = fs::remove_file(temp_path); return Err(err); } debug_assert!(!temp_path.exists()); 
debug_assert!(source_path.exists()); debug_assert!(dest_path.exists()); scan_listener.hardlinked(&dest_path, &source_path); merged_paths.push(dest_path); } } Ok(()) } pub fn dupes(&self) -> Vec<Vec<FileSet>> { self.by_content.values().map(|filesets| { filesets.iter().map(|d|{ let tmp = d.borrow(); (*tmp).clone() }).collect() }).collect() } }
{ // Errors are ignored here, since it's super common to find permission denied and unreadable symlinks, // and it'd be annoying if that aborted the whole operation. // FIXME: store the errors somehow to report them in a controlled manner for entry in fs::read_dir(path)?.filter_map(|p| p.ok()) { if self.settings.breaks() > 0 { break; } let path = entry.path(); if let Some(file_name) = path.file_name() { if self.exclude.contains(file_name) { self.stats.skipped += 1; continue; } } if let Err(err) = self.add(path.into_boxed_path(), &entry.metadata()?) { eprintln!("{}: {}", entry.path().display(), err); } }
identifier_body
scanner.rs
use crate::file::{FileContent, FileSet}; use crate::metadata::Metadata; use std::cell::RefCell; use std::cmp; use std::collections::btree_map::Entry as BTreeEntry; use std::collections::hash_map::Entry as HashEntry; use std::collections::BTreeMap; use std::collections::BinaryHeap; use std::collections::HashMap; use std::collections::HashSet; use std::ffi::OsString; use std::fmt::Debug; use std::fs; use std::io; use std::os::unix::fs::MetadataExt; use std::path::Path; use std::rc::Rc; use std::sync::atomic::AtomicU32; use std::sync::atomic::Ordering; use std::time::{Duration, Instant}; #[derive(Debug, Copy, Clone, Eq, PartialEq)] pub enum RunMode { /// Merges paths in memory, but not on disk. Gives realistic UI output. DryRun, /// Like dry run, but completely skips deduping, with no UI for dupes. DryRunNoMerging, Hardlink, } #[derive(Debug)] pub struct Settings { /// Ignore files smaller than a filesystem block. /// Deduping of such files is unlikely to save space. pub ignore_small: bool, pub run_mode: RunMode, // If 1, go to flush. If > 1, abort immediately. 
pub break_on: Option<&'static AtomicU32>, } impl Settings { pub fn breaks(&self) -> u32 { if let Some(break_on) = self.break_on { break_on.load(Ordering::SeqCst) } else { 0 } } } #[derive(Debug, Default, Copy, Clone)] #[cfg_attr(feature = "json", derive(serde_derive::Serialize))] pub struct Stats { pub added: usize, pub skipped: usize, pub dupes: usize, pub bytes_deduplicated: usize, pub hardlinks: usize, pub bytes_saved_by_hardlinks: usize, } pub trait ScanListener: Debug { fn file_scanned(&mut self, path: &Path, stats: &Stats); fn scan_over(&self, scanner: &Scanner, stats: &Stats, scan_duration: Duration); fn hardlinked(&mut self, src: &Path, dst: &Path); fn duplicate_found(&mut self, src: &Path, dst: &Path); } #[derive(Debug)] struct SilentListener; impl ScanListener for SilentListener { fn file_scanned(&mut self, _: &Path, _: &Stats) {} fn scan_over(&self, _: &Scanner, _: &Stats, _: Duration) {} fn hardlinked(&mut self, _: &Path, _: &Path) {} fn duplicate_found(&mut self, _: &Path, _: &Path) {} } type RcFileSet = Rc<RefCell<FileSet>>; #[derive(Debug)] pub struct Scanner { /// All hardlinks of the same inode have to be treated as the same file by_inode: HashMap<(u64, u64), RcFileSet>, /// See Hasher for explanation by_content: BTreeMap<FileContent, Vec<RcFileSet>>, /// Directories left to scan. Sorted by inode number. /// I'm assuming scanning in this order is faster, since inode is related to file's age, /// which is related to its physical position on disk, which makes the scan more sequential. 
to_scan: BinaryHeap<(u64, Box<Path>)>, scan_listener: Box<dyn ScanListener>, stats: Stats, exclude: HashSet<OsString>, pub settings: Settings, deferred_count: usize, next_deferred_count: usize, } impl Scanner { pub fn new() -> Self { Scanner { settings: Settings { ignore_small: true, run_mode: RunMode::Hardlink, break_on: None, }, by_inode: HashMap::new(), by_content: BTreeMap::new(), to_scan: BinaryHeap::new(), scan_listener: Box::new(SilentListener), stats: Stats::default(), exclude: HashSet::new(), deferred_count: 0, next_deferred_count: 4096, } } pub fn exclude(&mut self, exclude: Vec<String>) { self.exclude = exclude.into_iter().map(From::from).collect(); } /// Set the scan listener. Caution: This overrides previously set listeners! /// Use a multiplexing listener if multiple listeners are required. pub fn set_listener(&mut self, listener: Box<dyn ScanListener>) { self.scan_listener = listener; } /// Scan any file or directory for dupes. /// Dedupe is done within the path as well as against all previously added paths. 
pub fn scan(&mut self, path: impl AsRef<Path>) -> io::Result<()> { self.enqueue(path)?; self.flush()?; Ok(()) } pub fn enqueue(&mut self, path: impl AsRef<Path>) -> io::Result<()> { let path = fs::canonicalize(path)?.into_boxed_path(); let metadata = fs::symlink_metadata(&path)?; self.add(path, &metadata)?; Ok(()) } /// Drains the queue of directories to scan pub fn flush(&mut self) -> io::Result<()> { let start_time = Instant::now(); while let Some((_, path)) = self.to_scan.pop() { if let Err(err) = self.scan_dir(&path) { eprintln!("Error scanning {}: {}", path.display(), err); self.stats.skipped += 1; } if self.settings.breaks() > 0 { eprintln!("Stopping scan"); break; } } self.flush_deferred(); let scan_duration = Instant::now().duration_since(start_time); self.scan_listener.scan_over(self, &self.stats, scan_duration); Ok(()) } fn scan_dir(&mut self, path: &Path) -> io::Result<()> { // Errors are ignored here, since it's super common to find permission denied and unreadable symlinks, // and it'd be annoying if that aborted the whole operation. // FIXME: store the errors somehow to report them in a controlled manner for entry in fs::read_dir(path)?.filter_map(|p| p.ok()) { if self.settings.breaks() > 0 { break; } let path = entry.path(); if let Some(file_name) = path.file_name() { if self.exclude.contains(file_name) { self.stats.skipped += 1; continue; } } if let Err(err) = self.add(path.into_boxed_path(), &entry.metadata()?) { eprintln!("{}: {}", entry.path().display(), err); } } Ok(()) } fn add(&mut self, path: Box<Path>, metadata: &fs::Metadata) -> io::Result<()> { self.scan_listener.file_scanned(&path, &self.stats); let ty = metadata.file_type(); if ty.is_dir() { // Inode is truncated to group scanning of roughly close inodes together, // But still preserve some directory traversal order. // Negation to scan from the highest (assuming latest) first. 
let order_key =!(metadata.ino() >> 8); self.to_scan.push((order_key, path)); return Ok(()); } else if ty.is_symlink() ||!ty.is_file() { // Support for traversing symlinks would require preventing loops // Deduping /dev/ would be funny self.stats.skipped += 1; return Ok(()); } // APFS reports 4*MB* block size let small_size = cmp::min(16 * 1024, metadata.blksize()); if metadata.size() == 0 || (self.settings.ignore_small && metadata.size() < small_size) { self.stats.skipped += 1; return Ok(()); } self.stats.added += 1; if let Some(fileset) = self.new_fileset(&path, metadata) { self.dedupe_by_content(fileset, path, metadata)?; } else { self.stats.hardlinks += 1; self.stats.bytes_saved_by_hardlinks += metadata.size() as usize; } Ok(()) } /// Creates a new fileset if it's a new file. /// Returns None if it's a hardlink of a file already seen. fn new_fileset(&mut self, path: &Path, metadata: &fs::Metadata) -> Option<RcFileSet> { let path: Box<Path> = path.into(); let device_inode = (metadata.dev(), metadata.ino()); match self.by_inode.entry(device_inode) { HashEntry::Vacant(e) => { let fileset = Rc::new(RefCell::new(FileSet::new(path, metadata.nlink()))); e.insert(Rc::clone(&fileset)); // clone just bumps a refcount here Some(fileset) }, HashEntry::Occupied(mut e) => { // This case may require a deferred deduping later, // if the new link belongs to an old fileset that has already been deduped. let mut t = e.get_mut().borrow_mut(); t.push(path); None }, } } /// Here's where all the magic happens fn
(&mut self, fileset: RcFileSet, path: Box<Path>, metadata: &fs::Metadata) -> io::Result<()> { let mut deferred = false; match self.by_content.entry(FileContent::new(path, Metadata::new(metadata))) { BTreeEntry::Vacant(e) => { // Seems unique so far e.insert(vec![fileset]); }, BTreeEntry::Occupied(mut e) => { // Found a dupe! self.stats.dupes += 1; self.stats.bytes_deduplicated += metadata.size() as usize; let filesets = e.get_mut(); filesets.push(fileset); // Deduping can either be done immediately or later. Immediate is more cache-friendly and interactive, // but for files that already have hardlinks it can cause unnecessary re-linking. So if there are // hardlinks in the set, wait until the end to dedupe when all hardlinks are known. if filesets.iter().all(|set| set.borrow().links() == 1) { Self::dedupe(filesets, self.settings.run_mode, &mut *self.scan_listener)?; } else { deferred = true; } }, } // Periodically flush deferred files to avoid building a huge queue // (the growing limit is a compromise between responsiveness // and potential to hit a pathological case of hardlinking with wrong hardlink groups) if deferred { self.deferred_count += 1; if self.deferred_count >= self.next_deferred_count { self.next_deferred_count *= 2; self.deferred_count = 0; self.flush_deferred(); } } Ok(()) } fn flush_deferred(&mut self) { for filesets in self.by_content.values_mut() { if self.settings.breaks() > 1 { eprintln!("Aborting"); break; } if let Err(err) = Self::dedupe(filesets, self.settings.run_mode, &mut *self.scan_listener) { eprintln!("{}", err); } } } fn dedupe(filesets: &mut [RcFileSet], run_mode: RunMode, scan_listener: &mut dyn ScanListener) -> io::Result<()> { if run_mode == RunMode::DryRunNoMerging { return Ok(()); } // Find file with the largest number of hardlinks, since it's less work to merge a small group into a large group let mut largest_idx = 0; let mut largest_links = 0; let mut nonempty_filesets = 0; for (idx, fileset) in filesets.iter().enumerate() { 
let fileset = fileset.borrow(); if!fileset.paths.is_empty() { // Only actual paths we can merge matter here nonempty_filesets += 1; } let links = fileset.links(); if links > largest_links { largest_idx = idx; largest_links = links; } } if nonempty_filesets == 0 { return Ok(()); // Already merged } // The set is still going to be in use! So everything has to be updated to make sense for the next call let merged_paths = &mut { filesets[largest_idx].borrow_mut() }.paths; let source_path = merged_paths[0].clone(); for (i, set) in filesets.iter().enumerate() { // We don't want to merge the set with itself if i == largest_idx { continue; } let paths = &mut set.borrow_mut().paths; // dest_path will be "lost" on error, but that's fine, since we don't want to dedupe it if it causes errors for dest_path in paths.drain(..) { assert_ne!(&source_path, &dest_path); debug_assert_ne!(fs::symlink_metadata(&source_path)?.ino(), fs::symlink_metadata(&dest_path)?.ino()); if run_mode == RunMode::DryRun { scan_listener.duplicate_found(&dest_path, &source_path); merged_paths.push(dest_path); continue; } let temp_path = dest_path.with_file_name(".tmp-dupe-e1iIQcBFn5pC4MUSm-xkcd-221"); debug_assert!(!temp_path.exists()); debug_assert!(source_path.exists()); debug_assert!(dest_path.exists()); // In posix link guarantees not to overwrite, and mv guarantes to move atomically // so this two-step replacement is pretty robust if let Err(err) = fs::hard_link(&source_path, &temp_path) { eprintln!("unable to hardlink {} {} due to {}", source_path.display(), temp_path.display(), err); let _ = fs::remove_file(temp_path); return Err(err); } if let Err(err) = fs::rename(&temp_path, &dest_path) { eprintln!("unable to rename {} {} due to {}", temp_path.display(), dest_path.display(), err); let _ = fs::remove_file(temp_path); return Err(err); } debug_assert!(!temp_path.exists()); debug_assert!(source_path.exists()); debug_assert!(dest_path.exists()); scan_listener.hardlinked(&dest_path, &source_path); 
merged_paths.push(dest_path); } } Ok(()) } pub fn dupes(&self) -> Vec<Vec<FileSet>> { self.by_content.values().map(|filesets| { filesets.iter().map(|d|{ let tmp = d.borrow(); (*tmp).clone() }).collect() }).collect() } }
dedupe_by_content
identifier_name
server.rs
extern crate hashbrown; extern crate rand; use crate::command; use self::ServerError::*; use command::{Command, CommandHandler}; use hashbrown::HashMap; use rand::Rng; use std::convert::TryFrom; use std::fmt; use std::io; use std::io::prelude::*; use std::net::TcpListener; use std::net::TcpStream; use std::sync::mpsc; use std::sync::mpsc::{Receiver, Sender, SyncSender}; use std::sync::{Arc, Mutex}; use std::thread; use std::time::Duration; pub type Id = usize; pub type ServerResult<T> = Result<T, ServerError>; fn bytes_to_string(buf: &[u8]) -> String { String::from(String::from_utf8_lossy(buf).trim()) } #[derive(Debug)] pub enum ServerError { InvalidConfig(&'static str), IoError(io::Error), ServerFull, } enum HandlerAsync { Working, Finished(FinishedStatus), } enum FinishedStatus { Terminated, Panicked, TimedOut, Errored(io::ErrorKind), } pub struct Server { size: usize, msg_sender: Sender<Message>, msg_recver: Receiver<Message>, handlers: HashMap<Id, Handler>, cmd_handler: CommandHandler, } impl Server { pub fn init(size: usize, cmd_prefix: char) -> ServerResult<Server> { if size == 0 { return Err(InvalidConfig("Server can not have zero connections.")); } let (msg_sender, msg_recver) = mpsc::channel(); let handlers = HashMap::with_capacity(size); let cmd_handler = CommandHandler::new(cmd_prefix); Ok(Server { size, msg_sender, msg_recver, handlers, cmd_handler, }) } #[allow(unused)] pub fn from_cfg() -> Result<Server, &'static str> { unimplemented!(); } pub fn cmd<C: Command +'static>(mut self, name: &'static str, command: C) -> Self { let command = Box::new(command); self.cmd_handler.register(name, command); self } pub fn start(mut self, listener: TcpListener) { eprintln!("Setting up listener..."); let (conn_sender, conn_recver) = mpsc::channel(); let _ = thread::spawn(move || { for stream in listener.incoming() { match stream { Ok(s) => conn_sender.send(s).expect("Connection receiver hung up!"), Err(_) => { eprintln!("There was an error receiving the 
connection!"); } } } }); // A bit of a hack to work around high CPU usage. This // timeout limits the amount of times per second that // the main loop runs, cutting down on the calls to these // functions significantly. Even with a very tiny timeout, // this makes the application run with very low CPU usage. let timeout = Duration::from_nanos(1000); eprintln!("Server started!"); loop { match conn_recver.recv_timeout(timeout) { Ok(s) => self .accept(s) .and_then(|id| { eprintln!("Connection {} accepted!", id); Ok(()) }) .unwrap_or_else(|e| eprintln!("Error accepting connection! Error: {:?}", e)), Err(e) if e == mpsc::RecvTimeoutError::Timeout => { self.check_handlers().iter().for_each(|id| { self.handlers .remove(id) .and_then(|handler| handler.thread.join().ok()); }); self.handle_msgs(); } Err(_) => panic!("Connection sender hung up!"), } } } fn accept(&mut self, stream: TcpStream) -> ServerResult<Id> { // Do not accept a connection if it would exceed the // max connections on the server. Just return an error // indicating that the server is full. if self.handlers.len() == self.size { return Err(ServerFull); } // We have to make sure that we don't have a duplicate // connection id. This is very unlikely to happen, but // it can, so I have to check. (Damn you, randomness!) let id = { let mut rng = rand::thread_rng(); let mut id: usize = rng.gen(); while let Some(_) = self.handlers.get(&id) { id = rng.gen(); } id }; let msg_sender = self.msg_sender.clone(); let conn = Connection::new(stream, id).map_err(IoError)?; let handler = Handler::accept(conn, msg_sender, Duration::from_secs(120)); // Don't care about the return type here since it // will always return None, due to our id check // at the beginning. self.handlers.insert(id, handler); Ok(id) } fn
(&self) -> Vec<usize> { use self::FinishedStatus::*; use self::HandlerAsync::*; self.handlers .iter() .filter(|(id, handler)| { if let Finished(status) = handler.check_status() { match status { TimedOut => { eprintln!("Connection {} timed out!", id); return true; } Errored(_) => { eprintln!("Connection {} errored!", id); return true; } Panicked => { eprintln!( "Connection {}'s Handler panicked! This is definitely a bug!", id ); return true; } Terminated => unimplemented!(), } } false }) .map(|(&id, _)| id) .collect() } fn handle_msgs(&self) { if let Ok(msg) = self.msg_recver.try_recv() { if msg.contents!= "" { if msg.contents.starts_with(self.cmd_handler.prefix) { let mut conn = self .handlers .get(&msg.from) .unwrap() .connection .lock() .expect("Another thread panicked while holding a conn lock!"); match self.cmd_handler.exec(&msg) { Ok(response) => { conn.write_bytes(response.msg.as_bytes()).unwrap_or_else(|err| { eprintln!( "Could not send message to a Connection! This is most likely a bug. Error: {}", err ); }); } Err(_) => { conn.write_bytes(b"Error").unwrap_or_else(|err| { eprintln!( "Could not send message to a Connection! This is most likely a bug. Error: {}", err ); }); } } } else { let msg_str = format!("{} -> {}", msg.from, msg.to_string()); println!("{}", msg_str); self.handlers.values().for_each(|handler| { let mut conn = handler .connection .lock() .expect("Another thread panicked while holding a conn lock!"); conn.write_bytes(msg_str.as_bytes()).unwrap_or_else(|err| { eprintln!( "Could not send message to a Connection! This is most likely a bug. 
Error: {}", err ); }); }); } } } } } struct Handler { status_recv: Receiver<FinishedStatus>, connection: Arc<Mutex<Connection>>, thread: thread::JoinHandle<()>, } impl Handler { fn accept(connection: Connection, msg_sender: Sender<Message>, timeout: Duration) -> Handler { let connection = Arc::new(Mutex::new(connection)); let (status_send, status_recv) = mpsc::sync_channel(0); let max_attempts = timeout.as_millis(); let thread_conn = Arc::clone(&connection); let thread = thread::spawn(move || { Handler::handle(thread_conn, status_send, msg_sender, max_attempts) }); Handler { status_recv, connection, thread, } } fn handle( conn: Arc<Mutex<Connection>>, status_sender: SyncSender<FinishedStatus>, msg_sender: Sender<Message>, max_attempts: u128, ) { use self::FinishedStatus::*; let mut attempts = 0u128; let mut buf = Vec::with_capacity(1024); // Just a default loop { thread::sleep(Duration::from_millis(1)); let mut conn = conn.lock().unwrap_or_else(|err| { // Ideally, this should not happen. This is only used to // propagate the panic if things do go south. status_sender .send(Panicked) .expect("Everything is wrong..."); panic!( "Another thread panicked while getting conn lock! Error: {}", err ); }); match conn.read_bytes(&mut buf) { Ok(_) => { // The client responded! Reset the attempts. 
attempts = 0; let msg_contents = bytes_to_string(&buf); let msg = Message::new(msg_contents, conn.id, None); msg_sender.send(msg).expect("Could not send Message!"); } Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { attempts += 1; if attempts == max_attempts { status_sender .send(TimedOut) .expect("Could not send Timed out signal!"); break; } } Err(e) => { status_sender .send(Errored(e.kind())) .expect("Could not send Errored signal!"); break; } } buf.clear(); } } fn check_status(&self) -> HandlerAsync { use self::HandlerAsync::*; match self.status_recv.try_recv() { Ok(status) => Finished(status), Err(e) if e == mpsc::TryRecvError::Empty => Working, Err(_) => panic!("Sender hung up! This should not happen."), } } } pub struct Connection { pub id: usize, stream: TcpStream, } impl Connection { fn new(stream: TcpStream, id: usize) -> io::Result<Connection> { stream.set_nonblocking(true)?; Ok(Connection { id, stream }) } fn read_bytes(&mut self, buf: &mut Vec<u8>) -> io::Result<()> { // The first two bytes are expected to be the size of the message. // This means that a message can be at most 65535 characters long. // The most significant byte comes first, and the least significant // byte second. let mut len_bytes = [0; 2]; self.stream .try_clone()? .take(2) .read_exact(&mut len_bytes)?; let len = ((len_bytes[0] as u16) << 8) + len_bytes[1] as u16; let mut msg = vec![0; len as usize].into_boxed_slice(); self.stream.read(&mut msg)?; // To remind myself what this does: We must dereference the Box, to get the // [u8] slice, and then reference it again in order to create a &[u8], since // Rust's automatic deref coercion rules won't do this for you. The reason // we need to do this is because &Box<[u8]> does not implement IntoIterator, // but &[u8] does, and Rust won't just deref to some type that implements it. buf.extend(&*msg); Ok(()) } fn write_bytes(&mut self, buf: &[u8]) -> io::Result<()> { // Somewhere to store the length bytes. 
let mut len_bytes = [0; 2]; // We need to write the length of the message into a variable. // Since we know that the buf.len() <= 65535, we can safely cast // it to u16. As a sanity check, I'm using try_from() to make sure // that it can be safely cast. let msg_len = u16::try_from(buf.len()).expect("converting to u16 here should always be safe!"); len_bytes[0] = (msg_len >> 8) as u8; len_bytes[1] = (msg_len & 255) as u8; let msg = [&len_bytes[..], &buf[..]].concat().into_boxed_slice(); self.stream.write_all(&msg)?; self.stream.flush()?; Ok(()) } } pub struct Message { pub contents: String, pub from: Id, pub to: Option<Id>, } impl Message { fn new(contents: String, from: Id, to: Option<Id>) -> Message { Message { contents, from, to } } } impl fmt::Display for Message { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.contents) } }
check_handlers
identifier_name
server.rs
extern crate hashbrown; extern crate rand; use crate::command; use self::ServerError::*; use command::{Command, CommandHandler}; use hashbrown::HashMap; use rand::Rng; use std::convert::TryFrom; use std::fmt; use std::io; use std::io::prelude::*; use std::net::TcpListener; use std::net::TcpStream; use std::sync::mpsc; use std::sync::mpsc::{Receiver, Sender, SyncSender}; use std::sync::{Arc, Mutex}; use std::thread; use std::time::Duration; pub type Id = usize; pub type ServerResult<T> = Result<T, ServerError>; fn bytes_to_string(buf: &[u8]) -> String { String::from(String::from_utf8_lossy(buf).trim()) } #[derive(Debug)] pub enum ServerError { InvalidConfig(&'static str), IoError(io::Error), ServerFull, } enum HandlerAsync { Working, Finished(FinishedStatus), } enum FinishedStatus { Terminated, Panicked, TimedOut, Errored(io::ErrorKind), } pub struct Server { size: usize, msg_sender: Sender<Message>, msg_recver: Receiver<Message>, handlers: HashMap<Id, Handler>, cmd_handler: CommandHandler, } impl Server { pub fn init(size: usize, cmd_prefix: char) -> ServerResult<Server> { if size == 0 { return Err(InvalidConfig("Server can not have zero connections.")); } let (msg_sender, msg_recver) = mpsc::channel(); let handlers = HashMap::with_capacity(size); let cmd_handler = CommandHandler::new(cmd_prefix);
msg_sender, msg_recver, handlers, cmd_handler, }) } #[allow(unused)] pub fn from_cfg() -> Result<Server, &'static str> { unimplemented!(); } pub fn cmd<C: Command +'static>(mut self, name: &'static str, command: C) -> Self { let command = Box::new(command); self.cmd_handler.register(name, command); self } pub fn start(mut self, listener: TcpListener) { eprintln!("Setting up listener..."); let (conn_sender, conn_recver) = mpsc::channel(); let _ = thread::spawn(move || { for stream in listener.incoming() { match stream { Ok(s) => conn_sender.send(s).expect("Connection receiver hung up!"), Err(_) => { eprintln!("There was an error receiving the connection!"); } } } }); // A bit of a hack to work around high CPU usage. This // timeout limits the amount of times per second that // the main loop runs, cutting down on the calls to these // functions significantly. Even with a very tiny timeout, // this makes the application run with very low CPU usage. let timeout = Duration::from_nanos(1000); eprintln!("Server started!"); loop { match conn_recver.recv_timeout(timeout) { Ok(s) => self .accept(s) .and_then(|id| { eprintln!("Connection {} accepted!", id); Ok(()) }) .unwrap_or_else(|e| eprintln!("Error accepting connection! Error: {:?}", e)), Err(e) if e == mpsc::RecvTimeoutError::Timeout => { self.check_handlers().iter().for_each(|id| { self.handlers .remove(id) .and_then(|handler| handler.thread.join().ok()); }); self.handle_msgs(); } Err(_) => panic!("Connection sender hung up!"), } } } fn accept(&mut self, stream: TcpStream) -> ServerResult<Id> { // Do not accept a connection if it would exceed the // max connections on the server. Just return an error // indicating that the server is full. if self.handlers.len() == self.size { return Err(ServerFull); } // We have to make sure that we don't have a duplicate // connection id. This is very unlikely to happen, but // it can, so I have to check. (Damn you, randomness!) 
let id = { let mut rng = rand::thread_rng(); let mut id: usize = rng.gen(); while let Some(_) = self.handlers.get(&id) { id = rng.gen(); } id }; let msg_sender = self.msg_sender.clone(); let conn = Connection::new(stream, id).map_err(IoError)?; let handler = Handler::accept(conn, msg_sender, Duration::from_secs(120)); // Don't care about the return type here since it // will always return None, due to our id check // at the beginning. self.handlers.insert(id, handler); Ok(id) } fn check_handlers(&self) -> Vec<usize> { use self::FinishedStatus::*; use self::HandlerAsync::*; self.handlers .iter() .filter(|(id, handler)| { if let Finished(status) = handler.check_status() { match status { TimedOut => { eprintln!("Connection {} timed out!", id); return true; } Errored(_) => { eprintln!("Connection {} errored!", id); return true; } Panicked => { eprintln!( "Connection {}'s Handler panicked! This is definitely a bug!", id ); return true; } Terminated => unimplemented!(), } } false }) .map(|(&id, _)| id) .collect() } fn handle_msgs(&self) { if let Ok(msg) = self.msg_recver.try_recv() { if msg.contents!= "" { if msg.contents.starts_with(self.cmd_handler.prefix) { let mut conn = self .handlers .get(&msg.from) .unwrap() .connection .lock() .expect("Another thread panicked while holding a conn lock!"); match self.cmd_handler.exec(&msg) { Ok(response) => { conn.write_bytes(response.msg.as_bytes()).unwrap_or_else(|err| { eprintln!( "Could not send message to a Connection! This is most likely a bug. Error: {}", err ); }); } Err(_) => { conn.write_bytes(b"Error").unwrap_or_else(|err| { eprintln!( "Could not send message to a Connection! This is most likely a bug. 
Error: {}", err ); }); } } } else { let msg_str = format!("{} -> {}", msg.from, msg.to_string()); println!("{}", msg_str); self.handlers.values().for_each(|handler| { let mut conn = handler .connection .lock() .expect("Another thread panicked while holding a conn lock!"); conn.write_bytes(msg_str.as_bytes()).unwrap_or_else(|err| { eprintln!( "Could not send message to a Connection! This is most likely a bug. Error: {}", err ); }); }); } } } } } struct Handler { status_recv: Receiver<FinishedStatus>, connection: Arc<Mutex<Connection>>, thread: thread::JoinHandle<()>, } impl Handler { fn accept(connection: Connection, msg_sender: Sender<Message>, timeout: Duration) -> Handler { let connection = Arc::new(Mutex::new(connection)); let (status_send, status_recv) = mpsc::sync_channel(0); let max_attempts = timeout.as_millis(); let thread_conn = Arc::clone(&connection); let thread = thread::spawn(move || { Handler::handle(thread_conn, status_send, msg_sender, max_attempts) }); Handler { status_recv, connection, thread, } } fn handle( conn: Arc<Mutex<Connection>>, status_sender: SyncSender<FinishedStatus>, msg_sender: Sender<Message>, max_attempts: u128, ) { use self::FinishedStatus::*; let mut attempts = 0u128; let mut buf = Vec::with_capacity(1024); // Just a default loop { thread::sleep(Duration::from_millis(1)); let mut conn = conn.lock().unwrap_or_else(|err| { // Ideally, this should not happen. This is only used to // propagate the panic if things do go south. status_sender .send(Panicked) .expect("Everything is wrong..."); panic!( "Another thread panicked while getting conn lock! Error: {}", err ); }); match conn.read_bytes(&mut buf) { Ok(_) => { // The client responded! Reset the attempts. 
attempts = 0; let msg_contents = bytes_to_string(&buf); let msg = Message::new(msg_contents, conn.id, None); msg_sender.send(msg).expect("Could not send Message!"); } Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { attempts += 1; if attempts == max_attempts { status_sender .send(TimedOut) .expect("Could not send Timed out signal!"); break; } } Err(e) => { status_sender .send(Errored(e.kind())) .expect("Could not send Errored signal!"); break; } } buf.clear(); } } fn check_status(&self) -> HandlerAsync { use self::HandlerAsync::*; match self.status_recv.try_recv() { Ok(status) => Finished(status), Err(e) if e == mpsc::TryRecvError::Empty => Working, Err(_) => panic!("Sender hung up! This should not happen."), } } } pub struct Connection { pub id: usize, stream: TcpStream, } impl Connection { fn new(stream: TcpStream, id: usize) -> io::Result<Connection> { stream.set_nonblocking(true)?; Ok(Connection { id, stream }) } fn read_bytes(&mut self, buf: &mut Vec<u8>) -> io::Result<()> { // The first two bytes are expected to be the size of the message. // This means that a message can be at most 65535 characters long. // The most significant byte comes first, and the least significant // byte second. let mut len_bytes = [0; 2]; self.stream .try_clone()? .take(2) .read_exact(&mut len_bytes)?; let len = ((len_bytes[0] as u16) << 8) + len_bytes[1] as u16; let mut msg = vec![0; len as usize].into_boxed_slice(); self.stream.read(&mut msg)?; // To remind myself what this does: We must dereference the Box, to get the // [u8] slice, and then reference it again in order to create a &[u8], since // Rust's automatic deref coercion rules won't do this for you. The reason // we need to do this is because &Box<[u8]> does not implement IntoIterator, // but &[u8] does, and Rust won't just deref to some type that implements it. buf.extend(&*msg); Ok(()) } fn write_bytes(&mut self, buf: &[u8]) -> io::Result<()> { // Somewhere to store the length bytes. 
let mut len_bytes = [0; 2]; // We need to write the length of the message into a variable. // Since we know that the buf.len() <= 65535, we can safely cast // it to u16. As a sanity check, I'm using try_from() to make sure // that it can be safely cast. let msg_len = u16::try_from(buf.len()).expect("converting to u16 here should always be safe!"); len_bytes[0] = (msg_len >> 8) as u8; len_bytes[1] = (msg_len & 255) as u8; let msg = [&len_bytes[..], &buf[..]].concat().into_boxed_slice(); self.stream.write_all(&msg)?; self.stream.flush()?; Ok(()) } } pub struct Message { pub contents: String, pub from: Id, pub to: Option<Id>, } impl Message { fn new(contents: String, from: Id, to: Option<Id>) -> Message { Message { contents, from, to } } } impl fmt::Display for Message { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.contents) } }
Ok(Server { size,
random_line_split
main.rs
format: wgpu::VertexFormat::Float4, }, wgpu::VertexAttributeDescriptor { offset: mem::size_of::<[f32; 8]>() as wgpu::BufferAddress, shader_location: 7, format: wgpu::VertexFormat::Float4, }, wgpu::VertexAttributeDescriptor { offset: mem::size_of::<[f32; 12]>() as wgpu::BufferAddress, shader_location: 8, format: wgpu::VertexFormat::Float4, }, ]; return wgpu::VertexBufferDescriptor { stride: mem::size_of::<InstanceRaw>() as wgpu::BufferAddress, // We need to switch from using a step mode of Vertex to Instance // This means that our shaders will only change to use the next // instance when the shader starts processing a new instance step_mode: wgpu::InputStepMode::Instance, attributes: attrib, }; } } //============================================================================= fn
( device: &wgpu::Device, layout: &wgpu::PipelineLayout, color_format: wgpu::TextureFormat, depth_format: Option<wgpu::TextureFormat>, vertex_descs: &[wgpu::VertexBufferDescriptor], vs_src: wgpu::ShaderModuleSource, fs_src: wgpu::ShaderModuleSource, ) -> wgpu::RenderPipeline { // Create ShaderModules let vs_module = device.create_shader_module(vs_src); let fs_module = device.create_shader_module(fs_src); // Create Render Pipeline let render_pipeline_desc = wgpu::RenderPipelineDescriptor { label: Some("render_pipeline"), layout: Some(&layout), vertex_stage: wgpu::ProgrammableStageDescriptor { module: &vs_module, entry_point: "main", }, fragment_stage: Some(wgpu::ProgrammableStageDescriptor { module: &fs_module, entry_point: "main", }), rasterization_state: Some(wgpu::RasterizationStateDescriptor{ front_face: wgpu::FrontFace::Ccw, cull_mode: wgpu::CullMode::Back, depth_bias: 0, depth_bias_slope_scale: 0.0, depth_bias_clamp: 0.0, clamp_depth: false, }), color_states: &[wgpu::ColorStateDescriptor{ // Define how colors are stored and processed format: color_format, color_blend: wgpu::BlendDescriptor::REPLACE, alpha_blend: wgpu::BlendDescriptor::REPLACE, write_mask: wgpu::ColorWrite::ALL, }], primitive_topology: wgpu::PrimitiveTopology::TriangleList, depth_stencil_state: depth_format.map(|format| wgpu::DepthStencilStateDescriptor { format, depth_write_enabled: true, // When to discard a new pixel. Drawn front to back. 
Depth should be less (closer // to camera) to discard the previous pixel on the texture depth_compare: wgpu::CompareFunction::Less, stencil: wgpu::StencilStateDescriptor::default(), }), vertex_state: wgpu::VertexStateDescriptor { index_format: wgpu::IndexFormat::Uint32, vertex_buffers: vertex_descs, }, sample_count: 1, sample_mask:!0, // Use all samples alpha_to_coverage_enabled: false, }; let render_pipeline = device.create_render_pipeline(&render_pipeline_desc); return render_pipeline; } struct State { surface: wgpu::Surface, device: wgpu::Device, queue: wgpu::Queue, swap_chain_desc: wgpu::SwapChainDescriptor, swap_chain: wgpu::SwapChain, render_pipeline: wgpu::RenderPipeline, obj_model: model::Model, camera: camera::Camera, camera_controller: camera::CameraController, projection: camera::Projection, uniforms: Uniforms, uniform_buffer: wgpu::Buffer, uniform_bind_group: wgpu::BindGroup, instances: Vec<Instance>, light: Light, light_buffer: wgpu::Buffer, light_bind_group: wgpu::BindGroup, light_render_pipeline: wgpu::RenderPipeline, #[allow(dead_code)] instance_buffer: wgpu::Buffer, depth_texture: texture::Texture, size: PhysicalSize<u32>, // INFO: PhysicalSize takes into account device's scale factor mouse_pressed: bool, } impl State { async fn new(window: &Window) -> Self { // The instance is a handle to our GPU // BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY); let surface = unsafe { instance.create_surface(window) }; // Create Adapter let adapter_options = &wgpu::RequestAdapterOptions { // Default gets LowP on battery and HighP when on mains power_preference: wgpu::PowerPreference::HighPerformance, compatible_surface: Some(&surface), }; // The adapter identifies both an instance of a physical hardware accelerator (CPU, GPU), // and an instance of a browser's implementation of WebGPU on top of the accelerator let adapter = instance.request_adapter(adapter_options).await.unwrap(); // 
Create Device and Queue let desc = &wgpu::DeviceDescriptor { features: wgpu::Features::empty(), limits: wgpu::Limits::default(), shader_validation: true, }; let (device, queue) = adapter.request_device(desc, None).await.unwrap(); // Create SwapChain let size = window.inner_size(); // INFO: Has into account the scale factor let swap_chain_desc = wgpu::SwapChainDescriptor { usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT, format: wgpu::TextureFormat::Bgra8UnormSrgb, // TODO: Should be swap_chain_get_current_texture_view but not available atm width: size.width, height: size.height, present_mode: wgpu::PresentMode::Fifo, }; let swap_chain = device.create_swap_chain(&surface, &swap_chain_desc); // Describe a set of resources and how are they accessed by a Shader let texture_bind_group_layout_desc = wgpu::BindGroupLayoutDescriptor { label: Some("texture_bind_group_layout"), entries: &[ wgpu::BindGroupLayoutEntry { binding: 0, visibility: wgpu::ShaderStage::FRAGMENT, // Bitwise comparison ty: wgpu::BindingType::SampledTexture { multisampled: false, dimension: wgpu::TextureViewDimension::D2, component_type: wgpu::TextureComponentType::Uint, }, count: None, }, wgpu::BindGroupLayoutEntry { binding: 1, visibility: wgpu::ShaderStage::FRAGMENT, ty: wgpu::BindingType::Sampler { comparison: false, }, count: None, }, wgpu::BindGroupLayoutEntry { binding: 2, visibility: wgpu::ShaderStage::FRAGMENT, ty: wgpu::BindingType::SampledTexture { multisampled: false, dimension: wgpu::TextureViewDimension::D2, component_type: wgpu::TextureComponentType::Uint, }, count: None, }, wgpu::BindGroupLayoutEntry { binding: 3, visibility: wgpu::ShaderStage::FRAGMENT, ty: wgpu::BindingType::Sampler { comparison: false, }, count: None, }, ], }; let texture_bind_group_layout = device.create_bind_group_layout(&texture_bind_group_layout_desc); let depth_texture = texture::Texture::create_depth_texture(&device, &swap_chain_desc, "depth_texture"); let camera = camera::Camera::new((0.0, 5.0, 10.0), 
cgmath::Deg(-90.0), cgmath::Deg(-20.0)); let projection = camera::Projection::new(swap_chain_desc.width, swap_chain_desc.height, cgmath::Deg(45.0), 0.1, 100.0); let camera_controller = camera::CameraController::new(4.0, 1.0); // Create Uniform Buffers let mut uniforms = Uniforms::new(); uniforms.update_view_proj(&camera, &projection); let uniforms_array = &[uniforms]; let uniform_buffer_desc = wgpu::util::BufferInitDescriptor { label: Some("uniform_buffer"), contents: bytemuck::cast_slice(uniforms_array), usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST, }; let uniform_buffer = device.create_buffer_init(&uniform_buffer_desc); // Create Uniform Bind Group let uniform_bind_group_layout_entry = wgpu::BindGroupLayoutEntry { binding: 0, visibility: wgpu::ShaderStage::VERTEX | wgpu::ShaderStage::FRAGMENT, ty: wgpu::BindingType::UniformBuffer { dynamic: false, min_binding_size: None, }, count: None, }; let uniform_bind_group_layout_desc = wgpu::BindGroupLayoutDescriptor{ label: Some("uniform_bind_group_layout"), entries: &[uniform_bind_group_layout_entry] }; let uniform_bind_group_layout = device.create_bind_group_layout(&uniform_bind_group_layout_desc); let uniform_bind_group_desc = wgpu::BindGroupDescriptor { label: Some("uniform_bind_group"), layout: &uniform_bind_group_layout, entries: &[wgpu::BindGroupEntry { binding: 0, resource: wgpu::BindingResource::Buffer(uniform_buffer.slice(..)), }], }; let uniform_bind_group = device.create_bind_group(&uniform_bind_group_desc); // Load Model let res_dir = std::path::Path::new(env!("OUT_DIR")).join("res"); let obj_model = model::Model::load( &device, &queue, &texture_bind_group_layout, res_dir.join("cube.obj"), ).unwrap(); // Create Instances const SPACE_BETWEEN: f32 = 3.0; let instances = (0..NUM_INSTANCES_PER_ROW).flat_map(|z| { (0..NUM_INSTANCES_PER_ROW).map(move |x| { let x = SPACE_BETWEEN * (x as f32 - NUM_INSTANCES_PER_ROW as f32 / 2.0); let z = SPACE_BETWEEN * (z as f32 - NUM_INSTANCES_PER_ROW as f32 / 
2.0); let position = cgmath::Vector3 { x, y: 0.0, z }; let rotation = if position.is_zero() { cgmath::Quaternion::from_axis_angle( cgmath::Vector3::unit_z(), cgmath::Deg(0.0), ) } else { cgmath::Quaternion::from_axis_angle( position.clone().normalize(), cgmath::Deg(45.0), ) }; Instance { position, rotation } }) }) .collect::<Vec<_>>(); let instance_data = instances.iter().map(Instance::to_raw).collect::<Vec<InstanceRaw>>(); let instance_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor{ label: Some("instance_buffer"), contents: bytemuck::cast_slice(&instance_data), usage: wgpu::BufferUsage::VERTEX, }); // Create Light // TODO: Change wgpu tutorial from.into() to [f32; 3] let light = Light { position: [2.0, 2.0, 2.0], _padding: 0, color: [1.0, 1.0, 1.0], }; let lights_array = &[light]; let light_buffer_init_desc = wgpu::util::BufferInitDescriptor { label: Some("light_buffer_init"), contents: bytemuck::cast_slice(lights_array), usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST, }; let light_buffer = device.create_buffer_init(&light_buffer_init_desc); let light_bind_group_layout_desc = wgpu::BindGroupLayoutDescriptor { label: Some("light_bind_group_layout"), entries: &[wgpu::BindGroupLayoutEntry { binding: 0, visibility: wgpu::ShaderStage::VERTEX | wgpu::ShaderStage::FRAGMENT, ty: wgpu::BindingType::UniformBuffer { dynamic: false, min_binding_size: None, }, count: None, }], }; let light_bind_group_layout = device.create_bind_group_layout(&light_bind_group_layout_desc); let light_bind_group_desc = wgpu::BindGroupDescriptor { label: Some("light_bind_group"), layout: &light_bind_group_layout, entries: &[wgpu::BindGroupEntry { binding: 0, resource: wgpu::BindingResource::Buffer(light_buffer.slice(..)), }], }; let light_bind_group = device.create_bind_group(&light_bind_group_desc); // Create Pipeline Layout let pipeline_layout_desc = wgpu::PipelineLayoutDescriptor { label: Some("pipeline_layout"), bind_group_layouts: &[ 
&texture_bind_group_layout, &uniform_bind_group_layout, &light_bind_group_layout, ], push_constant_ranges: &[], }; let render_pipeline_layout = device.create_pipeline_layout(&pipeline_layout_desc); let render_pipeline = create_render_pipeline( &device, &render_pipeline_layout, swap_chain_desc.format, Some(texture::Texture::DEPTH_FORMAT), &[model::ModelVertex::desc(), InstanceRaw::desc()], wgpu::include_spirv!("../shaders/shader.vert.spv"), wgpu::include_spirv!("../shaders/shader.frag.spv"), ); let light_render_pipeline = { let layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor { label: Some("light_render_pipeline_layout"), bind_group_layouts: &[ &uniform_bind_group_layout, &light_bind_group_layout, ], push_constant_ranges: &[], }); let vs_src = wgpu::include_spirv!("../shaders/light.vert.spv"); let fs_src = wgpu::include_spirv!("../shaders/light.frag.spv"); create_render_pipeline( &device, &layout, swap_chain_desc.format, Some(texture::Texture::DEPTH_FORMAT), &[model::ModelVertex::desc()], vs_src, fs_src, ) }; return State { surface, device, queue, swap_chain_desc, swap_chain, render_pipeline, obj_model, camera, camera_controller, projection, uniforms, uniform_buffer, uniform_bind_group, instances, light, light_buffer, light_bind_group, light_render_pipeline, instance_buffer, depth_texture, size, mouse_pressed: false, }; } fn resize(&mut self, new_size: PhysicalSize<u32>) { self.size = new_size; self.swap_chain_desc.width = new_size.width; self.swap_chain_desc.height = new_size.height; self.swap_chain = self.device.create_swap_chain(&self.surface, &self.swap_chain_desc); self.depth_texture = texture::Texture::create_depth_texture(&self.device, &self.swap_chain_desc, "depth_texture"); self.projection.resize(new_size.width, new_size.height); } // Returns a bool to indicate whether an event has been fully processed. 
If `true` the main // loop won't process the event any further fn input(&mut self, event: &DeviceEvent) -> bool { match event { DeviceEvent::MouseWheel { delta,.. } => { self.camera_controller.process_scroll(delta); true } DeviceEvent::Button { button: 1, // Left Mouse Button state, } => { self.mouse_pressed = *state == ElementState::Pressed; true } DeviceEvent::MouseMotion { delta } => { if self.mouse_pressed { self.camera_controller.process_mouse(delta.0, delta.1); } true } DeviceEvent::Motion {.. } => { false } _ => false, } } fn update(&mut self, dt: std::time::Duration) { self.camera_controller.update_camera(&mut self.camera, dt); self.uniforms.update_view_proj(&self.camera, &self.projection); self.queue.write_buffer(&self.uniform_buffer, 0, bytemuck::cast_slice(&[self.uniforms])); // Update light's position let old_position: cgmath::Vector3<f32> = self.light.position.into(); let new_position = cgmath::Quaternion::from_axis_angle((0.0, 1.0, 0.0).into(), cgmath::Deg(60.0 * dt.as_secs_f32())) * old_position; self.light.position = new_position.into(); self.queue.write_buffer(&self.light_buffer, 0, bytemuck::cast_slice(&[self.light])); } fn render(&mut self) -> Result<(), wgpu::SwapChainError> { // Get next frame let frame = self.swap_chain.get_current_frame()?.output; // Create command encoder let command_encoder_desc = wgpu::CommandEncoderDescriptor {
create_render_pipeline
identifier_name
main.rs
swap_chain: wgpu::SwapChain, render_pipeline: wgpu::RenderPipeline, obj_model: model::Model, camera: camera::Camera, camera_controller: camera::CameraController, projection: camera::Projection, uniforms: Uniforms, uniform_buffer: wgpu::Buffer, uniform_bind_group: wgpu::BindGroup, instances: Vec<Instance>, light: Light, light_buffer: wgpu::Buffer, light_bind_group: wgpu::BindGroup, light_render_pipeline: wgpu::RenderPipeline, #[allow(dead_code)] instance_buffer: wgpu::Buffer, depth_texture: texture::Texture, size: PhysicalSize<u32>, // INFO: PhysicalSize takes into account device's scale factor mouse_pressed: bool, } impl State { async fn new(window: &Window) -> Self { // The instance is a handle to our GPU // BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY); let surface = unsafe { instance.create_surface(window) }; // Create Adapter let adapter_options = &wgpu::RequestAdapterOptions { // Default gets LowP on battery and HighP when on mains power_preference: wgpu::PowerPreference::HighPerformance, compatible_surface: Some(&surface), }; // The adapter identifies both an instance of a physical hardware accelerator (CPU, GPU), // and an instance of a browser's implementation of WebGPU on top of the accelerator let adapter = instance.request_adapter(adapter_options).await.unwrap(); // Create Device and Queue let desc = &wgpu::DeviceDescriptor { features: wgpu::Features::empty(), limits: wgpu::Limits::default(), shader_validation: true, }; let (device, queue) = adapter.request_device(desc, None).await.unwrap(); // Create SwapChain let size = window.inner_size(); // INFO: Has into account the scale factor let swap_chain_desc = wgpu::SwapChainDescriptor { usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT, format: wgpu::TextureFormat::Bgra8UnormSrgb, // TODO: Should be swap_chain_get_current_texture_view but not available atm width: size.width, height: size.height, present_mode: wgpu::PresentMode::Fifo, 
}; let swap_chain = device.create_swap_chain(&surface, &swap_chain_desc); // Describe a set of resources and how are they accessed by a Shader let texture_bind_group_layout_desc = wgpu::BindGroupLayoutDescriptor { label: Some("texture_bind_group_layout"), entries: &[ wgpu::BindGroupLayoutEntry { binding: 0, visibility: wgpu::ShaderStage::FRAGMENT, // Bitwise comparison ty: wgpu::BindingType::SampledTexture { multisampled: false, dimension: wgpu::TextureViewDimension::D2, component_type: wgpu::TextureComponentType::Uint, }, count: None, }, wgpu::BindGroupLayoutEntry { binding: 1, visibility: wgpu::ShaderStage::FRAGMENT, ty: wgpu::BindingType::Sampler { comparison: false, }, count: None, }, wgpu::BindGroupLayoutEntry { binding: 2, visibility: wgpu::ShaderStage::FRAGMENT, ty: wgpu::BindingType::SampledTexture { multisampled: false, dimension: wgpu::TextureViewDimension::D2, component_type: wgpu::TextureComponentType::Uint, }, count: None, }, wgpu::BindGroupLayoutEntry { binding: 3, visibility: wgpu::ShaderStage::FRAGMENT, ty: wgpu::BindingType::Sampler { comparison: false, }, count: None, }, ], }; let texture_bind_group_layout = device.create_bind_group_layout(&texture_bind_group_layout_desc); let depth_texture = texture::Texture::create_depth_texture(&device, &swap_chain_desc, "depth_texture"); let camera = camera::Camera::new((0.0, 5.0, 10.0), cgmath::Deg(-90.0), cgmath::Deg(-20.0)); let projection = camera::Projection::new(swap_chain_desc.width, swap_chain_desc.height, cgmath::Deg(45.0), 0.1, 100.0); let camera_controller = camera::CameraController::new(4.0, 1.0); // Create Uniform Buffers let mut uniforms = Uniforms::new(); uniforms.update_view_proj(&camera, &projection); let uniforms_array = &[uniforms]; let uniform_buffer_desc = wgpu::util::BufferInitDescriptor { label: Some("uniform_buffer"), contents: bytemuck::cast_slice(uniforms_array), usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST, }; let uniform_buffer = 
device.create_buffer_init(&uniform_buffer_desc); // Create Uniform Bind Group let uniform_bind_group_layout_entry = wgpu::BindGroupLayoutEntry { binding: 0, visibility: wgpu::ShaderStage::VERTEX | wgpu::ShaderStage::FRAGMENT, ty: wgpu::BindingType::UniformBuffer { dynamic: false, min_binding_size: None, }, count: None, }; let uniform_bind_group_layout_desc = wgpu::BindGroupLayoutDescriptor{ label: Some("uniform_bind_group_layout"), entries: &[uniform_bind_group_layout_entry] }; let uniform_bind_group_layout = device.create_bind_group_layout(&uniform_bind_group_layout_desc); let uniform_bind_group_desc = wgpu::BindGroupDescriptor { label: Some("uniform_bind_group"), layout: &uniform_bind_group_layout, entries: &[wgpu::BindGroupEntry { binding: 0, resource: wgpu::BindingResource::Buffer(uniform_buffer.slice(..)), }], }; let uniform_bind_group = device.create_bind_group(&uniform_bind_group_desc); // Load Model let res_dir = std::path::Path::new(env!("OUT_DIR")).join("res"); let obj_model = model::Model::load( &device, &queue, &texture_bind_group_layout, res_dir.join("cube.obj"), ).unwrap(); // Create Instances const SPACE_BETWEEN: f32 = 3.0; let instances = (0..NUM_INSTANCES_PER_ROW).flat_map(|z| { (0..NUM_INSTANCES_PER_ROW).map(move |x| { let x = SPACE_BETWEEN * (x as f32 - NUM_INSTANCES_PER_ROW as f32 / 2.0); let z = SPACE_BETWEEN * (z as f32 - NUM_INSTANCES_PER_ROW as f32 / 2.0); let position = cgmath::Vector3 { x, y: 0.0, z }; let rotation = if position.is_zero() { cgmath::Quaternion::from_axis_angle( cgmath::Vector3::unit_z(), cgmath::Deg(0.0), ) } else { cgmath::Quaternion::from_axis_angle( position.clone().normalize(), cgmath::Deg(45.0), ) }; Instance { position, rotation } }) }) .collect::<Vec<_>>(); let instance_data = instances.iter().map(Instance::to_raw).collect::<Vec<InstanceRaw>>(); let instance_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor{ label: Some("instance_buffer"), contents: bytemuck::cast_slice(&instance_data), usage: 
wgpu::BufferUsage::VERTEX, }); // Create Light // TODO: Change wgpu tutorial from.into() to [f32; 3] let light = Light { position: [2.0, 2.0, 2.0], _padding: 0, color: [1.0, 1.0, 1.0], }; let lights_array = &[light]; let light_buffer_init_desc = wgpu::util::BufferInitDescriptor { label: Some("light_buffer_init"), contents: bytemuck::cast_slice(lights_array), usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST, }; let light_buffer = device.create_buffer_init(&light_buffer_init_desc); let light_bind_group_layout_desc = wgpu::BindGroupLayoutDescriptor { label: Some("light_bind_group_layout"), entries: &[wgpu::BindGroupLayoutEntry { binding: 0, visibility: wgpu::ShaderStage::VERTEX | wgpu::ShaderStage::FRAGMENT, ty: wgpu::BindingType::UniformBuffer { dynamic: false, min_binding_size: None, }, count: None, }], }; let light_bind_group_layout = device.create_bind_group_layout(&light_bind_group_layout_desc); let light_bind_group_desc = wgpu::BindGroupDescriptor { label: Some("light_bind_group"), layout: &light_bind_group_layout, entries: &[wgpu::BindGroupEntry { binding: 0, resource: wgpu::BindingResource::Buffer(light_buffer.slice(..)), }], }; let light_bind_group = device.create_bind_group(&light_bind_group_desc); // Create Pipeline Layout let pipeline_layout_desc = wgpu::PipelineLayoutDescriptor { label: Some("pipeline_layout"), bind_group_layouts: &[ &texture_bind_group_layout, &uniform_bind_group_layout, &light_bind_group_layout, ], push_constant_ranges: &[], }; let render_pipeline_layout = device.create_pipeline_layout(&pipeline_layout_desc); let render_pipeline = create_render_pipeline( &device, &render_pipeline_layout, swap_chain_desc.format, Some(texture::Texture::DEPTH_FORMAT), &[model::ModelVertex::desc(), InstanceRaw::desc()], wgpu::include_spirv!("../shaders/shader.vert.spv"), wgpu::include_spirv!("../shaders/shader.frag.spv"), ); let light_render_pipeline = { let layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor { label: 
Some("light_render_pipeline_layout"), bind_group_layouts: &[ &uniform_bind_group_layout, &light_bind_group_layout, ], push_constant_ranges: &[], }); let vs_src = wgpu::include_spirv!("../shaders/light.vert.spv"); let fs_src = wgpu::include_spirv!("../shaders/light.frag.spv"); create_render_pipeline( &device, &layout, swap_chain_desc.format, Some(texture::Texture::DEPTH_FORMAT), &[model::ModelVertex::desc()], vs_src, fs_src, ) }; return State { surface, device, queue, swap_chain_desc, swap_chain, render_pipeline, obj_model, camera, camera_controller, projection, uniforms, uniform_buffer, uniform_bind_group, instances, light, light_buffer, light_bind_group, light_render_pipeline, instance_buffer, depth_texture, size, mouse_pressed: false, }; } fn resize(&mut self, new_size: PhysicalSize<u32>) { self.size = new_size; self.swap_chain_desc.width = new_size.width; self.swap_chain_desc.height = new_size.height; self.swap_chain = self.device.create_swap_chain(&self.surface, &self.swap_chain_desc); self.depth_texture = texture::Texture::create_depth_texture(&self.device, &self.swap_chain_desc, "depth_texture"); self.projection.resize(new_size.width, new_size.height); } // Returns a bool to indicate whether an event has been fully processed. If `true` the main // loop won't process the event any further fn input(&mut self, event: &DeviceEvent) -> bool { match event { DeviceEvent::MouseWheel { delta,.. } => { self.camera_controller.process_scroll(delta); true } DeviceEvent::Button { button: 1, // Left Mouse Button state, } => { self.mouse_pressed = *state == ElementState::Pressed; true } DeviceEvent::MouseMotion { delta } => { if self.mouse_pressed { self.camera_controller.process_mouse(delta.0, delta.1); } true } DeviceEvent::Motion {.. 
} => { false } _ => false, } } fn update(&mut self, dt: std::time::Duration) { self.camera_controller.update_camera(&mut self.camera, dt); self.uniforms.update_view_proj(&self.camera, &self.projection); self.queue.write_buffer(&self.uniform_buffer, 0, bytemuck::cast_slice(&[self.uniforms])); // Update light's position let old_position: cgmath::Vector3<f32> = self.light.position.into(); let new_position = cgmath::Quaternion::from_axis_angle((0.0, 1.0, 0.0).into(), cgmath::Deg(60.0 * dt.as_secs_f32())) * old_position; self.light.position = new_position.into(); self.queue.write_buffer(&self.light_buffer, 0, bytemuck::cast_slice(&[self.light])); } fn render(&mut self) -> Result<(), wgpu::SwapChainError> { // Get next frame let frame = self.swap_chain.get_current_frame()?.output; // Create command encoder let command_encoder_desc = wgpu::CommandEncoderDescriptor { label: Some("command_encoder"), }; let mut encoder = self.device.create_command_encoder(&command_encoder_desc); { // Create Render Pass let clear_color = wgpu::Color { r: 0.1, g: 0.1, b: 0.1, a: 1.0, }; let render_pass_desc = wgpu::RenderPassDescriptor { // Color Attachments color_attachments: &[wgpu::RenderPassColorAttachmentDescriptor { attachment: &frame.view, // Current frame texture view resolve_target: None, // Only used if multisampling is enabled ops: wgpu::Operations { load: wgpu::LoadOp::Clear(clear_color), store: true, }, }], // Depth Stencil Attachments depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachmentDescriptor { attachment: &self.depth_texture.view, depth_ops: Some(wgpu::Operations { load: wgpu::LoadOp::Clear(1.0), // Clear before use store: true, // Render Pass will write here: true }), stencil_ops: None, }), }; let mut render_pass = encoder.begin_render_pass(&render_pass_desc); render_pass.set_vertex_buffer(1, self.instance_buffer.slice(..)); render_pass.set_pipeline(&self.light_render_pipeline); // TODO: Refactor draw_light_model to light_model render_pass.draw_light_model( 
&self.obj_model, &self.uniform_bind_group, &self.light_bind_group, ); render_pass.set_pipeline(&self.render_pipeline); render_pass.draw_model_instanced( &self.obj_model, 0..self.instances.len() as u32, &self.uniform_bind_group, &self.light_bind_group, ); } self.queue.submit(std::iter::once(encoder.finish())); return Ok(()); } } fn handle_keyboard_input(state: &mut State, input: KeyboardInput, control_flow: &mut ControlFlow) { match input { KeyboardInput { virtual_keycode: key, state: element_state,.. } => { match (key, element_state) { (Some(VirtualKeyCode::Escape), ElementState::Pressed) => { *control_flow = ControlFlow::Exit; } (Some(_), _) => { state.camera_controller.process_keyboard(key.unwrap(), element_state); } _ => {} } } } } fn handle_window_events(state: &mut State, event: WindowEvent, control_flow: &mut ControlFlow) { match event { WindowEvent::KeyboardInput { input,../*device_id, is_synthetic*/ } => { handle_keyboard_input(state, input, control_flow); }, WindowEvent::Resized(physical_size) => {
state.resize(physical_size) }, WindowEvent::ScaleFactorChanged {new_inner_size, ../*scale_factor*/ } => { state.resize(*new_inner_size) },
random_line_split
main.rs
format: wgpu::VertexFormat::Float4, }, wgpu::VertexAttributeDescriptor { offset: mem::size_of::<[f32; 8]>() as wgpu::BufferAddress, shader_location: 7, format: wgpu::VertexFormat::Float4, }, wgpu::VertexAttributeDescriptor { offset: mem::size_of::<[f32; 12]>() as wgpu::BufferAddress, shader_location: 8, format: wgpu::VertexFormat::Float4, }, ]; return wgpu::VertexBufferDescriptor { stride: mem::size_of::<InstanceRaw>() as wgpu::BufferAddress, // We need to switch from using a step mode of Vertex to Instance // This means that our shaders will only change to use the next // instance when the shader starts processing a new instance step_mode: wgpu::InputStepMode::Instance, attributes: attrib, }; } } //============================================================================= fn create_render_pipeline( device: &wgpu::Device, layout: &wgpu::PipelineLayout, color_format: wgpu::TextureFormat, depth_format: Option<wgpu::TextureFormat>, vertex_descs: &[wgpu::VertexBufferDescriptor], vs_src: wgpu::ShaderModuleSource, fs_src: wgpu::ShaderModuleSource, ) -> wgpu::RenderPipeline { // Create ShaderModules let vs_module = device.create_shader_module(vs_src); let fs_module = device.create_shader_module(fs_src); // Create Render Pipeline let render_pipeline_desc = wgpu::RenderPipelineDescriptor { label: Some("render_pipeline"), layout: Some(&layout), vertex_stage: wgpu::ProgrammableStageDescriptor { module: &vs_module, entry_point: "main", }, fragment_stage: Some(wgpu::ProgrammableStageDescriptor { module: &fs_module, entry_point: "main", }), rasterization_state: Some(wgpu::RasterizationStateDescriptor{ front_face: wgpu::FrontFace::Ccw, cull_mode: wgpu::CullMode::Back, depth_bias: 0, depth_bias_slope_scale: 0.0, depth_bias_clamp: 0.0, clamp_depth: false, }), color_states: &[wgpu::ColorStateDescriptor{ // Define how colors are stored and processed format: color_format, color_blend: wgpu::BlendDescriptor::REPLACE, alpha_blend: wgpu::BlendDescriptor::REPLACE, write_mask: 
wgpu::ColorWrite::ALL, }], primitive_topology: wgpu::PrimitiveTopology::TriangleList, depth_stencil_state: depth_format.map(|format| wgpu::DepthStencilStateDescriptor { format, depth_write_enabled: true, // When to discard a new pixel. Drawn front to back. Depth should be less (closer // to camera) to discard the previous pixel on the texture depth_compare: wgpu::CompareFunction::Less, stencil: wgpu::StencilStateDescriptor::default(), }), vertex_state: wgpu::VertexStateDescriptor { index_format: wgpu::IndexFormat::Uint32, vertex_buffers: vertex_descs, }, sample_count: 1, sample_mask:!0, // Use all samples alpha_to_coverage_enabled: false, }; let render_pipeline = device.create_render_pipeline(&render_pipeline_desc); return render_pipeline; } struct State { surface: wgpu::Surface, device: wgpu::Device, queue: wgpu::Queue, swap_chain_desc: wgpu::SwapChainDescriptor, swap_chain: wgpu::SwapChain, render_pipeline: wgpu::RenderPipeline, obj_model: model::Model, camera: camera::Camera, camera_controller: camera::CameraController, projection: camera::Projection, uniforms: Uniforms, uniform_buffer: wgpu::Buffer, uniform_bind_group: wgpu::BindGroup, instances: Vec<Instance>, light: Light, light_buffer: wgpu::Buffer, light_bind_group: wgpu::BindGroup, light_render_pipeline: wgpu::RenderPipeline, #[allow(dead_code)] instance_buffer: wgpu::Buffer, depth_texture: texture::Texture, size: PhysicalSize<u32>, // INFO: PhysicalSize takes into account device's scale factor mouse_pressed: bool, } impl State { async fn new(window: &Window) -> Self { // The instance is a handle to our GPU // BackendBit::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY); let surface = unsafe { instance.create_surface(window) }; // Create Adapter let adapter_options = &wgpu::RequestAdapterOptions { // Default gets LowP on battery and HighP when on mains power_preference: wgpu::PowerPreference::HighPerformance, compatible_surface: Some(&surface), 
}; // The adapter identifies both an instance of a physical hardware accelerator (CPU, GPU), // and an instance of a browser's implementation of WebGPU on top of the accelerator let adapter = instance.request_adapter(adapter_options).await.unwrap(); // Create Device and Queue let desc = &wgpu::DeviceDescriptor { features: wgpu::Features::empty(), limits: wgpu::Limits::default(), shader_validation: true, }; let (device, queue) = adapter.request_device(desc, None).await.unwrap(); // Create SwapChain let size = window.inner_size(); // INFO: Has into account the scale factor let swap_chain_desc = wgpu::SwapChainDescriptor { usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT, format: wgpu::TextureFormat::Bgra8UnormSrgb, // TODO: Should be swap_chain_get_current_texture_view but not available atm width: size.width, height: size.height, present_mode: wgpu::PresentMode::Fifo, }; let swap_chain = device.create_swap_chain(&surface, &swap_chain_desc); // Describe a set of resources and how are they accessed by a Shader let texture_bind_group_layout_desc = wgpu::BindGroupLayoutDescriptor { label: Some("texture_bind_group_layout"), entries: &[ wgpu::BindGroupLayoutEntry { binding: 0, visibility: wgpu::ShaderStage::FRAGMENT, // Bitwise comparison ty: wgpu::BindingType::SampledTexture { multisampled: false, dimension: wgpu::TextureViewDimension::D2, component_type: wgpu::TextureComponentType::Uint, }, count: None, }, wgpu::BindGroupLayoutEntry { binding: 1, visibility: wgpu::ShaderStage::FRAGMENT, ty: wgpu::BindingType::Sampler { comparison: false, }, count: None, }, wgpu::BindGroupLayoutEntry { binding: 2, visibility: wgpu::ShaderStage::FRAGMENT, ty: wgpu::BindingType::SampledTexture { multisampled: false, dimension: wgpu::TextureViewDimension::D2, component_type: wgpu::TextureComponentType::Uint, }, count: None, }, wgpu::BindGroupLayoutEntry { binding: 3, visibility: wgpu::ShaderStage::FRAGMENT, ty: wgpu::BindingType::Sampler { comparison: false, }, count: None, }, ], }; let 
texture_bind_group_layout = device.create_bind_group_layout(&texture_bind_group_layout_desc); let depth_texture = texture::Texture::create_depth_texture(&device, &swap_chain_desc, "depth_texture"); let camera = camera::Camera::new((0.0, 5.0, 10.0), cgmath::Deg(-90.0), cgmath::Deg(-20.0)); let projection = camera::Projection::new(swap_chain_desc.width, swap_chain_desc.height, cgmath::Deg(45.0), 0.1, 100.0); let camera_controller = camera::CameraController::new(4.0, 1.0); // Create Uniform Buffers let mut uniforms = Uniforms::new(); uniforms.update_view_proj(&camera, &projection); let uniforms_array = &[uniforms]; let uniform_buffer_desc = wgpu::util::BufferInitDescriptor { label: Some("uniform_buffer"), contents: bytemuck::cast_slice(uniforms_array), usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST, }; let uniform_buffer = device.create_buffer_init(&uniform_buffer_desc); // Create Uniform Bind Group let uniform_bind_group_layout_entry = wgpu::BindGroupLayoutEntry { binding: 0, visibility: wgpu::ShaderStage::VERTEX | wgpu::ShaderStage::FRAGMENT, ty: wgpu::BindingType::UniformBuffer { dynamic: false, min_binding_size: None, }, count: None, }; let uniform_bind_group_layout_desc = wgpu::BindGroupLayoutDescriptor{ label: Some("uniform_bind_group_layout"), entries: &[uniform_bind_group_layout_entry] }; let uniform_bind_group_layout = device.create_bind_group_layout(&uniform_bind_group_layout_desc); let uniform_bind_group_desc = wgpu::BindGroupDescriptor { label: Some("uniform_bind_group"), layout: &uniform_bind_group_layout, entries: &[wgpu::BindGroupEntry { binding: 0, resource: wgpu::BindingResource::Buffer(uniform_buffer.slice(..)), }], }; let uniform_bind_group = device.create_bind_group(&uniform_bind_group_desc); // Load Model let res_dir = std::path::Path::new(env!("OUT_DIR")).join("res"); let obj_model = model::Model::load( &device, &queue, &texture_bind_group_layout, res_dir.join("cube.obj"), ).unwrap(); // Create Instances const SPACE_BETWEEN: f32 
= 3.0; let instances = (0..NUM_INSTANCES_PER_ROW).flat_map(|z| { (0..NUM_INSTANCES_PER_ROW).map(move |x| { let x = SPACE_BETWEEN * (x as f32 - NUM_INSTANCES_PER_ROW as f32 / 2.0); let z = SPACE_BETWEEN * (z as f32 - NUM_INSTANCES_PER_ROW as f32 / 2.0); let position = cgmath::Vector3 { x, y: 0.0, z }; let rotation = if position.is_zero() { cgmath::Quaternion::from_axis_angle( cgmath::Vector3::unit_z(), cgmath::Deg(0.0), ) } else { cgmath::Quaternion::from_axis_angle( position.clone().normalize(), cgmath::Deg(45.0), ) }; Instance { position, rotation } }) }) .collect::<Vec<_>>(); let instance_data = instances.iter().map(Instance::to_raw).collect::<Vec<InstanceRaw>>(); let instance_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor{ label: Some("instance_buffer"), contents: bytemuck::cast_slice(&instance_data), usage: wgpu::BufferUsage::VERTEX, }); // Create Light // TODO: Change wgpu tutorial from.into() to [f32; 3] let light = Light { position: [2.0, 2.0, 2.0], _padding: 0, color: [1.0, 1.0, 1.0], }; let lights_array = &[light]; let light_buffer_init_desc = wgpu::util::BufferInitDescriptor { label: Some("light_buffer_init"), contents: bytemuck::cast_slice(lights_array), usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST, }; let light_buffer = device.create_buffer_init(&light_buffer_init_desc); let light_bind_group_layout_desc = wgpu::BindGroupLayoutDescriptor { label: Some("light_bind_group_layout"), entries: &[wgpu::BindGroupLayoutEntry { binding: 0, visibility: wgpu::ShaderStage::VERTEX | wgpu::ShaderStage::FRAGMENT, ty: wgpu::BindingType::UniformBuffer { dynamic: false, min_binding_size: None, }, count: None, }], }; let light_bind_group_layout = device.create_bind_group_layout(&light_bind_group_layout_desc); let light_bind_group_desc = wgpu::BindGroupDescriptor { label: Some("light_bind_group"), layout: &light_bind_group_layout, entries: &[wgpu::BindGroupEntry { binding: 0, resource: 
wgpu::BindingResource::Buffer(light_buffer.slice(..)), }], }; let light_bind_group = device.create_bind_group(&light_bind_group_desc); // Create Pipeline Layout let pipeline_layout_desc = wgpu::PipelineLayoutDescriptor { label: Some("pipeline_layout"), bind_group_layouts: &[ &texture_bind_group_layout, &uniform_bind_group_layout, &light_bind_group_layout, ], push_constant_ranges: &[], }; let render_pipeline_layout = device.create_pipeline_layout(&pipeline_layout_desc); let render_pipeline = create_render_pipeline( &device, &render_pipeline_layout, swap_chain_desc.format, Some(texture::Texture::DEPTH_FORMAT), &[model::ModelVertex::desc(), InstanceRaw::desc()], wgpu::include_spirv!("../shaders/shader.vert.spv"), wgpu::include_spirv!("../shaders/shader.frag.spv"), ); let light_render_pipeline = { let layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor { label: Some("light_render_pipeline_layout"), bind_group_layouts: &[ &uniform_bind_group_layout, &light_bind_group_layout, ], push_constant_ranges: &[], }); let vs_src = wgpu::include_spirv!("../shaders/light.vert.spv"); let fs_src = wgpu::include_spirv!("../shaders/light.frag.spv"); create_render_pipeline( &device, &layout, swap_chain_desc.format, Some(texture::Texture::DEPTH_FORMAT), &[model::ModelVertex::desc()], vs_src, fs_src, ) }; return State { surface, device, queue, swap_chain_desc, swap_chain, render_pipeline, obj_model, camera, camera_controller, projection, uniforms, uniform_buffer, uniform_bind_group, instances, light, light_buffer, light_bind_group, light_render_pipeline, instance_buffer, depth_texture, size, mouse_pressed: false, }; } fn resize(&mut self, new_size: PhysicalSize<u32>) { self.size = new_size; self.swap_chain_desc.width = new_size.width; self.swap_chain_desc.height = new_size.height; self.swap_chain = self.device.create_swap_chain(&self.surface, &self.swap_chain_desc); self.depth_texture = texture::Texture::create_depth_texture(&self.device, &self.swap_chain_desc, 
"depth_texture"); self.projection.resize(new_size.width, new_size.height); } // Returns a bool to indicate whether an event has been fully processed. If `true` the main // loop won't process the event any further fn input(&mut self, event: &DeviceEvent) -> bool { match event { DeviceEvent::MouseWheel { delta,.. } =>
DeviceEvent::Button { button: 1, // Left Mouse Button state, } => { self.mouse_pressed = *state == ElementState::Pressed; true } DeviceEvent::MouseMotion { delta } => { if self.mouse_pressed { self.camera_controller.process_mouse(delta.0, delta.1); } true } DeviceEvent::Motion {.. } => { false } _ => false, } } fn update(&mut self, dt: std::time::Duration) { self.camera_controller.update_camera(&mut self.camera, dt); self.uniforms.update_view_proj(&self.camera, &self.projection); self.queue.write_buffer(&self.uniform_buffer, 0, bytemuck::cast_slice(&[self.uniforms])); // Update light's position let old_position: cgmath::Vector3<f32> = self.light.position.into(); let new_position = cgmath::Quaternion::from_axis_angle((0.0, 1.0, 0.0).into(), cgmath::Deg(60.0 * dt.as_secs_f32())) * old_position; self.light.position = new_position.into(); self.queue.write_buffer(&self.light_buffer, 0, bytemuck::cast_slice(&[self.light])); } fn render(&mut self) -> Result<(), wgpu::SwapChainError> { // Get next frame let frame = self.swap_chain.get_current_frame()?.output; // Create command encoder let command_encoder_desc = wgpu::CommandEncoderDescriptor {
{ self.camera_controller.process_scroll(delta); true }
conditional_block
lib.rs
#![recursion_limit = "256"] extern crate proc_macro; use proc_macro::TokenStream; use proc_macro2; use quote::quote; use regex::Regex; use std::collections::HashSet; use syn::{parse_macro_input, DeriveInput}; #[derive(Debug, PartialEq)] enum RouteToRegexError { MissingLeadingForwardSlash, NonAsciiChars, InvalidIdentifier(String), InvalidTrailingSlash, CharactersAfterWildcard, } fn route_to_regex(route: &str) -> Result<(String, String), RouteToRegexError> { enum ParseState { Initial, Static, VarName(String), WildcardFound, }; if!route.is_ascii() { return Err(RouteToRegexError::NonAsciiChars); } let ident_regex = Regex::new(r"^[a-zA-Z][a-zA-Z0-9_]*$").unwrap(); let mut regex = "".to_string(); let mut format_str = "".to_string(); let mut parse_state = ParseState::Initial; for byte in route.chars() { match parse_state { ParseState::Initial => { if byte!= '/' { return Err(RouteToRegexError::MissingLeadingForwardSlash); } regex += "^/"; format_str += "/"; parse_state = ParseState::Static; } ParseState::Static => { if byte == ':' { format_str.push('{'); parse_state = ParseState::VarName("".to_string()); } else { regex.push(byte); format_str.push(byte); parse_state = ParseState::Static; } } ParseState::VarName(mut name) => { if byte == '/' { // Validate 'name' as a Rust identifier if!ident_regex.is_match(&name) { return Err(RouteToRegexError::InvalidIdentifier(name)); } regex += &format!("(?P<{}>[^/]+)/", name); format_str += &format!("{}}}/", name); parse_state = ParseState::Static; } else if byte == '*' { // Found a wildcard - add the var name to the regex // Validate 'name' as a Rust identifier if!ident_regex.is_match(&name) { return Err(RouteToRegexError::InvalidIdentifier(name)); } regex += &format!("(?P<{}>.*)", name); format_str += &format!("{}}}", name); parse_state = ParseState::WildcardFound; } else { name.push(byte); parse_state = ParseState::VarName(name); } } ParseState::WildcardFound => { return Err(RouteToRegexError::CharactersAfterWildcard); } }; } if let 
ParseState::VarName(name) = parse_state { regex += &format!("(?P<{}>[^/]+)", name); format_str += &format!("{}}}", name); } if regex.ends_with('/') { return Err(RouteToRegexError::InvalidTrailingSlash); } regex += "$"; Ok((regex, format_str)) } #[test] fn test_route_to_regex() { let (regex, _) = route_to_regex("/p/:project_id/exams/:exam_id/submissions_expired").unwrap(); assert_eq!( regex, r"^/p/(?P<project_id>[^/]+)/exams/(?P<exam_id>[^/]+)/submissions_expired$" ); } #[test] fn test_route_to_regex_no_path_params() { let (regex, _) = route_to_regex("/p/exams/submissions_expired").unwrap(); assert_eq!(regex, r"^/p/exams/submissions_expired$"); } #[test] fn test_route_to_regex_no_leading_slash() { let regex = route_to_regex("p/exams/submissions_expired"); assert_eq!(regex, Err(RouteToRegexError::MissingLeadingForwardSlash)); } #[test] fn test_route_to_regex_non_ascii_chars() { let regex = route_to_regex("🥖p🥖:project_id🥖exams🥖:exam_id🥖submissions_expired"); assert_eq!(regex, Err(RouteToRegexError::NonAsciiChars)); } #[test] fn test_route_to_regex_invalid_ident() { let regex =
st_route_to_regex_characters_after_wildcard() { let regex = route_to_regex("/p/:project_id/exams/:exam*ID/submissions_expired"); assert_eq!( regex, Err(RouteToRegexError::CharactersAfterWildcard) ); } #[test] fn test_route_to_regex_invalid_ending() { let regex = route_to_regex("/p/:project_id/exams/:exam_id/submissions_expired/"); assert_eq!(regex, Err(RouteToRegexError::InvalidTrailingSlash)); } fn get_string_attr(name: &str, attrs: &[syn::Attribute]) -> Option<String> { for attr in attrs { let attr = attr.parse_meta(); if let Ok(syn::Meta::List(ref list)) = attr { if list.ident == name { for thing in &list.nested { if let syn::NestedMeta::Literal(syn::Lit::Str(str_lit)) = thing { return Some(str_lit.value()); } } } } } None } fn has_flag_attr(name: &str, attrs: &[syn::Attribute]) -> bool { for attr in attrs { let attr = attr.parse_meta(); if let Ok(syn::Meta::Word(ref ident)) = attr { if ident == name { return true; } } } false } fn get_struct_fields(data: &syn::Data) -> Vec<syn::Field> { match data { syn::Data::Struct(data_struct) => match data_struct.fields { syn::Fields::Named(ref named_fields) => named_fields.named.iter().cloned().collect(), _ => panic!("Struct fields must be named"), }, _ => panic!("AppRoute derive is only supported for structs"), } } fn field_is_option(field: &syn::Field) -> bool { match field.ty { syn::Type::Path(ref type_path) => type_path .path .segments .iter() .last() .map(|segment| segment.ident == "Option") .unwrap_or(false), _ => false, } } #[proc_macro_derive(AppRoute, attributes(route, query))] pub fn app_route_derive(input: TokenStream) -> TokenStream { let input = parse_macro_input!(input as DeriveInput); let struct_fields = get_struct_fields(&input.data); let (route_fields, query_fields): (Vec<_>, Vec<_>) = struct_fields .into_iter() .partition(|f|!has_flag_attr("query", &f.attrs)); let name = &input.ident; let generics = input.generics; let (impl_generics, ty_generics, where_clause) = generics.split_for_impl(); let 
route_string = get_string_attr("route", &input.attrs); let url_route = route_string.expect( "derive(AppRoute) requires a #[route(\"/your/route/here\")] attribute on the struct", ); let (route_regex_str, format_str) = route_to_regex(&url_route).expect("Could not convert route attribute to a valid regex"); // Validate route_regex and make sure struct and route have matching fields let route_regex = Regex::new(&route_regex_str).expect("route attribute was not compiled into a valid regex"); let regex_capture_names_set: HashSet<String> = route_regex .capture_names() .filter_map(|c_opt| c_opt.map(|c| c.to_string())) .collect(); let field_names_set: HashSet<String> = route_fields .clone() .into_iter() .map(|f| f.ident.unwrap().to_string()) .collect(); if regex_capture_names_set!= field_names_set { let missing_from_route = field_names_set.difference(&regex_capture_names_set); let missing_from_struct = regex_capture_names_set.difference(&field_names_set); let error_msg = format!("\nFields in struct missing from route pattern: {:?}\nFields in route missing from struct: {:?}", missing_from_route, missing_from_struct); panic!(error_msg); } let route_field_assignments = route_fields.clone().into_iter().map(|f| { let f_ident = f.ident.unwrap(); let f_ident_str = f_ident.to_string(); quote! { #f_ident: captures[#f_ident_str].parse().map_err(|e| { RouteParseErr::ParamParseErr(std::string::ToString::to_string(&e)) })? } }); let query_field_assignments = query_fields.clone().into_iter().map(|f| { let is_option = field_is_option(&f); let f_ident = f.ident.unwrap(); if is_option { quote! { #f_ident: query_string.and_then(|q| qs::from_str(q).ok()) } } else { quote! { #f_ident: qs::from_str(query_string.ok_or(RouteParseErr::NoQueryString)?).map_err(|e| RouteParseErr::QueryParseErr(e.description().to_string()))? } } }); let route_field_parsers = quote! { #( #route_field_assignments ),* }; let query_field_parsers = quote! 
{ #( #query_field_assignments ),* }; let format_args = route_fields.clone().into_iter().map(|f| { let f_ident = f.ident.unwrap(); quote! { #f_ident = self.#f_ident } }); let format_args = quote! { #( #format_args ),* }; let query_field_to_string_statements = query_fields.into_iter().map(|f| { let is_option = field_is_option(&f); let f_ident = f.ident.unwrap(); if is_option { quote! { self.#f_ident.as_ref().and_then(|q| qs::to_string(&q).ok()) } } else { quote! { qs::to_string(&self.#f_ident).ok() } } }); let encoded_query_fields = quote! { #( #query_field_to_string_statements ),* }; let struct_constructor = match ( route_field_parsers.is_empty(), query_field_parsers.is_empty(), ) { (true, true) => quote! { #name {} }, (true, false) => quote! { #name { #query_field_parsers } }, (false, true) => quote! { #name { #route_field_parsers } }, (false, false) => quote! { #name { #route_field_parsers, #query_field_parsers } }, }; let app_route_impl = quote! { impl #impl_generics app_route::AppRoute for #name #ty_generics #where_clause { fn path_pattern() -> String { #route_regex_str.to_string() } fn query_string(&self) -> Option<String> { use app_route::serde_qs as qs; // TODO - Remove duplicates because // there could be multiple fields with // a #[query] attribute that have common fields // TODO - can this be done with an on-stack array? 
let encoded_queries: Vec<Option<String>> = vec![#encoded_query_fields]; let filtered: Vec<_> = encoded_queries.into_iter().filter_map(std::convert::identity).collect(); if!filtered.is_empty() { Some(filtered.join("&")) } else { None } } } impl #impl_generics std::fmt::Display for #name #ty_generics #where_clause { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { if let Some(query) = self.query_string() { let path = format!( #format_str, #format_args ); write!(f, "{}?{}", path, query) } else { write!( f, #format_str, #format_args ) } } } impl #impl_generics std::str::FromStr for #name #ty_generics #where_clause { type Err = app_route::RouteParseErr; fn from_str(app_path: &str) -> Result<Self, Self::Err> { use app_route::serde_qs as qs; use app_route::RouteParseErr; app_route::lazy_static! { static ref ROUTE_REGEX: app_route::Regex = app_route::Regex::new(#route_regex_str).expect("Failed to compile regex"); } let question_pos = app_path.find('?'); let just_path = &app_path[..(question_pos.unwrap_or_else(|| app_path.len()))]; let captures = (*ROUTE_REGEX).captures(just_path).ok_or(RouteParseErr::NoMatches)?; let query_string = question_pos.map(|question_pos| { let mut query_string = &app_path[question_pos..]; if query_string.starts_with('?') { query_string = &query_string[1..]; } query_string }); Ok(#struct_constructor) } } }; let impl_wrapper = syn::Ident::new( &format!("_IMPL_APPROUTE_FOR_{}", name.to_string()), proc_macro2::Span::call_site(), ); let out = quote! { const #impl_wrapper: () = { extern crate app_route; #app_route_impl }; }; out.into() }
route_to_regex("/p/:project_id/exams/:_exam_id/submissions_expired"); assert_eq!( regex, Err(RouteToRegexError::InvalidIdentifier("_exam_id".to_string())) ); } #[test] fn te
identifier_body
lib.rs
#![recursion_limit = "256"] extern crate proc_macro; use proc_macro::TokenStream; use proc_macro2; use quote::quote; use regex::Regex; use std::collections::HashSet; use syn::{parse_macro_input, DeriveInput}; #[derive(Debug, PartialEq)] enum RouteToRegexError { MissingLeadingForwardSlash, NonAsciiChars, InvalidIdentifier(String), InvalidTrailingSlash, CharactersAfterWildcard, } fn route_to_regex(route: &str) -> Result<(String, String), RouteToRegexError> { enum ParseState { Initial, Static, VarName(String), WildcardFound, }; if!route.is_ascii() { return Err(RouteToRegexError::NonAsciiChars); } let ident_regex = Regex::new(r"^[a-zA-Z][a-zA-Z0-9_]*$").unwrap(); let mut regex = "".to_string(); let mut format_str = "".to_string(); let mut parse_state = ParseState::Initial; for byte in route.chars() { match parse_state { ParseState::Initial => { if byte!= '/' { return Err(RouteToRegexError::MissingLeadingForwardSlash); } regex += "^/"; format_str += "/"; parse_state = ParseState::Static; } ParseState::Static => { if byte == ':' { format_str.push('{'); parse_state = ParseState::VarName("".to_string()); } else { regex.push(byte); format_str.push(byte); parse_state = ParseState::Static; } } ParseState::VarName(mut name) => { if byte == '/' { // Validate 'name' as a Rust identifier if!ident_regex.is_match(&name) { return Err(RouteToRegexError::InvalidIdentifier(name)); } regex += &format!("(?P<{}>[^/]+)/", name); format_str += &format!("{}}}/", name); parse_state = ParseState::Static; } else if byte == '*' { // Found a wildcard - add the var name to the regex // Validate 'name' as a Rust identifier if!ident_regex.is_match(&name) { return Err(RouteToRegexError::InvalidIdentifier(name)); } regex += &format!("(?P<{}>.*)", name); format_str += &format!("{}}}", name); parse_state = ParseState::WildcardFound; } else { name.push(byte); parse_state = ParseState::VarName(name); } } ParseState::WildcardFound => { return Err(RouteToRegexError::CharactersAfterWildcard); } }; } if let 
ParseState::VarName(name) = parse_state { regex += &format!("(?P<{}>[^/]+)", name); format_str += &format!("{}}}", name); } if regex.ends_with('/') { return Err(RouteToRegexError::InvalidTrailingSlash); } regex += "$"; Ok((regex, format_str)) } #[test] fn test_route_to_regex() { let (regex, _) = route_to_regex("/p/:project_id/exams/:exam_id/submissions_expired").unwrap(); assert_eq!( regex, r"^/p/(?P<project_id>[^/]+)/exams/(?P<exam_id>[^/]+)/submissions_expired$" ); } #[test] fn test_route_to_regex_no_path_params() { let (regex, _) = route_to_regex("/p/exams/submissions_expired").unwrap(); assert_eq!(regex, r"^/p/exams/submissions_expired$"); } #[test] fn test_route_to_regex_no_leading_slash() { let regex = route_to_regex("p/exams/submissions_expired"); assert_eq!(regex, Err(RouteToRegexError::MissingLeadingForwardSlash)); } #[test] fn test_route_to_regex_non_ascii_chars() { let regex = route_to_regex("🥖p🥖:project_id🥖exams🥖:exam_id🥖submissions_expired"); assert_eq!(regex, Err(RouteToRegexError::NonAsciiChars)); } #[test] fn test_route_to_regex_invalid_ident() { let regex = route_to_regex("/p/:project_id/exams/:_exam_id/submissions_expired"); assert_eq!( regex, Err(RouteToRegexError::InvalidIdentifier("_exam_id".to_string())) ); } #[test] fn test_route_to_regex_characters_after_wildcard() { let regex = route_to_regex("/p/:project_id/exams/:exam*ID/submissions_expired"); assert_eq!( regex, Err(RouteToRegexError::CharactersAfterWildcard) ); } #[test] fn test_route_to_regex_invalid_ending() { let regex = route_to_regex("/p/:project_id/exams/:exam_id/submissions_expired/"); assert_eq!(regex, Err(RouteToRegexError::InvalidTrailingSlash)); } fn get_string_attr(name: &str, attrs: &[syn::Attribute]) -> Option<String> { for attr in attrs { let attr = attr.parse_meta(); if let Ok(syn::Meta::List(ref list)) = attr { if list.ident == name { for thing in &list.nested { if let syn::NestedMeta::Literal(syn::Lit::Str(str_lit)) = thing { return Some(str_lit.value()); } } } } } None 
} fn has_flag_attr(name: &str, attrs: &[syn::Attribute]) -> bool { for attr in attrs { let attr = attr.parse_meta(); if let Ok(syn::Meta::Word(ref ident)) = attr { if ident == name { return true; } } } false } fn get_struct_fields(data: &syn::Data) -> Vec<syn::Field> { match data { syn::Data::Struct(data_struct) => match data_struct.fields { syn::Fields::Named(ref named_fields) => named_fields.named.iter().cloned().collect(), _ => panic!("Struct fields must be named"), }, _ => panic!("AppRoute derive is only supported for structs"), } } fn field_is_option(field: &syn::Field) -> bool { match field.ty { syn::Type::Path(ref type_path) => type_path .path .segments .iter() .last() .map(|segment| segment.ident == "Option") .unwrap_or(false), _ => false, } } #[proc_macro_derive(AppRoute, attributes(route, query))] pub fn app_route_derive(input: TokenStream) -> TokenStream { let input = parse_macro_input!(input as DeriveInput); let struct_fields = get_struct_fields(&input.data); let (route_fields, query_fields): (Vec<_>, Vec<_>) = struct_fields .into_iter() .partition(|f|!has_flag_attr("query", &f.attrs)); let name = &input.ident; let generics = input.generics; let (impl_generics, ty_generics, where_clause) = generics.split_for_impl(); let route_string = get_string_attr("route", &input.attrs); let url_route = route_string.expect( "derive(AppRoute) requires a #[route(\"/your/route/here\")] attribute on the struct", ); let (route_regex_str, format_str) = route_to_regex(&url_route).expect("Could not convert route attribute to a valid regex"); // Validate route_regex and make sure struct and route have matching fields let route_regex = Regex::new(&route_regex_str).expect("route attribute was not compiled into a valid regex"); let regex_capture_names_set: HashSet<String> = route_regex .capture_names() .filter_map(|c_opt| c_opt.map(|c| c.to_string())) .collect(); let field_names_set: HashSet<String> = route_fields .clone() .into_iter() .map(|f| f.ident.unwrap().to_string()) 
.collect(); if regex_capture_names_set!= field_names_set { let missing_from_route = field_names_set.difference(&regex_capture_names_set); let missing_from_struct = regex_capture_names_set.difference(&field_names_set); let error_msg = format!("\nFields in struct missing from route pattern: {:?}\nFields in route missing from struct: {:?}", missing_from_route, missing_from_struct); panic!(error_msg); } let route_field_assignments = route_fields.clone().into_iter().map(|f| { let f_ident = f.ident.unwrap(); let f_ident_str = f_ident.to_string(); quote! { #f_ident: captures[#f_ident_str].parse().map_err(|e| { RouteParseErr::ParamParseErr(std::string::ToString::to_string(&e)) })? } }); let query_field_assignments = query_fields.clone().into_iter().map(|f| { let is_option = field_is_option(&f); let f_ident = f.ident.unwrap(); if is_option { quote! { #f_ident: query_string.and_then(|q| qs::from_str(q).ok()) } } else { quote! { #f_ident: qs::from_str(query_string.ok_or(RouteParseErr::NoQueryString)?).map_err(|e| RouteParseErr::QueryParseErr(e.description().to_string()))? } } }); let route_field_parsers = quote! { #( #route_field_assignments ),* }; let query_field_parsers = quote! { #( #query_field_assignments ),* }; let format_args = route_fields.clone().into_iter().map(|f| { let f_ident = f.ident.unwrap(); quote! { #f_ident = self.#f_ident } }); let format_args = quote! { #( #format_args ),* }; let query_field_to_string_statements = query_fields.into_iter().map(|f| { let is_option = field_is_option(&f); let f_ident = f.ident.unwrap(); if is_option { quote! { self.#f_ident.as_ref().and_then(|q| qs::to_string(&q).ok()) } } else { quote! { qs::to_string(&self.#f_ident).ok() } } }); let encoded_query_fields = quote! { #( #query_field_to_string_statements ),* }; let struct_constructor = match ( route_field_parsers.is_empty(), query_field_parsers.is_empty(), ) { (true, true) => quote! { #name {} }, (true, false) => quote! 
{ #name { #query_field_parsers } }, (false, true) => quote! { #name { #route_field_parsers } }, (false, false) => quote! { #name { #route_field_parsers, #query_field_parsers } }, }; let app_route_impl = quote! { impl #impl_generics app_route::AppRoute for #name #ty_generics #where_clause { fn path_pattern() -> String { #route_regex_str.to_string() } fn query_string(&self) -> Option<String> { use app_route::serde_qs as qs; // TODO - Remove duplicates because // there could be multiple fields with // a #[query] attribute that have common fields // TODO - can this be done with an on-stack array? let encoded_queries: Vec<Option<String>> = vec![#encoded_query_fields]; let filtered: Vec<_> = encoded_queries.into_iter().filter_map(std::convert::identity).collect(); if!filtered.is_empty() { Some(filtered.join("&")) } else { None } } } impl #impl_generics std::fmt::Display for #name #ty_generics #where_clause { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { if let Some(query) = self.query_string() { let path = format!( #format_str, #format_args ); write!(f, "{}?{}", path, query) } else { write!( f, #format_str, #format_args ) } } } impl #impl_generics std::str::FromStr for #name #ty_generics #where_clause { type Err = app_route::RouteParseErr; fn from_str(app_path: &str) -> Result<Self, Self::Err> { use app_route::serde_qs as qs; use app_route::RouteParseErr; app_route::lazy_static! { static ref ROUTE_REGEX: app_route::Regex = app_route::Regex::new(#route_regex_str).expect("Failed to compile regex"); } let question_pos = app_path.find('?'); let just_path = &app_path[..(question_pos.unwrap_or_else(|| app_path.len()))]; let captures = (*ROUTE_REGEX).captures(just_path).ok_or(RouteParseErr::NoMatches)?; let query_string = question_pos.map(|question_pos| { let mut query_string = &app_path[question_pos..];
} query_string }); Ok(#struct_constructor) } } }; let impl_wrapper = syn::Ident::new( &format!("_IMPL_APPROUTE_FOR_{}", name.to_string()), proc_macro2::Span::call_site(), ); let out = quote! { const #impl_wrapper: () = { extern crate app_route; #app_route_impl }; }; out.into() }
if query_string.starts_with('?') { query_string = &query_string[1..];
random_line_split
lib.rs
#![recursion_limit = "256"] extern crate proc_macro; use proc_macro::TokenStream; use proc_macro2; use quote::quote; use regex::Regex; use std::collections::HashSet; use syn::{parse_macro_input, DeriveInput}; #[derive(Debug, PartialEq)] enum RouteToRegexError { MissingLeadingForwardSlash, NonAsciiChars, InvalidIdentifier(String), InvalidTrailingSlash, CharactersAfterWildcard, } fn route_to_regex(route: &str) -> Result<(String, String), RouteToRegexError> { enum ParseState { Initial, Static, VarName(String), WildcardFound, }; if!route.is_ascii() { return Err(RouteToRegexError::NonAsciiChars); } let ident_regex = Regex::new(r"^[a-zA-Z][a-zA-Z0-9_]*$").unwrap(); let mut regex = "".to_string(); let mut format_str = "".to_string(); let mut parse_state = ParseState::Initial; for byte in route.chars() { match parse_state { ParseState::Initial => { if byte!= '/' { return Err(RouteToRegexError::MissingLeadingForwardSlash); } regex += "^/"; format_str += "/"; parse_state = ParseState::Static; } ParseState::Static => { if byte == ':' { format_str.push('{'); parse_state = ParseState::VarName("".to_string()); } else { regex.push(byte); format_str.push(byte); parse_state = ParseState::Static; } } ParseState::VarName(mut name) => { if byte == '/' { // Validate 'name' as a Rust identifier if!ident_regex.is_match(&name) { return Err(RouteToRegexError::InvalidIdentifier(name)); } regex += &format!("(?P<{}>[^/]+)/", name); format_str += &format!("{}}}/", name); parse_state = ParseState::Static; } else if byte == '*' { // Found a wildcard - add the var name to the regex // Validate 'name' as a Rust identifier if!ident_regex.is_match(&name) { return Err(RouteToRegexError::InvalidIdentifier(name)); } regex += &format!("(?P<{}>.*)", name); format_str += &format!("{}}}", name); parse_state = ParseState::WildcardFound; } else { name.push(byte); parse_state = ParseState::VarName(name); } } ParseState::WildcardFound => { return Err(RouteToRegexError::CharactersAfterWildcard); } }; } if let 
ParseState::VarName(name) = parse_state { regex += &format!("(?P<{}>[^/]+)", name); format_str += &format!("{}}}", name); } if regex.ends_with('/') { return Err(RouteToRegexError::InvalidTrailingSlash); } regex += "$"; Ok((regex, format_str)) } #[test] fn test_route_to_regex() { let (regex, _) = route_to_regex("/p/:project_id/exams/:exam_id/submissions_expired").unwrap(); assert_eq!( regex, r"^/p/(?P<project_id>[^/]+)/exams/(?P<exam_id>[^/]+)/submissions_expired$" ); } #[test] fn test_route_to_regex_no_path_params() { let (regex, _) = route_to_regex("/p/exams/submissions_expired").unwrap(); assert_eq!(regex, r"^/p/exams/submissions_expired$"); } #[test] fn test_route_to_regex_no_leading_slash() { let regex = route_to_regex("p/exams/submissions_expired"); assert_eq!(regex, Err(RouteToRegexError::MissingLeadingForwardSlash)); } #[test] fn test_route_to_regex_non_ascii_chars() { let regex = route_to_regex("🥖p🥖:project_id🥖exams🥖:exam_id🥖submissions_expired"); assert_eq!(regex, Err(RouteToRegexError::NonAsciiChars)); } #[test] fn test_route_to_regex_invalid_ident() { let regex = route_to_regex("/p/:project_id/exams/:_exam_id/submissions_expired"); assert_eq!( regex, Err(RouteToRegexError::InvalidIdentifier("_exam_id".to_string())) ); } #[test] fn test_route_to_regex_characters_after_wildcard() { let regex = route_to_regex("/p/:project_id/exams/:exam*ID/submissions_expired"); assert_eq!( regex, Err(RouteToRegexError::CharactersAfterWildcard) ); } #[test] fn test_route_to_regex_invalid_ending() { let regex = route_to_regex("/p/:project_id/exams/:exam_id/submissions_expired/"); assert_eq!(regex, Err(RouteToRegexError::InvalidTrailingSlash)); } fn get_string_attr(name: &str, attrs: &[syn::Attribute]) -> Option<String> { for attr in attrs { let attr = attr.parse_meta(); if let Ok(syn::Meta::List(ref list)) = attr { if list.ident == name { for thing in &list.nested { if let syn::NestedMeta::Literal(syn::Lit::Str(str_lit)) = thing { return Some(str_lit.value()); } } } } } None 
} fn has_flag_attr(name: &str, attrs: &[syn::Attribute]) -> bool { for attr in attrs { let attr = attr.parse_meta(); if let Ok(syn::Meta::Word(ref ident)) = attr { if ident == name { return true; } } } false } fn get_struct_fiel
ta) -> Vec<syn::Field> { match data { syn::Data::Struct(data_struct) => match data_struct.fields { syn::Fields::Named(ref named_fields) => named_fields.named.iter().cloned().collect(), _ => panic!("Struct fields must be named"), }, _ => panic!("AppRoute derive is only supported for structs"), } } fn field_is_option(field: &syn::Field) -> bool { match field.ty { syn::Type::Path(ref type_path) => type_path .path .segments .iter() .last() .map(|segment| segment.ident == "Option") .unwrap_or(false), _ => false, } } #[proc_macro_derive(AppRoute, attributes(route, query))] pub fn app_route_derive(input: TokenStream) -> TokenStream { let input = parse_macro_input!(input as DeriveInput); let struct_fields = get_struct_fields(&input.data); let (route_fields, query_fields): (Vec<_>, Vec<_>) = struct_fields .into_iter() .partition(|f|!has_flag_attr("query", &f.attrs)); let name = &input.ident; let generics = input.generics; let (impl_generics, ty_generics, where_clause) = generics.split_for_impl(); let route_string = get_string_attr("route", &input.attrs); let url_route = route_string.expect( "derive(AppRoute) requires a #[route(\"/your/route/here\")] attribute on the struct", ); let (route_regex_str, format_str) = route_to_regex(&url_route).expect("Could not convert route attribute to a valid regex"); // Validate route_regex and make sure struct and route have matching fields let route_regex = Regex::new(&route_regex_str).expect("route attribute was not compiled into a valid regex"); let regex_capture_names_set: HashSet<String> = route_regex .capture_names() .filter_map(|c_opt| c_opt.map(|c| c.to_string())) .collect(); let field_names_set: HashSet<String> = route_fields .clone() .into_iter() .map(|f| f.ident.unwrap().to_string()) .collect(); if regex_capture_names_set!= field_names_set { let missing_from_route = field_names_set.difference(&regex_capture_names_set); let missing_from_struct = regex_capture_names_set.difference(&field_names_set); let error_msg = 
format!("\nFields in struct missing from route pattern: {:?}\nFields in route missing from struct: {:?}", missing_from_route, missing_from_struct); panic!(error_msg); } let route_field_assignments = route_fields.clone().into_iter().map(|f| { let f_ident = f.ident.unwrap(); let f_ident_str = f_ident.to_string(); quote! { #f_ident: captures[#f_ident_str].parse().map_err(|e| { RouteParseErr::ParamParseErr(std::string::ToString::to_string(&e)) })? } }); let query_field_assignments = query_fields.clone().into_iter().map(|f| { let is_option = field_is_option(&f); let f_ident = f.ident.unwrap(); if is_option { quote! { #f_ident: query_string.and_then(|q| qs::from_str(q).ok()) } } else { quote! { #f_ident: qs::from_str(query_string.ok_or(RouteParseErr::NoQueryString)?).map_err(|e| RouteParseErr::QueryParseErr(e.description().to_string()))? } } }); let route_field_parsers = quote! { #( #route_field_assignments ),* }; let query_field_parsers = quote! { #( #query_field_assignments ),* }; let format_args = route_fields.clone().into_iter().map(|f| { let f_ident = f.ident.unwrap(); quote! { #f_ident = self.#f_ident } }); let format_args = quote! { #( #format_args ),* }; let query_field_to_string_statements = query_fields.into_iter().map(|f| { let is_option = field_is_option(&f); let f_ident = f.ident.unwrap(); if is_option { quote! { self.#f_ident.as_ref().and_then(|q| qs::to_string(&q).ok()) } } else { quote! { qs::to_string(&self.#f_ident).ok() } } }); let encoded_query_fields = quote! { #( #query_field_to_string_statements ),* }; let struct_constructor = match ( route_field_parsers.is_empty(), query_field_parsers.is_empty(), ) { (true, true) => quote! { #name {} }, (true, false) => quote! { #name { #query_field_parsers } }, (false, true) => quote! { #name { #route_field_parsers } }, (false, false) => quote! { #name { #route_field_parsers, #query_field_parsers } }, }; let app_route_impl = quote! 
{ impl #impl_generics app_route::AppRoute for #name #ty_generics #where_clause { fn path_pattern() -> String { #route_regex_str.to_string() } fn query_string(&self) -> Option<String> { use app_route::serde_qs as qs; // TODO - Remove duplicates because // there could be multiple fields with // a #[query] attribute that have common fields // TODO - can this be done with an on-stack array? let encoded_queries: Vec<Option<String>> = vec![#encoded_query_fields]; let filtered: Vec<_> = encoded_queries.into_iter().filter_map(std::convert::identity).collect(); if!filtered.is_empty() { Some(filtered.join("&")) } else { None } } } impl #impl_generics std::fmt::Display for #name #ty_generics #where_clause { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { if let Some(query) = self.query_string() { let path = format!( #format_str, #format_args ); write!(f, "{}?{}", path, query) } else { write!( f, #format_str, #format_args ) } } } impl #impl_generics std::str::FromStr for #name #ty_generics #where_clause { type Err = app_route::RouteParseErr; fn from_str(app_path: &str) -> Result<Self, Self::Err> { use app_route::serde_qs as qs; use app_route::RouteParseErr; app_route::lazy_static! { static ref ROUTE_REGEX: app_route::Regex = app_route::Regex::new(#route_regex_str).expect("Failed to compile regex"); } let question_pos = app_path.find('?'); let just_path = &app_path[..(question_pos.unwrap_or_else(|| app_path.len()))]; let captures = (*ROUTE_REGEX).captures(just_path).ok_or(RouteParseErr::NoMatches)?; let query_string = question_pos.map(|question_pos| { let mut query_string = &app_path[question_pos..]; if query_string.starts_with('?') { query_string = &query_string[1..]; } query_string }); Ok(#struct_constructor) } } }; let impl_wrapper = syn::Ident::new( &format!("_IMPL_APPROUTE_FOR_{}", name.to_string()), proc_macro2::Span::call_site(), ); let out = quote! { const #impl_wrapper: () = { extern crate app_route; #app_route_impl }; }; out.into() }
ds(data: &syn::Da
identifier_name
lib.rs
#![recursion_limit = "256"] extern crate proc_macro; use proc_macro::TokenStream; use proc_macro2; use quote::quote; use regex::Regex; use std::collections::HashSet; use syn::{parse_macro_input, DeriveInput}; #[derive(Debug, PartialEq)] enum RouteToRegexError { MissingLeadingForwardSlash, NonAsciiChars, InvalidIdentifier(String), InvalidTrailingSlash, CharactersAfterWildcard, } fn route_to_regex(route: &str) -> Result<(String, String), RouteToRegexError> { enum ParseState { Initial, Static, VarName(String), WildcardFound, }; if!route.is_ascii() { return Err(RouteToRegexError::NonAsciiChars); } let ident_regex = Regex::new(r"^[a-zA-Z][a-zA-Z0-9_]*$").unwrap(); let mut regex = "".to_string(); let mut format_str = "".to_string(); let mut parse_state = ParseState::Initial; for byte in route.chars() { match parse_state { ParseState::Initial => { if byte!= '/' { return Err(RouteToRegexError::MissingLeadingForwardSlash); } regex += "^/"; format_str += "/"; parse_state = ParseState::Static; } ParseState::Static => { if byte == ':' { format_str.push('{'); parse_state = ParseState::VarName("".to_string()); } else { regex.push(byte); format_str.push(byte); parse_state = ParseState::Static; } } ParseState::VarName(mut name) => { if byte == '/' { // Validate 'name' as a Rust identifier if!ident_regex.is_match(&name)
regex += &format!("(?P<{}>[^/]+)/", name); format_str += &format!("{}}}/", name); parse_state = ParseState::Static; } else if byte == '*' { // Found a wildcard - add the var name to the regex // Validate 'name' as a Rust identifier if!ident_regex.is_match(&name) { return Err(RouteToRegexError::InvalidIdentifier(name)); } regex += &format!("(?P<{}>.*)", name); format_str += &format!("{}}}", name); parse_state = ParseState::WildcardFound; } else { name.push(byte); parse_state = ParseState::VarName(name); } } ParseState::WildcardFound => { return Err(RouteToRegexError::CharactersAfterWildcard); } }; } if let ParseState::VarName(name) = parse_state { regex += &format!("(?P<{}>[^/]+)", name); format_str += &format!("{}}}", name); } if regex.ends_with('/') { return Err(RouteToRegexError::InvalidTrailingSlash); } regex += "$"; Ok((regex, format_str)) } #[test] fn test_route_to_regex() { let (regex, _) = route_to_regex("/p/:project_id/exams/:exam_id/submissions_expired").unwrap(); assert_eq!( regex, r"^/p/(?P<project_id>[^/]+)/exams/(?P<exam_id>[^/]+)/submissions_expired$" ); } #[test] fn test_route_to_regex_no_path_params() { let (regex, _) = route_to_regex("/p/exams/submissions_expired").unwrap(); assert_eq!(regex, r"^/p/exams/submissions_expired$"); } #[test] fn test_route_to_regex_no_leading_slash() { let regex = route_to_regex("p/exams/submissions_expired"); assert_eq!(regex, Err(RouteToRegexError::MissingLeadingForwardSlash)); } #[test] fn test_route_to_regex_non_ascii_chars() { let regex = route_to_regex("🥖p🥖:project_id🥖exams🥖:exam_id🥖submissions_expired"); assert_eq!(regex, Err(RouteToRegexError::NonAsciiChars)); } #[test] fn test_route_to_regex_invalid_ident() { let regex = route_to_regex("/p/:project_id/exams/:_exam_id/submissions_expired"); assert_eq!( regex, Err(RouteToRegexError::InvalidIdentifier("_exam_id".to_string())) ); } #[test] fn test_route_to_regex_characters_after_wildcard() { let regex = 
route_to_regex("/p/:project_id/exams/:exam*ID/submissions_expired"); assert_eq!( regex, Err(RouteToRegexError::CharactersAfterWildcard) ); } #[test] fn test_route_to_regex_invalid_ending() { let regex = route_to_regex("/p/:project_id/exams/:exam_id/submissions_expired/"); assert_eq!(regex, Err(RouteToRegexError::InvalidTrailingSlash)); } fn get_string_attr(name: &str, attrs: &[syn::Attribute]) -> Option<String> { for attr in attrs { let attr = attr.parse_meta(); if let Ok(syn::Meta::List(ref list)) = attr { if list.ident == name { for thing in &list.nested { if let syn::NestedMeta::Literal(syn::Lit::Str(str_lit)) = thing { return Some(str_lit.value()); } } } } } None } fn has_flag_attr(name: &str, attrs: &[syn::Attribute]) -> bool { for attr in attrs { let attr = attr.parse_meta(); if let Ok(syn::Meta::Word(ref ident)) = attr { if ident == name { return true; } } } false } fn get_struct_fields(data: &syn::Data) -> Vec<syn::Field> { match data { syn::Data::Struct(data_struct) => match data_struct.fields { syn::Fields::Named(ref named_fields) => named_fields.named.iter().cloned().collect(), _ => panic!("Struct fields must be named"), }, _ => panic!("AppRoute derive is only supported for structs"), } } fn field_is_option(field: &syn::Field) -> bool { match field.ty { syn::Type::Path(ref type_path) => type_path .path .segments .iter() .last() .map(|segment| segment.ident == "Option") .unwrap_or(false), _ => false, } } #[proc_macro_derive(AppRoute, attributes(route, query))] pub fn app_route_derive(input: TokenStream) -> TokenStream { let input = parse_macro_input!(input as DeriveInput); let struct_fields = get_struct_fields(&input.data); let (route_fields, query_fields): (Vec<_>, Vec<_>) = struct_fields .into_iter() .partition(|f|!has_flag_attr("query", &f.attrs)); let name = &input.ident; let generics = input.generics; let (impl_generics, ty_generics, where_clause) = generics.split_for_impl(); let route_string = get_string_attr("route", &input.attrs); let url_route = 
route_string.expect( "derive(AppRoute) requires a #[route(\"/your/route/here\")] attribute on the struct", ); let (route_regex_str, format_str) = route_to_regex(&url_route).expect("Could not convert route attribute to a valid regex"); // Validate route_regex and make sure struct and route have matching fields let route_regex = Regex::new(&route_regex_str).expect("route attribute was not compiled into a valid regex"); let regex_capture_names_set: HashSet<String> = route_regex .capture_names() .filter_map(|c_opt| c_opt.map(|c| c.to_string())) .collect(); let field_names_set: HashSet<String> = route_fields .clone() .into_iter() .map(|f| f.ident.unwrap().to_string()) .collect(); if regex_capture_names_set!= field_names_set { let missing_from_route = field_names_set.difference(&regex_capture_names_set); let missing_from_struct = regex_capture_names_set.difference(&field_names_set); let error_msg = format!("\nFields in struct missing from route pattern: {:?}\nFields in route missing from struct: {:?}", missing_from_route, missing_from_struct); panic!(error_msg); } let route_field_assignments = route_fields.clone().into_iter().map(|f| { let f_ident = f.ident.unwrap(); let f_ident_str = f_ident.to_string(); quote! { #f_ident: captures[#f_ident_str].parse().map_err(|e| { RouteParseErr::ParamParseErr(std::string::ToString::to_string(&e)) })? } }); let query_field_assignments = query_fields.clone().into_iter().map(|f| { let is_option = field_is_option(&f); let f_ident = f.ident.unwrap(); if is_option { quote! { #f_ident: query_string.and_then(|q| qs::from_str(q).ok()) } } else { quote! { #f_ident: qs::from_str(query_string.ok_or(RouteParseErr::NoQueryString)?).map_err(|e| RouteParseErr::QueryParseErr(e.description().to_string()))? } } }); let route_field_parsers = quote! { #( #route_field_assignments ),* }; let query_field_parsers = quote! { #( #query_field_assignments ),* }; let format_args = route_fields.clone().into_iter().map(|f| { let f_ident = f.ident.unwrap(); quote! 
{ #f_ident = self.#f_ident } }); let format_args = quote! { #( #format_args ),* }; let query_field_to_string_statements = query_fields.into_iter().map(|f| { let is_option = field_is_option(&f); let f_ident = f.ident.unwrap(); if is_option { quote! { self.#f_ident.as_ref().and_then(|q| qs::to_string(&q).ok()) } } else { quote! { qs::to_string(&self.#f_ident).ok() } } }); let encoded_query_fields = quote! { #( #query_field_to_string_statements ),* }; let struct_constructor = match ( route_field_parsers.is_empty(), query_field_parsers.is_empty(), ) { (true, true) => quote! { #name {} }, (true, false) => quote! { #name { #query_field_parsers } }, (false, true) => quote! { #name { #route_field_parsers } }, (false, false) => quote! { #name { #route_field_parsers, #query_field_parsers } }, }; let app_route_impl = quote! { impl #impl_generics app_route::AppRoute for #name #ty_generics #where_clause { fn path_pattern() -> String { #route_regex_str.to_string() } fn query_string(&self) -> Option<String> { use app_route::serde_qs as qs; // TODO - Remove duplicates because // there could be multiple fields with // a #[query] attribute that have common fields // TODO - can this be done with an on-stack array? 
let encoded_queries: Vec<Option<String>> = vec![#encoded_query_fields]; let filtered: Vec<_> = encoded_queries.into_iter().filter_map(std::convert::identity).collect(); if!filtered.is_empty() { Some(filtered.join("&")) } else { None } } } impl #impl_generics std::fmt::Display for #name #ty_generics #where_clause { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { if let Some(query) = self.query_string() { let path = format!( #format_str, #format_args ); write!(f, "{}?{}", path, query) } else { write!( f, #format_str, #format_args ) } } } impl #impl_generics std::str::FromStr for #name #ty_generics #where_clause { type Err = app_route::RouteParseErr; fn from_str(app_path: &str) -> Result<Self, Self::Err> { use app_route::serde_qs as qs; use app_route::RouteParseErr; app_route::lazy_static! { static ref ROUTE_REGEX: app_route::Regex = app_route::Regex::new(#route_regex_str).expect("Failed to compile regex"); } let question_pos = app_path.find('?'); let just_path = &app_path[..(question_pos.unwrap_or_else(|| app_path.len()))]; let captures = (*ROUTE_REGEX).captures(just_path).ok_or(RouteParseErr::NoMatches)?; let query_string = question_pos.map(|question_pos| { let mut query_string = &app_path[question_pos..]; if query_string.starts_with('?') { query_string = &query_string[1..]; } query_string }); Ok(#struct_constructor) } } }; let impl_wrapper = syn::Ident::new( &format!("_IMPL_APPROUTE_FOR_{}", name.to_string()), proc_macro2::Span::call_site(), ); let out = quote! { const #impl_wrapper: () = { extern crate app_route; #app_route_impl }; }; out.into() }
{ return Err(RouteToRegexError::InvalidIdentifier(name)); }
conditional_block
partition.rs
use arrow_deps::{ arrow::record_batch::RecordBatch, datafusion::logical_plan::Expr, datafusion::logical_plan::Operator, datafusion::optimizer::utils::expr_to_column_names, datafusion::scalar::ScalarValue, }; use generated_types::wal as wb; use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; use wal::{Entry as WalEntry, Result as WalResult}; use data_types::TIME_COLUMN_NAME; use storage::{ predicate::{Predicate, TimestampRange}, util::{visit_expression, AndExprBuilder, ExpressionVisitor}, }; use crate::dictionary::Dictionary; use crate::table::Table; use snafu::{OptionExt, ResultExt, Snafu}; #[derive(Debug, Snafu)] pub enum Error { #[snafu(display("Could not read WAL entry: {}", source))] WalEntryRead { source: wal::Error }, #[snafu(display("Partition {} not found", partition))] PartitionNotFound { partition: String }, #[snafu(display( "Column name {} not found in dictionary of partition {}", column, partition ))] ColumnNameNotFoundInDictionary { column: String, partition: String, source: crate::dictionary::Error, }, #[snafu(display("Error writing table '{}': {}", table_name, source))] TableWrite { table_name: String, source: crate::table::Error, }, #[snafu(display("Table Error in '{}': {}", table_name, source))] NamedTableError { table_name: String, source: crate::table::Error, }, #[snafu(display( "Table name {} not found in dictionary of partition {}", table, partition ))] TableNameNotFoundInDictionary { table: String, partition: String, source: crate::dictionary::Error, }, #[snafu(display("Table {} not found in partition {}", table, partition))] TableNotFoundInPartition { table: u32, partition: String }, #[snafu(display("Attempt to write table batch without a name"))] TableWriteWithoutName, #[snafu(display("Error restoring WAL entry, missing partition key"))] MissingPartitionKey, } pub type Result<T, E = Error> = std::result::Result<T, E>; #[derive(Debug)] pub struct Partition { pub key: String, /// `dictionary` maps &str -> u32. 
The u32s are used in place of String or str to avoid slow /// string operations. The same dictionary is used for table names, tag names, tag values, and /// column names. // TODO: intern string field values too? pub dictionary: Dictionary, /// map of the dictionary ID for the table name to the table pub tables: HashMap<u32, Table>, pub is_open: bool, } /// Describes the result of translating a set of strings into /// partition specific ids #[derive(Debug, PartialEq, Eq)] pub enum PartitionIdSet { /// At least one of the strings was not present in the partitions' /// dictionary. /// /// This is important when testing for the presence of all ids in /// a set, as we know they can not all be present AtLeastOneMissing, /// All strings existed in this partition's dictionary Present(BTreeSet<u32>), } /// a 'Compiled' set of predicates / filters that can be evaluated on /// this partition (where strings have been translated to partition /// specific u32 ids) #[derive(Debug)] pub struct PartitionPredicate { /// If present, restrict the request to just those tables whose /// names are in table_names. If present but empty, means there /// was a predicate but no tables named that way exist in the /// partition (so no table can pass) pub table_name_predicate: Option<BTreeSet<u32>>, // Optional field column selection. If present, further restrict // any field columns returnedto only those named pub field_restriction: Option<BTreeSet<u32>>, /// General DataFusion expressions (arbitrary predicates) applied /// as a filter using logical conjuction (aka are 'AND'ed /// together). Only rows that evaluate to TRUE for all these /// expressions should be returned. 
pub partition_exprs: Vec<Expr>, /// If Some, then the table must contain all columns specified /// to pass the predicate pub required_columns: Option<PartitionIdSet>, /// The id of the "time" column in this partition pub time_column_id: u32, /// Timestamp range: only rows within this range should be considered pub range: Option<TimestampRange>, } impl PartitionPredicate { /// Creates and adds a datafuson predicate representing the /// combination of predicate and timestamp. pub fn filter_expr(&self) -> Option<Expr> { // build up a list of expressions let mut builder = AndExprBuilder::default().append_opt(self.make_timestamp_predicate_expr()); for expr in &self.partition_exprs { builder = builder.append_expr(expr.clone()); } builder.build() } /// Return true if there is a non empty field restriction pub fn has_field_restriction(&self) -> bool { match &self.field_restriction { None => false, Some(field_restiction) =>!field_restiction.is_empty(), } } /// For plans which select a subset of fields, returns true if /// the field should be included in the results pub fn should_include_field(&self, field_id: u32) -> bool { match &self.field_restriction { None => true, Some(field_restriction) => field_restriction.contains(&field_id), } } /// Return true if this column is the time column pub fn is_time_column(&self, id: u32) -> bool { self.time_column_id == id } /// Creates a DataFusion predicate for appliying a timestamp range: /// /// range.start <= time and time < range.end` fn make_timestamp_predicate_expr(&self) -> Option<Expr> { self.range.map(|range| make_range_expr(&range)) } } /// Creates expression like: /// range.low <= time && time < range.high fn make_range_expr(range: &TimestampRange) -> Expr { let ts_low = Expr::BinaryExpr { left: Box::new(Expr::Literal(ScalarValue::Int64(Some(range.start)))), op: Operator::LtEq, right: Box::new(Expr::Column(TIME_COLUMN_NAME.into())), }; let ts_high = Expr::BinaryExpr { left: Box::new(Expr::Column(TIME_COLUMN_NAME.into())), 
op: Operator::Lt, right: Box::new(Expr::Literal(ScalarValue::Int64(Some(range.end)))), }; AndExprBuilder::default() .append_expr(ts_low) .append_expr(ts_high) .build() .unwrap() } impl Partition { pub fn new(key: impl Into<String>) -> Self { Self { key: key.into(), dictionary: Dictionary::new(), tables: HashMap::new(), is_open: true, } } pub fn write_entry(&mut self, entry: &wb::WriteBufferEntry<'_>) -> Result<()> { if let Some(table_batches) = entry.table_batches() { for batch in table_batches { self.write_table_batch(&batch)?; } } Ok(()) } fn write_table_batch(&mut self, batch: &wb::TableWriteBatch<'_>) -> Result<()> { let table_name = batch.name().context(TableWriteWithoutName)?; let table_id = self.dictionary.lookup_value_or_insert(table_name); let table = self .tables .entry(table_id) .or_insert_with(|| Table::new(table_id)); if let Some(rows) = batch.rows() { table .append_rows(&mut self.dictionary, &rows) .context(TableWrite { table_name })?; } Ok(()) } /// Translates `predicate` into per-partition ids that can be /// directly evaluated against tables in this partition pub fn compile_predicate(&self, predicate: &Predicate) -> Result<PartitionPredicate> { let table_name_predicate = self.compile_string_list(predicate.table_names.as_ref()); let field_restriction = self.compile_string_list(predicate.field_columns.as_ref()); let time_column_id = self .dictionary .lookup_value(TIME_COLUMN_NAME) .expect("time is in the partition dictionary"); let range = predicate.range; // it would be nice to avoid cloning all the exprs here. 
let partition_exprs = predicate.exprs.clone(); // In order to evaluate expressions in the table, all columns // referenced in the expression must appear (I think, not sure // about NOT, etc so panic if we see one of those); let mut visitor = SupportVisitor {}; let mut predicate_columns: HashSet<String> = HashSet::new(); for expr in &partition_exprs { visit_expression(expr, &mut visitor); expr_to_column_names(&expr, &mut predicate_columns).unwrap(); } // if there are any column references in the expression, ensure they appear in any table let required_columns = if predicate_columns.is_empty() { None } else { Some(self.make_partition_ids(predicate_columns.iter())) }; Ok(PartitionPredicate { table_name_predicate, field_restriction, partition_exprs, required_columns, time_column_id, range, }) } /// Converts a potential set of strings into a set of ids in terms /// of this dictionary. If there are no matching Strings in the /// partitions dictionary, those strings are ignored and a /// (potentially empty) set is returned. 
fn compile_string_list(&self, names: Option<&BTreeSet<String>>) -> Option<BTreeSet<u32>> { names.map(|names| { names .iter() .filter_map(|name| self.dictionary.id(name)) .collect::<BTreeSet<_>>() }) } /// Adds the ids of any columns in additional_required_columns to the required columns of predicate pub fn add_required_columns_to_predicate( &self, additional_required_columns: &HashSet<String>, predicate: &mut PartitionPredicate, ) { for column_name in additional_required_columns { // Once know we have missing columns, no need to try // and figure out if these any additional columns are needed if Some(PartitionIdSet::AtLeastOneMissing) == predicate.required_columns { return; } let column_id = self.dictionary.id(column_name); // Update the required colunm list predicate.required_columns = Some(match predicate.required_columns.take() { None => { if let Some(column_id) = column_id { let mut symbols = BTreeSet::new(); symbols.insert(column_id); PartitionIdSet::Present(symbols) } else { PartitionIdSet::AtLeastOneMissing } } Some(PartitionIdSet::Present(mut symbols)) => { if let Some(column_id) = column_id { symbols.insert(column_id); PartitionIdSet::Present(symbols) } else { PartitionIdSet::AtLeastOneMissing } } Some(PartitionIdSet::AtLeastOneMissing) => { unreachable!("Covered by case above while adding required columns to predicate") } }); } } /// returns true if data with partition key `key` should be /// written to this partition, pub fn should_write(&self, key: &str) -> bool { self.key.starts_with(key) && self.is_open } /// Convert the table specified in this partition into an arrow record batch pub fn table_to_arrow(&self, table_name: &str, columns: &[&str]) -> Result<RecordBatch> { let table_id = self.dictionary .lookup_value(table_name) .context(TableNameNotFoundInDictionary { table: table_name, partition: &self.key, })?; let table = self .tables .get(&table_id) .context(TableNotFoundInPartition { table: table_id, partition: &self.key, })?; table .to_arrow(&self, 
columns) .context(NamedTableError { table_name }) } /// Translate a bunch of strings into a set of ids relative to this partition pub fn make_partition_ids<'a, I>(&self, predicate_columns: I) -> PartitionIdSet where I: Iterator<Item = &'a String>, { let mut symbols = BTreeSet::new(); for column_name in predicate_columns { if let Some(column_id) = self.dictionary.id(column_name) { symbols.insert(column_id); } else { return PartitionIdSet::AtLeastOneMissing; } } PartitionIdSet::Present(symbols) } } /// Used to figure out if we know how to deal with this kind of /// predicate in the write buffer struct SupportVisitor {} impl ExpressionVisitor for SupportVisitor { fn pre_visit(&mut self, expr: &Expr) { match expr { Expr::Literal(..) => {} Expr::Column(..) =>
Expr::BinaryExpr { op,.. } => { match op { Operator::Eq | Operator::Lt | Operator::LtEq | Operator::Gt | Operator::GtEq | Operator::Plus | Operator::Minus | Operator::Multiply | Operator::Divide | Operator::And | Operator::Or => {} // Unsupported (need to think about ramifications) Operator::NotEq | Operator::Modulus | Operator::Like | Operator::NotLike => { panic!("Unsupported binary operator in expression: {:?}", expr) } } } _ => panic!( "Unsupported expression in write_buffer database: {:?}", expr ), } } } #[derive(Default, Debug)] pub struct RestorationStats { pub row_count: usize, pub tables: BTreeSet<String>, } /// Given a set of WAL entries, restore them into a set of Partitions. pub fn restore_partitions_from_wal( wal_entries: impl Iterator<Item = WalResult<WalEntry>>, ) -> Result<(Vec<Partition>, RestorationStats)> { let mut stats = RestorationStats::default(); let mut partitions = BTreeMap::new(); for wal_entry in wal_entries { let wal_entry = wal_entry.context(WalEntryRead)?; let bytes = wal_entry.as_data(); let batch = flatbuffers::get_root::<wb::WriteBufferBatch<'_>>(&bytes); if let Some(entries) = batch.entries() { for entry in entries { let partition_key = entry.partition_key().context(MissingPartitionKey)?; if!partitions.contains_key(partition_key) { partitions.insert( partition_key.to_string(), Partition::new(partition_key.to_string()), ); } let partition = partitions .get_mut(partition_key) .context(PartitionNotFound { partition: partition_key, })?; partition.write_entry(&entry)?; } } } let partitions = partitions .into_iter() .map(|(_, p)| p) .collect::<Vec<Partition>>(); // compute the stats for p in &partitions { for (id, table) in &p.tables { let name = p .dictionary .lookup_id(*id) .expect("table id wasn't inserted into dictionary on restore"); if!stats.tables.contains(name) { stats.tables.insert(name.to_string()); } stats.row_count += table.row_count(); } } Ok((partitions, stats)) } #[cfg(test)] mod tests { use super::*; #[test] fn 
test_make_range_expr() { // Test that the generated predicate is correct let range = TimestampRange::new(101, 202); let ts_predicate_expr = make_range_expr(&range); let expected_string = "Int64(101) LtEq #time And #time Lt Int64(202)"; let actual_string = format!("{:?}", ts_predicate_expr); assert_eq!(actual_string, expected_string); } }
{}
conditional_block
partition.rs
use arrow_deps::{ arrow::record_batch::RecordBatch, datafusion::logical_plan::Expr, datafusion::logical_plan::Operator, datafusion::optimizer::utils::expr_to_column_names, datafusion::scalar::ScalarValue, }; use generated_types::wal as wb; use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; use wal::{Entry as WalEntry, Result as WalResult}; use data_types::TIME_COLUMN_NAME; use storage::{ predicate::{Predicate, TimestampRange}, util::{visit_expression, AndExprBuilder, ExpressionVisitor}, }; use crate::dictionary::Dictionary; use crate::table::Table; use snafu::{OptionExt, ResultExt, Snafu}; #[derive(Debug, Snafu)] pub enum Error { #[snafu(display("Could not read WAL entry: {}", source))] WalEntryRead { source: wal::Error }, #[snafu(display("Partition {} not found", partition))] PartitionNotFound { partition: String }, #[snafu(display( "Column name {} not found in dictionary of partition {}", column, partition ))] ColumnNameNotFoundInDictionary { column: String, partition: String, source: crate::dictionary::Error, }, #[snafu(display("Error writing table '{}': {}", table_name, source))] TableWrite { table_name: String, source: crate::table::Error, }, #[snafu(display("Table Error in '{}': {}", table_name, source))] NamedTableError { table_name: String, source: crate::table::Error, }, #[snafu(display( "Table name {} not found in dictionary of partition {}", table, partition ))] TableNameNotFoundInDictionary { table: String, partition: String, source: crate::dictionary::Error, }, #[snafu(display("Table {} not found in partition {}", table, partition))] TableNotFoundInPartition { table: u32, partition: String }, #[snafu(display("Attempt to write table batch without a name"))] TableWriteWithoutName, #[snafu(display("Error restoring WAL entry, missing partition key"))] MissingPartitionKey, } pub type Result<T, E = Error> = std::result::Result<T, E>; #[derive(Debug)] pub struct Partition { pub key: String, /// `dictionary` maps &str -> u32. 
The u32s are used in place of String or str to avoid slow /// string operations. The same dictionary is used for table names, tag names, tag values, and /// column names. // TODO: intern string field values too? pub dictionary: Dictionary, /// map of the dictionary ID for the table name to the table pub tables: HashMap<u32, Table>, pub is_open: bool, } /// Describes the result of translating a set of strings into /// partition specific ids #[derive(Debug, PartialEq, Eq)] pub enum PartitionIdSet { /// At least one of the strings was not present in the partitions' /// dictionary. /// /// This is important when testing for the presence of all ids in /// a set, as we know they can not all be present AtLeastOneMissing, /// All strings existed in this partition's dictionary Present(BTreeSet<u32>), } /// a 'Compiled' set of predicates / filters that can be evaluated on /// this partition (where strings have been translated to partition /// specific u32 ids) #[derive(Debug)] pub struct PartitionPredicate { /// If present, restrict the request to just those tables whose /// names are in table_names. If present but empty, means there /// was a predicate but no tables named that way exist in the /// partition (so no table can pass) pub table_name_predicate: Option<BTreeSet<u32>>, // Optional field column selection. If present, further restrict // any field columns returnedto only those named pub field_restriction: Option<BTreeSet<u32>>, /// General DataFusion expressions (arbitrary predicates) applied /// as a filter using logical conjuction (aka are 'AND'ed /// together). Only rows that evaluate to TRUE for all these /// expressions should be returned. 
pub partition_exprs: Vec<Expr>, /// If Some, then the table must contain all columns specified /// to pass the predicate pub required_columns: Option<PartitionIdSet>, /// The id of the "time" column in this partition pub time_column_id: u32, /// Timestamp range: only rows within this range should be considered pub range: Option<TimestampRange>, } impl PartitionPredicate { /// Creates and adds a datafuson predicate representing the /// combination of predicate and timestamp. pub fn filter_expr(&self) -> Option<Expr> { // build up a list of expressions let mut builder = AndExprBuilder::default().append_opt(self.make_timestamp_predicate_expr()); for expr in &self.partition_exprs { builder = builder.append_expr(expr.clone()); } builder.build() } /// Return true if there is a non empty field restriction pub fn has_field_restriction(&self) -> bool { match &self.field_restriction { None => false, Some(field_restiction) =>!field_restiction.is_empty(), } } /// For plans which select a subset of fields, returns true if /// the field should be included in the results pub fn should_include_field(&self, field_id: u32) -> bool
/// Return true if this column is the time column pub fn is_time_column(&self, id: u32) -> bool { self.time_column_id == id } /// Creates a DataFusion predicate for appliying a timestamp range: /// /// range.start <= time and time < range.end` fn make_timestamp_predicate_expr(&self) -> Option<Expr> { self.range.map(|range| make_range_expr(&range)) } } /// Creates expression like: /// range.low <= time && time < range.high fn make_range_expr(range: &TimestampRange) -> Expr { let ts_low = Expr::BinaryExpr { left: Box::new(Expr::Literal(ScalarValue::Int64(Some(range.start)))), op: Operator::LtEq, right: Box::new(Expr::Column(TIME_COLUMN_NAME.into())), }; let ts_high = Expr::BinaryExpr { left: Box::new(Expr::Column(TIME_COLUMN_NAME.into())), op: Operator::Lt, right: Box::new(Expr::Literal(ScalarValue::Int64(Some(range.end)))), }; AndExprBuilder::default() .append_expr(ts_low) .append_expr(ts_high) .build() .unwrap() } impl Partition { pub fn new(key: impl Into<String>) -> Self { Self { key: key.into(), dictionary: Dictionary::new(), tables: HashMap::new(), is_open: true, } } pub fn write_entry(&mut self, entry: &wb::WriteBufferEntry<'_>) -> Result<()> { if let Some(table_batches) = entry.table_batches() { for batch in table_batches { self.write_table_batch(&batch)?; } } Ok(()) } fn write_table_batch(&mut self, batch: &wb::TableWriteBatch<'_>) -> Result<()> { let table_name = batch.name().context(TableWriteWithoutName)?; let table_id = self.dictionary.lookup_value_or_insert(table_name); let table = self .tables .entry(table_id) .or_insert_with(|| Table::new(table_id)); if let Some(rows) = batch.rows() { table .append_rows(&mut self.dictionary, &rows) .context(TableWrite { table_name })?; } Ok(()) } /// Translates `predicate` into per-partition ids that can be /// directly evaluated against tables in this partition pub fn compile_predicate(&self, predicate: &Predicate) -> Result<PartitionPredicate> { let table_name_predicate = 
self.compile_string_list(predicate.table_names.as_ref()); let field_restriction = self.compile_string_list(predicate.field_columns.as_ref()); let time_column_id = self .dictionary .lookup_value(TIME_COLUMN_NAME) .expect("time is in the partition dictionary"); let range = predicate.range; // it would be nice to avoid cloning all the exprs here. let partition_exprs = predicate.exprs.clone(); // In order to evaluate expressions in the table, all columns // referenced in the expression must appear (I think, not sure // about NOT, etc so panic if we see one of those); let mut visitor = SupportVisitor {}; let mut predicate_columns: HashSet<String> = HashSet::new(); for expr in &partition_exprs { visit_expression(expr, &mut visitor); expr_to_column_names(&expr, &mut predicate_columns).unwrap(); } // if there are any column references in the expression, ensure they appear in any table let required_columns = if predicate_columns.is_empty() { None } else { Some(self.make_partition_ids(predicate_columns.iter())) }; Ok(PartitionPredicate { table_name_predicate, field_restriction, partition_exprs, required_columns, time_column_id, range, }) } /// Converts a potential set of strings into a set of ids in terms /// of this dictionary. If there are no matching Strings in the /// partitions dictionary, those strings are ignored and a /// (potentially empty) set is returned. 
fn compile_string_list(&self, names: Option<&BTreeSet<String>>) -> Option<BTreeSet<u32>> { names.map(|names| { names .iter() .filter_map(|name| self.dictionary.id(name)) .collect::<BTreeSet<_>>() }) } /// Adds the ids of any columns in additional_required_columns to the required columns of predicate pub fn add_required_columns_to_predicate( &self, additional_required_columns: &HashSet<String>, predicate: &mut PartitionPredicate, ) { for column_name in additional_required_columns { // Once know we have missing columns, no need to try // and figure out if these any additional columns are needed if Some(PartitionIdSet::AtLeastOneMissing) == predicate.required_columns { return; } let column_id = self.dictionary.id(column_name); // Update the required colunm list predicate.required_columns = Some(match predicate.required_columns.take() { None => { if let Some(column_id) = column_id { let mut symbols = BTreeSet::new(); symbols.insert(column_id); PartitionIdSet::Present(symbols) } else { PartitionIdSet::AtLeastOneMissing } } Some(PartitionIdSet::Present(mut symbols)) => { if let Some(column_id) = column_id { symbols.insert(column_id); PartitionIdSet::Present(symbols) } else { PartitionIdSet::AtLeastOneMissing } } Some(PartitionIdSet::AtLeastOneMissing) => { unreachable!("Covered by case above while adding required columns to predicate") } }); } } /// returns true if data with partition key `key` should be /// written to this partition, pub fn should_write(&self, key: &str) -> bool { self.key.starts_with(key) && self.is_open } /// Convert the table specified in this partition into an arrow record batch pub fn table_to_arrow(&self, table_name: &str, columns: &[&str]) -> Result<RecordBatch> { let table_id = self.dictionary .lookup_value(table_name) .context(TableNameNotFoundInDictionary { table: table_name, partition: &self.key, })?; let table = self .tables .get(&table_id) .context(TableNotFoundInPartition { table: table_id, partition: &self.key, })?; table .to_arrow(&self, 
columns) .context(NamedTableError { table_name }) } /// Translate a bunch of strings into a set of ids relative to this partition pub fn make_partition_ids<'a, I>(&self, predicate_columns: I) -> PartitionIdSet where I: Iterator<Item = &'a String>, { let mut symbols = BTreeSet::new(); for column_name in predicate_columns { if let Some(column_id) = self.dictionary.id(column_name) { symbols.insert(column_id); } else { return PartitionIdSet::AtLeastOneMissing; } } PartitionIdSet::Present(symbols) } } /// Used to figure out if we know how to deal with this kind of /// predicate in the write buffer struct SupportVisitor {} impl ExpressionVisitor for SupportVisitor { fn pre_visit(&mut self, expr: &Expr) { match expr { Expr::Literal(..) => {} Expr::Column(..) => {} Expr::BinaryExpr { op,.. } => { match op { Operator::Eq | Operator::Lt | Operator::LtEq | Operator::Gt | Operator::GtEq | Operator::Plus | Operator::Minus | Operator::Multiply | Operator::Divide | Operator::And | Operator::Or => {} // Unsupported (need to think about ramifications) Operator::NotEq | Operator::Modulus | Operator::Like | Operator::NotLike => { panic!("Unsupported binary operator in expression: {:?}", expr) } } } _ => panic!( "Unsupported expression in write_buffer database: {:?}", expr ), } } } #[derive(Default, Debug)] pub struct RestorationStats { pub row_count: usize, pub tables: BTreeSet<String>, } /// Given a set of WAL entries, restore them into a set of Partitions. 
pub fn restore_partitions_from_wal( wal_entries: impl Iterator<Item = WalResult<WalEntry>>, ) -> Result<(Vec<Partition>, RestorationStats)> { let mut stats = RestorationStats::default(); let mut partitions = BTreeMap::new(); for wal_entry in wal_entries { let wal_entry = wal_entry.context(WalEntryRead)?; let bytes = wal_entry.as_data(); let batch = flatbuffers::get_root::<wb::WriteBufferBatch<'_>>(&bytes); if let Some(entries) = batch.entries() { for entry in entries { let partition_key = entry.partition_key().context(MissingPartitionKey)?; if!partitions.contains_key(partition_key) { partitions.insert( partition_key.to_string(), Partition::new(partition_key.to_string()), ); } let partition = partitions .get_mut(partition_key) .context(PartitionNotFound { partition: partition_key, })?; partition.write_entry(&entry)?; } } } let partitions = partitions .into_iter() .map(|(_, p)| p) .collect::<Vec<Partition>>(); // compute the stats for p in &partitions { for (id, table) in &p.tables { let name = p .dictionary .lookup_id(*id) .expect("table id wasn't inserted into dictionary on restore"); if!stats.tables.contains(name) { stats.tables.insert(name.to_string()); } stats.row_count += table.row_count(); } } Ok((partitions, stats)) } #[cfg(test)] mod tests { use super::*; #[test] fn test_make_range_expr() { // Test that the generated predicate is correct let range = TimestampRange::new(101, 202); let ts_predicate_expr = make_range_expr(&range); let expected_string = "Int64(101) LtEq #time And #time Lt Int64(202)"; let actual_string = format!("{:?}", ts_predicate_expr); assert_eq!(actual_string, expected_string); } }
{ match &self.field_restriction { None => true, Some(field_restriction) => field_restriction.contains(&field_id), } }
identifier_body
partition.rs
use arrow_deps::{ arrow::record_batch::RecordBatch, datafusion::logical_plan::Expr, datafusion::logical_plan::Operator, datafusion::optimizer::utils::expr_to_column_names, datafusion::scalar::ScalarValue, }; use generated_types::wal as wb; use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; use wal::{Entry as WalEntry, Result as WalResult}; use data_types::TIME_COLUMN_NAME; use storage::{ predicate::{Predicate, TimestampRange}, util::{visit_expression, AndExprBuilder, ExpressionVisitor}, }; use crate::dictionary::Dictionary; use crate::table::Table; use snafu::{OptionExt, ResultExt, Snafu}; #[derive(Debug, Snafu)] pub enum Error { #[snafu(display("Could not read WAL entry: {}", source))] WalEntryRead { source: wal::Error }, #[snafu(display("Partition {} not found", partition))] PartitionNotFound { partition: String }, #[snafu(display( "Column name {} not found in dictionary of partition {}", column, partition ))] ColumnNameNotFoundInDictionary { column: String, partition: String, source: crate::dictionary::Error, }, #[snafu(display("Error writing table '{}': {}", table_name, source))] TableWrite { table_name: String, source: crate::table::Error, }, #[snafu(display("Table Error in '{}': {}", table_name, source))] NamedTableError { table_name: String, source: crate::table::Error, }, #[snafu(display( "Table name {} not found in dictionary of partition {}", table, partition ))] TableNameNotFoundInDictionary { table: String, partition: String, source: crate::dictionary::Error, }, #[snafu(display("Table {} not found in partition {}", table, partition))] TableNotFoundInPartition { table: u32, partition: String }, #[snafu(display("Attempt to write table batch without a name"))] TableWriteWithoutName, #[snafu(display("Error restoring WAL entry, missing partition key"))] MissingPartitionKey, } pub type Result<T, E = Error> = std::result::Result<T, E>; #[derive(Debug)] pub struct Partition { pub key: String, /// `dictionary` maps &str -> u32. 
The u32s are used in place of String or str to avoid slow /// string operations. The same dictionary is used for table names, tag names, tag values, and /// column names. // TODO: intern string field values too? pub dictionary: Dictionary, /// map of the dictionary ID for the table name to the table pub tables: HashMap<u32, Table>, pub is_open: bool, } /// Describes the result of translating a set of strings into /// partition specific ids #[derive(Debug, PartialEq, Eq)] pub enum PartitionIdSet { /// At least one of the strings was not present in the partitions' /// dictionary. /// /// This is important when testing for the presence of all ids in /// a set, as we know they can not all be present AtLeastOneMissing, /// All strings existed in this partition's dictionary Present(BTreeSet<u32>), } /// a 'Compiled' set of predicates / filters that can be evaluated on /// this partition (where strings have been translated to partition /// specific u32 ids) #[derive(Debug)] pub struct PartitionPredicate { /// If present, restrict the request to just those tables whose /// names are in table_names. If present but empty, means there /// was a predicate but no tables named that way exist in the /// partition (so no table can pass) pub table_name_predicate: Option<BTreeSet<u32>>, // Optional field column selection. If present, further restrict // any field columns returnedto only those named pub field_restriction: Option<BTreeSet<u32>>, /// General DataFusion expressions (arbitrary predicates) applied /// as a filter using logical conjuction (aka are 'AND'ed /// together). Only rows that evaluate to TRUE for all these /// expressions should be returned. 
pub partition_exprs: Vec<Expr>, /// If Some, then the table must contain all columns specified /// to pass the predicate pub required_columns: Option<PartitionIdSet>, /// The id of the "time" column in this partition pub time_column_id: u32, /// Timestamp range: only rows within this range should be considered pub range: Option<TimestampRange>, } impl PartitionPredicate { /// Creates and adds a datafuson predicate representing the /// combination of predicate and timestamp. pub fn filter_expr(&self) -> Option<Expr> { // build up a list of expressions let mut builder = AndExprBuilder::default().append_opt(self.make_timestamp_predicate_expr()); for expr in &self.partition_exprs { builder = builder.append_expr(expr.clone()); } builder.build() } /// Return true if there is a non empty field restriction pub fn has_field_restriction(&self) -> bool { match &self.field_restriction { None => false, Some(field_restiction) =>!field_restiction.is_empty(), } } /// For plans which select a subset of fields, returns true if /// the field should be included in the results pub fn should_include_field(&self, field_id: u32) -> bool { match &self.field_restriction { None => true, Some(field_restriction) => field_restriction.contains(&field_id), } } /// Return true if this column is the time column pub fn is_time_column(&self, id: u32) -> bool { self.time_column_id == id } /// Creates a DataFusion predicate for appliying a timestamp range: /// /// range.start <= time and time < range.end` fn make_timestamp_predicate_expr(&self) -> Option<Expr> { self.range.map(|range| make_range_expr(&range)) } } /// Creates expression like: /// range.low <= time && time < range.high fn make_range_expr(range: &TimestampRange) -> Expr { let ts_low = Expr::BinaryExpr { left: Box::new(Expr::Literal(ScalarValue::Int64(Some(range.start)))), op: Operator::LtEq, right: Box::new(Expr::Column(TIME_COLUMN_NAME.into())), }; let ts_high = Expr::BinaryExpr { left: Box::new(Expr::Column(TIME_COLUMN_NAME.into())), 
op: Operator::Lt, right: Box::new(Expr::Literal(ScalarValue::Int64(Some(range.end)))), }; AndExprBuilder::default() .append_expr(ts_low) .append_expr(ts_high) .build() .unwrap() } impl Partition { pub fn new(key: impl Into<String>) -> Self { Self { key: key.into(), dictionary: Dictionary::new(), tables: HashMap::new(), is_open: true, } } pub fn write_entry(&mut self, entry: &wb::WriteBufferEntry<'_>) -> Result<()> { if let Some(table_batches) = entry.table_batches() { for batch in table_batches { self.write_table_batch(&batch)?; } } Ok(()) } fn write_table_batch(&mut self, batch: &wb::TableWriteBatch<'_>) -> Result<()> { let table_name = batch.name().context(TableWriteWithoutName)?; let table_id = self.dictionary.lookup_value_or_insert(table_name); let table = self .tables .entry(table_id) .or_insert_with(|| Table::new(table_id)); if let Some(rows) = batch.rows() { table .append_rows(&mut self.dictionary, &rows) .context(TableWrite { table_name })?; } Ok(()) } /// Translates `predicate` into per-partition ids that can be /// directly evaluated against tables in this partition pub fn compile_predicate(&self, predicate: &Predicate) -> Result<PartitionPredicate> { let table_name_predicate = self.compile_string_list(predicate.table_names.as_ref()); let field_restriction = self.compile_string_list(predicate.field_columns.as_ref()); let time_column_id = self .dictionary .lookup_value(TIME_COLUMN_NAME) .expect("time is in the partition dictionary"); let range = predicate.range; // it would be nice to avoid cloning all the exprs here. 
let partition_exprs = predicate.exprs.clone(); // In order to evaluate expressions in the table, all columns // referenced in the expression must appear (I think, not sure // about NOT, etc so panic if we see one of those); let mut visitor = SupportVisitor {}; let mut predicate_columns: HashSet<String> = HashSet::new(); for expr in &partition_exprs { visit_expression(expr, &mut visitor); expr_to_column_names(&expr, &mut predicate_columns).unwrap(); } // if there are any column references in the expression, ensure they appear in any table let required_columns = if predicate_columns.is_empty() { None } else { Some(self.make_partition_ids(predicate_columns.iter())) }; Ok(PartitionPredicate { table_name_predicate, field_restriction, partition_exprs, required_columns, time_column_id,
/// of this dictionary. If there are no matching Strings in the /// partitions dictionary, those strings are ignored and a /// (potentially empty) set is returned. fn compile_string_list(&self, names: Option<&BTreeSet<String>>) -> Option<BTreeSet<u32>> { names.map(|names| { names .iter() .filter_map(|name| self.dictionary.id(name)) .collect::<BTreeSet<_>>() }) } /// Adds the ids of any columns in additional_required_columns to the required columns of predicate pub fn add_required_columns_to_predicate( &self, additional_required_columns: &HashSet<String>, predicate: &mut PartitionPredicate, ) { for column_name in additional_required_columns { // Once know we have missing columns, no need to try // and figure out if these any additional columns are needed if Some(PartitionIdSet::AtLeastOneMissing) == predicate.required_columns { return; } let column_id = self.dictionary.id(column_name); // Update the required colunm list predicate.required_columns = Some(match predicate.required_columns.take() { None => { if let Some(column_id) = column_id { let mut symbols = BTreeSet::new(); symbols.insert(column_id); PartitionIdSet::Present(symbols) } else { PartitionIdSet::AtLeastOneMissing } } Some(PartitionIdSet::Present(mut symbols)) => { if let Some(column_id) = column_id { symbols.insert(column_id); PartitionIdSet::Present(symbols) } else { PartitionIdSet::AtLeastOneMissing } } Some(PartitionIdSet::AtLeastOneMissing) => { unreachable!("Covered by case above while adding required columns to predicate") } }); } } /// returns true if data with partition key `key` should be /// written to this partition, pub fn should_write(&self, key: &str) -> bool { self.key.starts_with(key) && self.is_open } /// Convert the table specified in this partition into an arrow record batch pub fn table_to_arrow(&self, table_name: &str, columns: &[&str]) -> Result<RecordBatch> { let table_id = self.dictionary .lookup_value(table_name) .context(TableNameNotFoundInDictionary { table: table_name, 
partition: &self.key, })?; let table = self .tables .get(&table_id) .context(TableNotFoundInPartition { table: table_id, partition: &self.key, })?; table .to_arrow(&self, columns) .context(NamedTableError { table_name }) } /// Translate a bunch of strings into a set of ids relative to this partition pub fn make_partition_ids<'a, I>(&self, predicate_columns: I) -> PartitionIdSet where I: Iterator<Item = &'a String>, { let mut symbols = BTreeSet::new(); for column_name in predicate_columns { if let Some(column_id) = self.dictionary.id(column_name) { symbols.insert(column_id); } else { return PartitionIdSet::AtLeastOneMissing; } } PartitionIdSet::Present(symbols) } } /// Used to figure out if we know how to deal with this kind of /// predicate in the write buffer struct SupportVisitor {} impl ExpressionVisitor for SupportVisitor { fn pre_visit(&mut self, expr: &Expr) { match expr { Expr::Literal(..) => {} Expr::Column(..) => {} Expr::BinaryExpr { op,.. } => { match op { Operator::Eq | Operator::Lt | Operator::LtEq | Operator::Gt | Operator::GtEq | Operator::Plus | Operator::Minus | Operator::Multiply | Operator::Divide | Operator::And | Operator::Or => {} // Unsupported (need to think about ramifications) Operator::NotEq | Operator::Modulus | Operator::Like | Operator::NotLike => { panic!("Unsupported binary operator in expression: {:?}", expr) } } } _ => panic!( "Unsupported expression in write_buffer database: {:?}", expr ), } } } #[derive(Default, Debug)] pub struct RestorationStats { pub row_count: usize, pub tables: BTreeSet<String>, } /// Given a set of WAL entries, restore them into a set of Partitions. 
pub fn restore_partitions_from_wal( wal_entries: impl Iterator<Item = WalResult<WalEntry>>, ) -> Result<(Vec<Partition>, RestorationStats)> { let mut stats = RestorationStats::default(); let mut partitions = BTreeMap::new(); for wal_entry in wal_entries { let wal_entry = wal_entry.context(WalEntryRead)?; let bytes = wal_entry.as_data(); let batch = flatbuffers::get_root::<wb::WriteBufferBatch<'_>>(&bytes); if let Some(entries) = batch.entries() { for entry in entries { let partition_key = entry.partition_key().context(MissingPartitionKey)?; if!partitions.contains_key(partition_key) { partitions.insert( partition_key.to_string(), Partition::new(partition_key.to_string()), ); } let partition = partitions .get_mut(partition_key) .context(PartitionNotFound { partition: partition_key, })?; partition.write_entry(&entry)?; } } } let partitions = partitions .into_iter() .map(|(_, p)| p) .collect::<Vec<Partition>>(); // compute the stats for p in &partitions { for (id, table) in &p.tables { let name = p .dictionary .lookup_id(*id) .expect("table id wasn't inserted into dictionary on restore"); if!stats.tables.contains(name) { stats.tables.insert(name.to_string()); } stats.row_count += table.row_count(); } } Ok((partitions, stats)) } #[cfg(test)] mod tests { use super::*; #[test] fn test_make_range_expr() { // Test that the generated predicate is correct let range = TimestampRange::new(101, 202); let ts_predicate_expr = make_range_expr(&range); let expected_string = "Int64(101) LtEq #time And #time Lt Int64(202)"; let actual_string = format!("{:?}", ts_predicate_expr); assert_eq!(actual_string, expected_string); } }
range, }) } /// Converts a potential set of strings into a set of ids in terms
random_line_split
partition.rs
use arrow_deps::{ arrow::record_batch::RecordBatch, datafusion::logical_plan::Expr, datafusion::logical_plan::Operator, datafusion::optimizer::utils::expr_to_column_names, datafusion::scalar::ScalarValue, }; use generated_types::wal as wb; use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; use wal::{Entry as WalEntry, Result as WalResult}; use data_types::TIME_COLUMN_NAME; use storage::{ predicate::{Predicate, TimestampRange}, util::{visit_expression, AndExprBuilder, ExpressionVisitor}, }; use crate::dictionary::Dictionary; use crate::table::Table; use snafu::{OptionExt, ResultExt, Snafu}; #[derive(Debug, Snafu)] pub enum Error { #[snafu(display("Could not read WAL entry: {}", source))] WalEntryRead { source: wal::Error }, #[snafu(display("Partition {} not found", partition))] PartitionNotFound { partition: String }, #[snafu(display( "Column name {} not found in dictionary of partition {}", column, partition ))] ColumnNameNotFoundInDictionary { column: String, partition: String, source: crate::dictionary::Error, }, #[snafu(display("Error writing table '{}': {}", table_name, source))] TableWrite { table_name: String, source: crate::table::Error, }, #[snafu(display("Table Error in '{}': {}", table_name, source))] NamedTableError { table_name: String, source: crate::table::Error, }, #[snafu(display( "Table name {} not found in dictionary of partition {}", table, partition ))] TableNameNotFoundInDictionary { table: String, partition: String, source: crate::dictionary::Error, }, #[snafu(display("Table {} not found in partition {}", table, partition))] TableNotFoundInPartition { table: u32, partition: String }, #[snafu(display("Attempt to write table batch without a name"))] TableWriteWithoutName, #[snafu(display("Error restoring WAL entry, missing partition key"))] MissingPartitionKey, } pub type Result<T, E = Error> = std::result::Result<T, E>; #[derive(Debug)] pub struct Partition { pub key: String, /// `dictionary` maps &str -> u32. 
The u32s are used in place of String or str to avoid slow /// string operations. The same dictionary is used for table names, tag names, tag values, and /// column names. // TODO: intern string field values too? pub dictionary: Dictionary, /// map of the dictionary ID for the table name to the table pub tables: HashMap<u32, Table>, pub is_open: bool, } /// Describes the result of translating a set of strings into /// partition specific ids #[derive(Debug, PartialEq, Eq)] pub enum
{ /// At least one of the strings was not present in the partitions' /// dictionary. /// /// This is important when testing for the presence of all ids in /// a set, as we know they can not all be present AtLeastOneMissing, /// All strings existed in this partition's dictionary Present(BTreeSet<u32>), } /// a 'Compiled' set of predicates / filters that can be evaluated on /// this partition (where strings have been translated to partition /// specific u32 ids) #[derive(Debug)] pub struct PartitionPredicate { /// If present, restrict the request to just those tables whose /// names are in table_names. If present but empty, means there /// was a predicate but no tables named that way exist in the /// partition (so no table can pass) pub table_name_predicate: Option<BTreeSet<u32>>, // Optional field column selection. If present, further restrict // any field columns returnedto only those named pub field_restriction: Option<BTreeSet<u32>>, /// General DataFusion expressions (arbitrary predicates) applied /// as a filter using logical conjuction (aka are 'AND'ed /// together). Only rows that evaluate to TRUE for all these /// expressions should be returned. pub partition_exprs: Vec<Expr>, /// If Some, then the table must contain all columns specified /// to pass the predicate pub required_columns: Option<PartitionIdSet>, /// The id of the "time" column in this partition pub time_column_id: u32, /// Timestamp range: only rows within this range should be considered pub range: Option<TimestampRange>, } impl PartitionPredicate { /// Creates and adds a datafuson predicate representing the /// combination of predicate and timestamp. 
pub fn filter_expr(&self) -> Option<Expr> { // build up a list of expressions let mut builder = AndExprBuilder::default().append_opt(self.make_timestamp_predicate_expr()); for expr in &self.partition_exprs { builder = builder.append_expr(expr.clone()); } builder.build() } /// Return true if there is a non empty field restriction pub fn has_field_restriction(&self) -> bool { match &self.field_restriction { None => false, Some(field_restiction) =>!field_restiction.is_empty(), } } /// For plans which select a subset of fields, returns true if /// the field should be included in the results pub fn should_include_field(&self, field_id: u32) -> bool { match &self.field_restriction { None => true, Some(field_restriction) => field_restriction.contains(&field_id), } } /// Return true if this column is the time column pub fn is_time_column(&self, id: u32) -> bool { self.time_column_id == id } /// Creates a DataFusion predicate for appliying a timestamp range: /// /// range.start <= time and time < range.end` fn make_timestamp_predicate_expr(&self) -> Option<Expr> { self.range.map(|range| make_range_expr(&range)) } } /// Creates expression like: /// range.low <= time && time < range.high fn make_range_expr(range: &TimestampRange) -> Expr { let ts_low = Expr::BinaryExpr { left: Box::new(Expr::Literal(ScalarValue::Int64(Some(range.start)))), op: Operator::LtEq, right: Box::new(Expr::Column(TIME_COLUMN_NAME.into())), }; let ts_high = Expr::BinaryExpr { left: Box::new(Expr::Column(TIME_COLUMN_NAME.into())), op: Operator::Lt, right: Box::new(Expr::Literal(ScalarValue::Int64(Some(range.end)))), }; AndExprBuilder::default() .append_expr(ts_low) .append_expr(ts_high) .build() .unwrap() } impl Partition { pub fn new(key: impl Into<String>) -> Self { Self { key: key.into(), dictionary: Dictionary::new(), tables: HashMap::new(), is_open: true, } } pub fn write_entry(&mut self, entry: &wb::WriteBufferEntry<'_>) -> Result<()> { if let Some(table_batches) = entry.table_batches() { for 
batch in table_batches { self.write_table_batch(&batch)?; } } Ok(()) } fn write_table_batch(&mut self, batch: &wb::TableWriteBatch<'_>) -> Result<()> { let table_name = batch.name().context(TableWriteWithoutName)?; let table_id = self.dictionary.lookup_value_or_insert(table_name); let table = self .tables .entry(table_id) .or_insert_with(|| Table::new(table_id)); if let Some(rows) = batch.rows() { table .append_rows(&mut self.dictionary, &rows) .context(TableWrite { table_name })?; } Ok(()) } /// Translates `predicate` into per-partition ids that can be /// directly evaluated against tables in this partition pub fn compile_predicate(&self, predicate: &Predicate) -> Result<PartitionPredicate> { let table_name_predicate = self.compile_string_list(predicate.table_names.as_ref()); let field_restriction = self.compile_string_list(predicate.field_columns.as_ref()); let time_column_id = self .dictionary .lookup_value(TIME_COLUMN_NAME) .expect("time is in the partition dictionary"); let range = predicate.range; // it would be nice to avoid cloning all the exprs here. let partition_exprs = predicate.exprs.clone(); // In order to evaluate expressions in the table, all columns // referenced in the expression must appear (I think, not sure // about NOT, etc so panic if we see one of those); let mut visitor = SupportVisitor {}; let mut predicate_columns: HashSet<String> = HashSet::new(); for expr in &partition_exprs { visit_expression(expr, &mut visitor); expr_to_column_names(&expr, &mut predicate_columns).unwrap(); } // if there are any column references in the expression, ensure they appear in any table let required_columns = if predicate_columns.is_empty() { None } else { Some(self.make_partition_ids(predicate_columns.iter())) }; Ok(PartitionPredicate { table_name_predicate, field_restriction, partition_exprs, required_columns, time_column_id, range, }) } /// Converts a potential set of strings into a set of ids in terms /// of this dictionary. 
If there are no matching Strings in the /// partitions dictionary, those strings are ignored and a /// (potentially empty) set is returned. fn compile_string_list(&self, names: Option<&BTreeSet<String>>) -> Option<BTreeSet<u32>> { names.map(|names| { names .iter() .filter_map(|name| self.dictionary.id(name)) .collect::<BTreeSet<_>>() }) } /// Adds the ids of any columns in additional_required_columns to the required columns of predicate pub fn add_required_columns_to_predicate( &self, additional_required_columns: &HashSet<String>, predicate: &mut PartitionPredicate, ) { for column_name in additional_required_columns { // Once know we have missing columns, no need to try // and figure out if these any additional columns are needed if Some(PartitionIdSet::AtLeastOneMissing) == predicate.required_columns { return; } let column_id = self.dictionary.id(column_name); // Update the required colunm list predicate.required_columns = Some(match predicate.required_columns.take() { None => { if let Some(column_id) = column_id { let mut symbols = BTreeSet::new(); symbols.insert(column_id); PartitionIdSet::Present(symbols) } else { PartitionIdSet::AtLeastOneMissing } } Some(PartitionIdSet::Present(mut symbols)) => { if let Some(column_id) = column_id { symbols.insert(column_id); PartitionIdSet::Present(symbols) } else { PartitionIdSet::AtLeastOneMissing } } Some(PartitionIdSet::AtLeastOneMissing) => { unreachable!("Covered by case above while adding required columns to predicate") } }); } } /// returns true if data with partition key `key` should be /// written to this partition, pub fn should_write(&self, key: &str) -> bool { self.key.starts_with(key) && self.is_open } /// Convert the table specified in this partition into an arrow record batch pub fn table_to_arrow(&self, table_name: &str, columns: &[&str]) -> Result<RecordBatch> { let table_id = self.dictionary .lookup_value(table_name) .context(TableNameNotFoundInDictionary { table: table_name, partition: &self.key, })?; let 
table = self .tables .get(&table_id) .context(TableNotFoundInPartition { table: table_id, partition: &self.key, })?; table .to_arrow(&self, columns) .context(NamedTableError { table_name }) } /// Translate a bunch of strings into a set of ids relative to this partition pub fn make_partition_ids<'a, I>(&self, predicate_columns: I) -> PartitionIdSet where I: Iterator<Item = &'a String>, { let mut symbols = BTreeSet::new(); for column_name in predicate_columns { if let Some(column_id) = self.dictionary.id(column_name) { symbols.insert(column_id); } else { return PartitionIdSet::AtLeastOneMissing; } } PartitionIdSet::Present(symbols) } } /// Used to figure out if we know how to deal with this kind of /// predicate in the write buffer struct SupportVisitor {} impl ExpressionVisitor for SupportVisitor { fn pre_visit(&mut self, expr: &Expr) { match expr { Expr::Literal(..) => {} Expr::Column(..) => {} Expr::BinaryExpr { op,.. } => { match op { Operator::Eq | Operator::Lt | Operator::LtEq | Operator::Gt | Operator::GtEq | Operator::Plus | Operator::Minus | Operator::Multiply | Operator::Divide | Operator::And | Operator::Or => {} // Unsupported (need to think about ramifications) Operator::NotEq | Operator::Modulus | Operator::Like | Operator::NotLike => { panic!("Unsupported binary operator in expression: {:?}", expr) } } } _ => panic!( "Unsupported expression in write_buffer database: {:?}", expr ), } } } #[derive(Default, Debug)] pub struct RestorationStats { pub row_count: usize, pub tables: BTreeSet<String>, } /// Given a set of WAL entries, restore them into a set of Partitions. 
pub fn restore_partitions_from_wal( wal_entries: impl Iterator<Item = WalResult<WalEntry>>, ) -> Result<(Vec<Partition>, RestorationStats)> { let mut stats = RestorationStats::default(); let mut partitions = BTreeMap::new(); for wal_entry in wal_entries { let wal_entry = wal_entry.context(WalEntryRead)?; let bytes = wal_entry.as_data(); let batch = flatbuffers::get_root::<wb::WriteBufferBatch<'_>>(&bytes); if let Some(entries) = batch.entries() { for entry in entries { let partition_key = entry.partition_key().context(MissingPartitionKey)?; if!partitions.contains_key(partition_key) { partitions.insert( partition_key.to_string(), Partition::new(partition_key.to_string()), ); } let partition = partitions .get_mut(partition_key) .context(PartitionNotFound { partition: partition_key, })?; partition.write_entry(&entry)?; } } } let partitions = partitions .into_iter() .map(|(_, p)| p) .collect::<Vec<Partition>>(); // compute the stats for p in &partitions { for (id, table) in &p.tables { let name = p .dictionary .lookup_id(*id) .expect("table id wasn't inserted into dictionary on restore"); if!stats.tables.contains(name) { stats.tables.insert(name.to_string()); } stats.row_count += table.row_count(); } } Ok((partitions, stats)) } #[cfg(test)] mod tests { use super::*; #[test] fn test_make_range_expr() { // Test that the generated predicate is correct let range = TimestampRange::new(101, 202); let ts_predicate_expr = make_range_expr(&range); let expected_string = "Int64(101) LtEq #time And #time Lt Int64(202)"; let actual_string = format!("{:?}", ts_predicate_expr); assert_eq!(actual_string, expected_string); } }
PartitionIdSet
identifier_name
mod.rs
ToTaskNum(usize), AllHistory, } impl From<&usize> for ResponseType { fn from(u: &usize) -> Self { ResponseType::ToTaskNum(*u) } } // :shrug: impl From<&ResponseType> for ResponseType { fn from(r: &ResponseType) -> Self { *r } } /// Given identifiers for a workflow/run, and a test history builder, construct an instance of /// the core SDK with a mock server gateway that will produce the responses as appropriate. /// /// `response_batches` is used to control the fake [PollWorkflowTaskQueueResponse]s returned. For /// each number in the input list, a fake response will be prepared which includes history up to the /// workflow task with that number, as in [TestHistoryBuilder::get_history_info]. pub(crate) fn build_fake_core( wf_id: &str, t: TestHistoryBuilder, response_batches: impl IntoIterator<Item = impl Into<ResponseType>>, ) -> CoreSDK { let response_batches = response_batches.into_iter().map(Into::into).collect(); let mock_gateway = build_multihist_mock_sg( vec![FakeWfResponses { wf_id: wf_id.to_owned(), hist: t, response_batches, task_q: TEST_Q.to_owned(), }], true, None, ); mock_core(mock_gateway) } pub(crate) fn mock_core<SG>(mocks: MocksHolder<SG>) -> CoreSDK where SG: ServerGatewayApis + Send + Sync +'static, { mock_core_with_opts(mocks, CoreInitOptionsBuilder::default()) } pub(crate) fn mock_core_with_opts<SG>( mocks: MocksHolder<SG>, opts: CoreInitOptionsBuilder, ) -> CoreSDK where SG: ServerGatewayApis + Send + Sync +'static, { let mut core = mock_core_with_opts_no_workers(mocks.sg, opts); register_mock_workers(&mut core, mocks.mock_pollers.into_values()); core } pub(crate) fn register_mock_workers( core: &mut CoreSDK, mocks: impl IntoIterator<Item = MockWorker>, ) { for worker in mocks { core.reg_worker_sync(worker); } } pub(crate) fn mock_core_with_opts_no_workers<SG>( sg: SG, mut opts: CoreInitOptionsBuilder, ) -> CoreSDK where SG: ServerGatewayApis + Send + Sync +'static, { CoreSDK::new(sg, opts.gateway_opts(fake_sg_opts()).build().unwrap()) } pub 
struct FakeWfResponses { pub wf_id: String, pub hist: TestHistoryBuilder, pub response_batches: Vec<ResponseType>, pub task_q: String, } // TODO: turn this into a builder or make a new one? to make all these different build fns simpler pub struct MocksHolder<SG> { sg: SG, mock_pollers: HashMap<String, MockWorker>, pub outstanding_task_map: Option<Arc<RwLock<BiMap<String, TaskToken>>>>, } impl<SG> MocksHolder<SG> { pub fn worker_cfg(&mut self, task_q: &str, mutator: impl FnOnce(&mut WorkerConfig)) { if let Some(w) = self.mock_pollers.get_mut(task_q) { mutator(&mut w.config); } } pub fn take_pollers(self) -> HashMap<String, MockWorker> { self.mock_pollers } } pub struct MockWorker { pub wf_poller: BoxedWFPoller, pub act_poller: Option<BoxedActPoller>, pub config: WorkerConfig, } impl Default for MockWorker { fn default() -> Self { MockWorker { wf_poller: Box::from(mock_poller()), act_poller: None, config: WorkerConfig::default_test_q(), } } } impl MockWorker { pub fn new(q: &str, wf_poller: BoxedWFPoller) -> Self { MockWorker { wf_poller, act_poller: None, config: WorkerConfig::default(q), } } pub fn for_queue(q: &str) -> Self { MockWorker { wf_poller: Box::from(mock_poller()), act_poller: None, config: WorkerConfig::default(q), } } } impl<SG> MocksHolder<SG> where SG: ServerGatewayApis + Send + Sync +'static, { pub fn from_mock_workers( sg: SG, mock_workers: impl IntoIterator<Item = MockWorker>, ) -> MocksHolder<SG> { let mock_pollers = mock_workers .into_iter() .map(|w| (w.config.task_queue.clone(), w)) .collect(); MocksHolder { sg, mock_pollers, outstanding_task_map: None, } } /// Uses the provided list of tasks to create a mock poller for the `TEST_Q` pub fn from_gateway_with_responses( sg: SG, wf_tasks: VecDeque<PollWorkflowTaskQueueResponse>, act_tasks: VecDeque<PollActivityTaskQueueResponse>, ) -> MocksHolder<SG> { let mut mock_pollers = HashMap::new(); let mock_poller = mock_poller_from_resps(wf_tasks); let mock_act_poller = mock_poller_from_resps(act_tasks); 
mock_pollers.insert( TEST_Q.to_string(), MockWorker { wf_poller: mock_poller, act_poller: Some(mock_act_poller), config: WorkerConfigBuilder::default() .task_queue(TEST_Q) .build() .unwrap(), }, ); MocksHolder { sg, mock_pollers, outstanding_task_map: None, } } } pub fn mock_poller_from_resps<T>(mut tasks: VecDeque<T>) -> BoxedPoller<T> where T: Send + Sync +'static, { let mut mock_poller = mock_poller(); mock_poller.expect_poll().returning(move || { if let Some(t) = tasks.pop_front() { Some(Ok(t)) } else { Some(Err(tonic::Status::out_of_range( "Ran out of mock responses!", ))) } }); Box::new(mock_poller) as BoxedPoller<T> } pub fn mock_poller<T>() -> MockPoller<T> where T: Send + Sync +'static, { let mut mock_poller = MockPoller::new(); mock_poller.expect_shutdown_box().return_const(()); mock_poller.expect_notify_shutdown().return_const(()); mock_poller } pub fn mock_manual_poller<T>() -> MockManualPoller<T> where T: Send + Sync +'static, { let mut mock_poller = MockManualPoller::new(); mock_poller .expect_shutdown_box() .returning(|| async {}.boxed()); mock_poller.expect_notify_shutdown().return_const(()); mock_poller } /// Build a mock server gateway capable of returning multiple different histories for different /// workflows. It does so by tracking outstanding workflow tasks like is also happening in core /// (which is unfortunately a bit redundant, we could provide hooks in core but that feels a little /// nasty). If there is an outstanding task for a given workflow, new chunks of its history are not /// returned. If there is not, the next batch of history is returned for any workflow without an /// outstanding task. Outstanding tasks are cleared on completion, failure, or eviction. /// /// `num_expected_fails` can be provided to set a specific number of expected failed workflow tasks /// sent to the server. 
pub fn build_multihist_mock_sg( hists: impl IntoIterator<Item = FakeWfResponses>, enforce_correct_number_of_polls: bool, num_expected_fails: Option<usize>, ) -> MocksHolder<MockServerGatewayApis> { let mh = MockPollCfg::new( hists.into_iter().collect(), enforce_correct_number_of_polls, num_expected_fails, ); build_mock_pollers(mh) } /// See [build_multihist_mock_sg] -- one history convenience version pub fn single_hist_mock_sg( wf_id: &str, t: TestHistoryBuilder, response_batches: impl IntoIterator<Item = impl Into<ResponseType>>, mock_gateway: MockServerGatewayApis, enforce_num_polls: bool, ) -> MocksHolder<MockServerGatewayApis> { let mut mh = MockPollCfg::from_resp_batches(wf_id, t, response_batches, mock_gateway); mh.enforce_correct_number_of_polls = enforce_num_polls; build_mock_pollers(mh) } pub struct MockPollCfg { pub hists: Vec<FakeWfResponses>, pub enforce_correct_number_of_polls: bool, pub num_expected_fails: Option<usize>, pub mock_gateway: MockServerGatewayApis, /// All calls to fail WFTs must match this predicate pub expect_fail_wft_matcher: Box<dyn Fn(&TaskToken, &WorkflowTaskFailedCause, &Option<Failure>) -> bool + Send>, } impl MockPollCfg { pub fn new( hists: Vec<FakeWfResponses>, enforce_correct_number_of_polls: bool,
num_expected_fails, mock_gateway: MockServerGatewayApis::new(), expect_fail_wft_matcher: Box::new(|_, _, _| true), } } pub fn from_resp_batches( wf_id: &str, t: TestHistoryBuilder, resps: impl IntoIterator<Item = impl Into<ResponseType>>, mock_gateway: MockServerGatewayApis, ) -> Self { Self { hists: vec![FakeWfResponses { wf_id: wf_id.to_owned(), hist: t, response_batches: resps.into_iter().map(Into::into).collect(), task_q: TEST_Q.to_owned(), }], enforce_correct_number_of_polls: true, num_expected_fails: None, mock_gateway, expect_fail_wft_matcher: Box::new(|_, _, _| true), } } } /// Given an iterable of fake responses, return the mocks & associated data to work with them pub fn build_mock_pollers(mut cfg: MockPollCfg) -> MocksHolder<MockServerGatewayApis> { // Maps task queues to maps of wfid -> responses let mut task_queues_to_resps: HashMap<String, BTreeMap<String, VecDeque<_>>> = HashMap::new(); let outstanding_wf_task_tokens = Arc::new(RwLock::new(BiMap::new())); let mut correct_num_polls = None; for hist in cfg.hists { let full_hist_info = hist.hist.get_full_history_info().unwrap(); // Ensure no response batch is trying to return more tasks than the history contains for respt in &hist.response_batches { if let ResponseType::ToTaskNum(rb_wf_num) = respt { assert!( *rb_wf_num <= full_hist_info.wf_task_count(), "Wf task count {} is not <= total task count {}", rb_wf_num, full_hist_info.wf_task_count() ); } } // TODO: Fix -- or not? 
Sticky invalidation could make this pointless anyway // Verify response batches only ever return longer histories (IE: Are sorted ascending) // assert!( // hist.response_batches // .as_slice() // .windows(2) // .all(|w| w[0] <= w[1]), // "response batches must have increasing wft numbers" // ); if cfg.enforce_correct_number_of_polls { *correct_num_polls.get_or_insert(0) += hist.response_batches.len(); } // Convert history batches into poll responses, while also tracking how many times a given // history has been returned so we can increment the associated attempt number on the WFT. // NOTE: This is hard to use properly with the `AfterEveryReply` testing eviction mode. // Such usages need a history different from other eviction modes which would include // WFT timeouts or something to simulate the task getting dropped. let mut attempts_at_task_num = HashMap::new(); let responses: Vec<_> = hist .response_batches .iter() .map(|to_task_num| { let cur_attempt = attempts_at_task_num.entry(to_task_num).or_insert(1); let mut r = hist_to_poll_resp( &hist.hist, hist.wf_id.to_owned(), *to_task_num, hist.task_q.clone(), ); r.attempt = *cur_attempt; *cur_attempt += 1; r }) .collect(); let tasks = VecDeque::from(responses); task_queues_to_resps .entry(hist.task_q) .or_default() .insert(hist.wf_id, tasks); } let mut mock_pollers = HashMap::new(); for (task_q, mut queue_tasks) in task_queues_to_resps.into_iter() { let mut mock_poller = mock_poller(); // The poller will return history from any workflow runs that do not have currently // outstanding tasks. 
let outstanding = outstanding_wf_task_tokens.clone(); mock_poller .expect_poll() .times( correct_num_polls .map::<TimesRange, _>(Into::into) .unwrap_or_else(|| RangeFull.into()), ) .returning(move || { for (_, tasks) in queue_tasks.iter_mut() { // Must extract run id from a workflow task associated with this workflow // TODO: Case where run id changes for same workflow id is not handled here if let Some(t) = tasks.get(0) { let rid = t.workflow_execution.as_ref().unwrap().run_id.clone(); if!outstanding.read().contains_left(&rid) { let t = tasks.pop_front().unwrap(); outstanding .write() .insert(rid, TaskToken(t.task_token.clone())); return Some(Ok(t)); } } } Some(Err(tonic::Status::cancelled("No more work to do"))) }); let mw = MockWorker::new(&task_q, Box::from(mock_poller)); mock_pollers.insert(task_q, mw); } let outstanding = outstanding_wf_task_tokens.clone(); cfg.mock_gateway .expect_complete_workflow_task() .returning(move |comp| { outstanding.write().remove_by_right(&comp.task_token); Ok(RespondWorkflowTaskCompletedResponse::default()) }); let outstanding = outstanding_wf_task_tokens.clone(); cfg.mock_gateway .expect_fail_workflow_task() .withf(cfg.expect_fail_wft_matcher) .times( cfg.num_expected_fails .map::<TimesRange, _>(Into::into) .unwrap_or_else(|| RangeFull.into()), ) .returning(move |tt, _, _| { outstanding.write().remove_by_right(&tt); Ok(Default::default()) }); cfg.mock_gateway .expect_start_workflow() .returning(|_, _, _, _, _| Ok(Default::default())); MocksHolder { sg: cfg.mock_gateway, mock_pollers, outstanding_task_map: Some(outstanding_wf_task_tokens), } } pub fn hist_to_poll_resp( t: &TestHistoryBuilder, wf_id: String, response_type: ResponseType, task_queue: String, ) -> PollWorkflowTaskQueueResponse { let run_id = t.get_orig_run_id(); let wf = WorkflowExecution { workflow_id: wf_id, run_id: run_id.to_string(), }; let hist_info = match response_type { ResponseType::ToTaskNum(tn) => t.get_history_info(tn).unwrap(), ResponseType::AllHistory => 
t.get_full_history_info().unwrap(), }; let batch = hist_info.events().to_vec(); let task_token: [u8; 16] = thread_rng().gen(); PollWorkflowTaskQueueResponse { history: Some(History { events: batch }), workflow_execution: Some(wf), task_token: task_token.to_vec(), workflow_type: Some(WorkflowType { name: DEFAULT_WORKFLOW_TYPE.to_owned(), }), workflow_execution_task_queue: Some(TaskQueue { name: task_queue, kind: TaskQueueKind::Normal as i32, }), previous_started_event_id: hist_info.previous_started_event_id, started_event_id: hist_info.workflow_task_started_event_id, ..Default::default() } } pub fn fake_sg_opts() -> ServerGatewayOptions { ServerGatewayOptions { target_url: Url::from_str("https://fake").unwrap(), namespace: "".to_string(), client_name: "".to_string(), client_version: "".to_string(), static_headers: Default::default(), identity: "".to_string(), worker_binary_id: "".to_string(), tls_cfg: None, retry_config: Default::default(), } } type AsserterWithReply<'a> = (&'a dyn Fn(&WfActivation), wf_activation_completion::Status); /// This function accepts a list of asserts and replies to workflow activations to run against the /// provided instance of fake core. /// /// It handles the business of re-sending the same activation replies over again in the event /// of eviction or workflow activation failure. 
Activation failures specifically are only run once, /// since they clearly can't be returned every time we replay the workflow, or it could never /// proceed pub(crate) async fn poll_and_reply<'a>( core: &'a CoreSDK, eviction_mode: WorkflowCachingPolicy, expect_and_reply: &'a [AsserterWithReply<'a>], ) { poll_and_reply_clears_outstanding_evicts(core, None, eviction_mode, expect_and_reply).await } pub(crate) async fn poll_and_reply_clears_outstanding_evicts<'a>( core: &'a CoreSDK, outstanding_map: Option<Arc<RwLock<BiMap<String, TaskToken>>>>, eviction_mode: WorkflowCachingPolicy, expect_and_reply: &'a [AsserterWithReply<'a>], ) { let mut evictions = 0; let expected_evictions = expect_and_reply.len() - 1; let mut executed_failures = HashSet::new(); let expected_fail_count = expect_and_reply .iter() .filter(|(_, reply)|!reply.is_success()) .count(); 'outer: loop { let expect_iter = expect_and_reply.iter(); for (i, interaction) in expect_iter.enumerate() { let (asserter, reply) = interaction; let complete_is_failure =!reply.is_success(); // Only send activation failures once if executed_failures.contains(&i) { continue; } let mut res = core.poll_workflow_activation(TEST_Q).await.unwrap(); let contains_eviction = res.eviction_index(); if let Some(eviction_job_ix) = contains_eviction { // If the job list has an eviction, make sure it was the last item in the list // then remove it, since in the tests we don't explicitly specify evict assertions assert_eq!( eviction_job_ix, res.jobs.len() - 1, "Eviction job was not last job in job list" ); res.jobs.remove(eviction_job_ix); if let Some(omap) = outstanding_map.as_ref() { omap.write().remove_by_left(&res.run_id);
num_expected_fails: Option<usize>, ) -> Self { Self { hists, enforce_correct_number_of_polls,
random_line_split
mod.rs
ToTaskNum(usize), AllHistory, } impl From<&usize> for ResponseType { fn from(u: &usize) -> Self { ResponseType::ToTaskNum(*u) } } // :shrug: impl From<&ResponseType> for ResponseType { fn from(r: &ResponseType) -> Self { *r } } /// Given identifiers for a workflow/run, and a test history builder, construct an instance of /// the core SDK with a mock server gateway that will produce the responses as appropriate. /// /// `response_batches` is used to control the fake [PollWorkflowTaskQueueResponse]s returned. For /// each number in the input list, a fake response will be prepared which includes history up to the /// workflow task with that number, as in [TestHistoryBuilder::get_history_info]. pub(crate) fn build_fake_core( wf_id: &str, t: TestHistoryBuilder, response_batches: impl IntoIterator<Item = impl Into<ResponseType>>, ) -> CoreSDK { let response_batches = response_batches.into_iter().map(Into::into).collect(); let mock_gateway = build_multihist_mock_sg( vec![FakeWfResponses { wf_id: wf_id.to_owned(), hist: t, response_batches, task_q: TEST_Q.to_owned(), }], true, None, ); mock_core(mock_gateway) } pub(crate) fn mock_core<SG>(mocks: MocksHolder<SG>) -> CoreSDK where SG: ServerGatewayApis + Send + Sync +'static, { mock_core_with_opts(mocks, CoreInitOptionsBuilder::default()) } pub(crate) fn mock_core_with_opts<SG>( mocks: MocksHolder<SG>, opts: CoreInitOptionsBuilder, ) -> CoreSDK where SG: ServerGatewayApis + Send + Sync +'static, { let mut core = mock_core_with_opts_no_workers(mocks.sg, opts); register_mock_workers(&mut core, mocks.mock_pollers.into_values()); core } pub(crate) fn register_mock_workers( core: &mut CoreSDK, mocks: impl IntoIterator<Item = MockWorker>, ) { for worker in mocks { core.reg_worker_sync(worker); } } pub(crate) fn mock_core_with_opts_no_workers<SG>( sg: SG, mut opts: CoreInitOptionsBuilder, ) -> CoreSDK where SG: ServerGatewayApis + Send + Sync +'static, { CoreSDK::new(sg, opts.gateway_opts(fake_sg_opts()).build().unwrap()) } pub 
struct FakeWfResponses { pub wf_id: String, pub hist: TestHistoryBuilder, pub response_batches: Vec<ResponseType>, pub task_q: String, } // TODO: turn this into a builder or make a new one? to make all these different build fns simpler pub struct MocksHolder<SG> { sg: SG, mock_pollers: HashMap<String, MockWorker>, pub outstanding_task_map: Option<Arc<RwLock<BiMap<String, TaskToken>>>>, } impl<SG> MocksHolder<SG> { pub fn worker_cfg(&mut self, task_q: &str, mutator: impl FnOnce(&mut WorkerConfig)) { if let Some(w) = self.mock_pollers.get_mut(task_q) { mutator(&mut w.config); } } pub fn take_pollers(self) -> HashMap<String, MockWorker> { self.mock_pollers } } pub struct MockWorker { pub wf_poller: BoxedWFPoller, pub act_poller: Option<BoxedActPoller>, pub config: WorkerConfig, } impl Default for MockWorker { fn default() -> Self { MockWorker { wf_poller: Box::from(mock_poller()), act_poller: None, config: WorkerConfig::default_test_q(), } } } impl MockWorker { pub fn new(q: &str, wf_poller: BoxedWFPoller) -> Self { MockWorker { wf_poller, act_poller: None, config: WorkerConfig::default(q), } } pub fn for_queue(q: &str) -> Self { MockWorker { wf_poller: Box::from(mock_poller()), act_poller: None, config: WorkerConfig::default(q), } } } impl<SG> MocksHolder<SG> where SG: ServerGatewayApis + Send + Sync +'static, { pub fn from_mock_workers( sg: SG, mock_workers: impl IntoIterator<Item = MockWorker>, ) -> MocksHolder<SG> { let mock_pollers = mock_workers .into_iter() .map(|w| (w.config.task_queue.clone(), w)) .collect(); MocksHolder { sg, mock_pollers, outstanding_task_map: None, } } /// Uses the provided list of tasks to create a mock poller for the `TEST_Q` pub fn from_gateway_with_responses( sg: SG, wf_tasks: VecDeque<PollWorkflowTaskQueueResponse>, act_tasks: VecDeque<PollActivityTaskQueueResponse>, ) -> MocksHolder<SG> { let mut mock_pollers = HashMap::new(); let mock_poller = mock_poller_from_resps(wf_tasks); let mock_act_poller = mock_poller_from_resps(act_tasks); 
mock_pollers.insert( TEST_Q.to_string(), MockWorker { wf_poller: mock_poller, act_poller: Some(mock_act_poller), config: WorkerConfigBuilder::default() .task_queue(TEST_Q) .build() .unwrap(), }, ); MocksHolder { sg, mock_pollers, outstanding_task_map: None, } } } pub fn mock_poller_from_resps<T>(mut tasks: VecDeque<T>) -> BoxedPoller<T> where T: Send + Sync +'static, { let mut mock_poller = mock_poller(); mock_poller.expect_poll().returning(move || { if let Some(t) = tasks.pop_front()
else { Some(Err(tonic::Status::out_of_range( "Ran out of mock responses!", ))) } }); Box::new(mock_poller) as BoxedPoller<T> } pub fn mock_poller<T>() -> MockPoller<T> where T: Send + Sync +'static, { let mut mock_poller = MockPoller::new(); mock_poller.expect_shutdown_box().return_const(()); mock_poller.expect_notify_shutdown().return_const(()); mock_poller } pub fn mock_manual_poller<T>() -> MockManualPoller<T> where T: Send + Sync +'static, { let mut mock_poller = MockManualPoller::new(); mock_poller .expect_shutdown_box() .returning(|| async {}.boxed()); mock_poller.expect_notify_shutdown().return_const(()); mock_poller } /// Build a mock server gateway capable of returning multiple different histories for different /// workflows. It does so by tracking outstanding workflow tasks like is also happening in core /// (which is unfortunately a bit redundant, we could provide hooks in core but that feels a little /// nasty). If there is an outstanding task for a given workflow, new chunks of its history are not /// returned. If there is not, the next batch of history is returned for any workflow without an /// outstanding task. Outstanding tasks are cleared on completion, failure, or eviction. /// /// `num_expected_fails` can be provided to set a specific number of expected failed workflow tasks /// sent to the server. 
pub fn build_multihist_mock_sg( hists: impl IntoIterator<Item = FakeWfResponses>, enforce_correct_number_of_polls: bool, num_expected_fails: Option<usize>, ) -> MocksHolder<MockServerGatewayApis> { let mh = MockPollCfg::new( hists.into_iter().collect(), enforce_correct_number_of_polls, num_expected_fails, ); build_mock_pollers(mh) } /// See [build_multihist_mock_sg] -- one history convenience version pub fn single_hist_mock_sg( wf_id: &str, t: TestHistoryBuilder, response_batches: impl IntoIterator<Item = impl Into<ResponseType>>, mock_gateway: MockServerGatewayApis, enforce_num_polls: bool, ) -> MocksHolder<MockServerGatewayApis> { let mut mh = MockPollCfg::from_resp_batches(wf_id, t, response_batches, mock_gateway); mh.enforce_correct_number_of_polls = enforce_num_polls; build_mock_pollers(mh) } pub struct MockPollCfg { pub hists: Vec<FakeWfResponses>, pub enforce_correct_number_of_polls: bool, pub num_expected_fails: Option<usize>, pub mock_gateway: MockServerGatewayApis, /// All calls to fail WFTs must match this predicate pub expect_fail_wft_matcher: Box<dyn Fn(&TaskToken, &WorkflowTaskFailedCause, &Option<Failure>) -> bool + Send>, } impl MockPollCfg { pub fn new( hists: Vec<FakeWfResponses>, enforce_correct_number_of_polls: bool, num_expected_fails: Option<usize>, ) -> Self { Self { hists, enforce_correct_number_of_polls, num_expected_fails, mock_gateway: MockServerGatewayApis::new(), expect_fail_wft_matcher: Box::new(|_, _, _| true), } } pub fn from_resp_batches( wf_id: &str, t: TestHistoryBuilder, resps: impl IntoIterator<Item = impl Into<ResponseType>>, mock_gateway: MockServerGatewayApis, ) -> Self { Self { hists: vec![FakeWfResponses { wf_id: wf_id.to_owned(), hist: t, response_batches: resps.into_iter().map(Into::into).collect(), task_q: TEST_Q.to_owned(), }], enforce_correct_number_of_polls: true, num_expected_fails: None, mock_gateway, expect_fail_wft_matcher: Box::new(|_, _, _| true), } } } /// Given an iterable of fake responses, return the mocks & 
associated data to work with them pub fn build_mock_pollers(mut cfg: MockPollCfg) -> MocksHolder<MockServerGatewayApis> { // Maps task queues to maps of wfid -> responses let mut task_queues_to_resps: HashMap<String, BTreeMap<String, VecDeque<_>>> = HashMap::new(); let outstanding_wf_task_tokens = Arc::new(RwLock::new(BiMap::new())); let mut correct_num_polls = None; for hist in cfg.hists { let full_hist_info = hist.hist.get_full_history_info().unwrap(); // Ensure no response batch is trying to return more tasks than the history contains for respt in &hist.response_batches { if let ResponseType::ToTaskNum(rb_wf_num) = respt { assert!( *rb_wf_num <= full_hist_info.wf_task_count(), "Wf task count {} is not <= total task count {}", rb_wf_num, full_hist_info.wf_task_count() ); } } // TODO: Fix -- or not? Sticky invalidation could make this pointless anyway // Verify response batches only ever return longer histories (IE: Are sorted ascending) // assert!( // hist.response_batches // .as_slice() // .windows(2) // .all(|w| w[0] <= w[1]), // "response batches must have increasing wft numbers" // ); if cfg.enforce_correct_number_of_polls { *correct_num_polls.get_or_insert(0) += hist.response_batches.len(); } // Convert history batches into poll responses, while also tracking how many times a given // history has been returned so we can increment the associated attempt number on the WFT. // NOTE: This is hard to use properly with the `AfterEveryReply` testing eviction mode. // Such usages need a history different from other eviction modes which would include // WFT timeouts or something to simulate the task getting dropped. 
let mut attempts_at_task_num = HashMap::new(); let responses: Vec<_> = hist .response_batches .iter() .map(|to_task_num| { let cur_attempt = attempts_at_task_num.entry(to_task_num).or_insert(1); let mut r = hist_to_poll_resp( &hist.hist, hist.wf_id.to_owned(), *to_task_num, hist.task_q.clone(), ); r.attempt = *cur_attempt; *cur_attempt += 1; r }) .collect(); let tasks = VecDeque::from(responses); task_queues_to_resps .entry(hist.task_q) .or_default() .insert(hist.wf_id, tasks); } let mut mock_pollers = HashMap::new(); for (task_q, mut queue_tasks) in task_queues_to_resps.into_iter() { let mut mock_poller = mock_poller(); // The poller will return history from any workflow runs that do not have currently // outstanding tasks. let outstanding = outstanding_wf_task_tokens.clone(); mock_poller .expect_poll() .times( correct_num_polls .map::<TimesRange, _>(Into::into) .unwrap_or_else(|| RangeFull.into()), ) .returning(move || { for (_, tasks) in queue_tasks.iter_mut() { // Must extract run id from a workflow task associated with this workflow // TODO: Case where run id changes for same workflow id is not handled here if let Some(t) = tasks.get(0) { let rid = t.workflow_execution.as_ref().unwrap().run_id.clone(); if!outstanding.read().contains_left(&rid) { let t = tasks.pop_front().unwrap(); outstanding .write() .insert(rid, TaskToken(t.task_token.clone())); return Some(Ok(t)); } } } Some(Err(tonic::Status::cancelled("No more work to do"))) }); let mw = MockWorker::new(&task_q, Box::from(mock_poller)); mock_pollers.insert(task_q, mw); } let outstanding = outstanding_wf_task_tokens.clone(); cfg.mock_gateway .expect_complete_workflow_task() .returning(move |comp| { outstanding.write().remove_by_right(&comp.task_token); Ok(RespondWorkflowTaskCompletedResponse::default()) }); let outstanding = outstanding_wf_task_tokens.clone(); cfg.mock_gateway .expect_fail_workflow_task() .withf(cfg.expect_fail_wft_matcher) .times( cfg.num_expected_fails .map::<TimesRange, _>(Into::into) 
.unwrap_or_else(|| RangeFull.into()), ) .returning(move |tt, _, _| { outstanding.write().remove_by_right(&tt); Ok(Default::default()) }); cfg.mock_gateway .expect_start_workflow() .returning(|_, _, _, _, _| Ok(Default::default())); MocksHolder { sg: cfg.mock_gateway, mock_pollers, outstanding_task_map: Some(outstanding_wf_task_tokens), } } pub fn hist_to_poll_resp( t: &TestHistoryBuilder, wf_id: String, response_type: ResponseType, task_queue: String, ) -> PollWorkflowTaskQueueResponse { let run_id = t.get_orig_run_id(); let wf = WorkflowExecution { workflow_id: wf_id, run_id: run_id.to_string(), }; let hist_info = match response_type { ResponseType::ToTaskNum(tn) => t.get_history_info(tn).unwrap(), ResponseType::AllHistory => t.get_full_history_info().unwrap(), }; let batch = hist_info.events().to_vec(); let task_token: [u8; 16] = thread_rng().gen(); PollWorkflowTaskQueueResponse { history: Some(History { events: batch }), workflow_execution: Some(wf), task_token: task_token.to_vec(), workflow_type: Some(WorkflowType { name: DEFAULT_WORKFLOW_TYPE.to_owned(), }), workflow_execution_task_queue: Some(TaskQueue { name: task_queue, kind: TaskQueueKind::Normal as i32, }), previous_started_event_id: hist_info.previous_started_event_id, started_event_id: hist_info.workflow_task_started_event_id, ..Default::default() } } pub fn fake_sg_opts() -> ServerGatewayOptions { ServerGatewayOptions { target_url: Url::from_str("https://fake").unwrap(), namespace: "".to_string(), client_name: "".to_string(), client_version: "".to_string(), static_headers: Default::default(), identity: "".to_string(), worker_binary_id: "".to_string(), tls_cfg: None, retry_config: Default::default(), } } type AsserterWithReply<'a> = (&'a dyn Fn(&WfActivation), wf_activation_completion::Status); /// This function accepts a list of asserts and replies to workflow activations to run against the /// provided instance of fake core. 
/// /// It handles the business of re-sending the same activation replies over again in the event /// of eviction or workflow activation failure. Activation failures specifically are only run once, /// since they clearly can't be returned every time we replay the workflow, or it could never /// proceed pub(crate) async fn poll_and_reply<'a>( core: &'a CoreSDK, eviction_mode: WorkflowCachingPolicy, expect_and_reply: &'a [AsserterWithReply<'a>], ) { poll_and_reply_clears_outstanding_evicts(core, None, eviction_mode, expect_and_reply).await } pub(crate) async fn poll_and_reply_clears_outstanding_evicts<'a>( core: &'a CoreSDK, outstanding_map: Option<Arc<RwLock<BiMap<String, TaskToken>>>>, eviction_mode: WorkflowCachingPolicy, expect_and_reply: &'a [AsserterWithReply<'a>], ) { let mut evictions = 0; let expected_evictions = expect_and_reply.len() - 1; let mut executed_failures = HashSet::new(); let expected_fail_count = expect_and_reply .iter() .filter(|(_, reply)|!reply.is_success()) .count(); 'outer: loop { let expect_iter = expect_and_reply.iter(); for (i, interaction) in expect_iter.enumerate() { let (asserter, reply) = interaction; let complete_is_failure =!reply.is_success(); // Only send activation failures once if executed_failures.contains(&i) { continue; } let mut res = core.poll_workflow_activation(TEST_Q).await.unwrap(); let contains_eviction = res.eviction_index(); if let Some(eviction_job_ix) = contains_eviction { // If the job list has an eviction, make sure it was the last item in the list // then remove it, since in the tests we don't explicitly specify evict assertions assert_eq!( eviction_job_ix, res.jobs.len() - 1, "Eviction job was not last job in job list" ); res.jobs.remove(eviction_job_ix); if let Some(omap) = outstanding_map.as_ref() { omap.write().remove_by_left(&res.run_id);
{ Some(Ok(t)) }
conditional_block
mod.rs
ToTaskNum(usize), AllHistory, } impl From<&usize> for ResponseType { fn from(u: &usize) -> Self { ResponseType::ToTaskNum(*u) } } // :shrug: impl From<&ResponseType> for ResponseType { fn from(r: &ResponseType) -> Self { *r } } /// Given identifiers for a workflow/run, and a test history builder, construct an instance of /// the core SDK with a mock server gateway that will produce the responses as appropriate. /// /// `response_batches` is used to control the fake [PollWorkflowTaskQueueResponse]s returned. For /// each number in the input list, a fake response will be prepared which includes history up to the /// workflow task with that number, as in [TestHistoryBuilder::get_history_info]. pub(crate) fn build_fake_core( wf_id: &str, t: TestHistoryBuilder, response_batches: impl IntoIterator<Item = impl Into<ResponseType>>, ) -> CoreSDK { let response_batches = response_batches.into_iter().map(Into::into).collect(); let mock_gateway = build_multihist_mock_sg( vec![FakeWfResponses { wf_id: wf_id.to_owned(), hist: t, response_batches, task_q: TEST_Q.to_owned(), }], true, None, ); mock_core(mock_gateway) } pub(crate) fn mock_core<SG>(mocks: MocksHolder<SG>) -> CoreSDK where SG: ServerGatewayApis + Send + Sync +'static, { mock_core_with_opts(mocks, CoreInitOptionsBuilder::default()) } pub(crate) fn mock_core_with_opts<SG>( mocks: MocksHolder<SG>, opts: CoreInitOptionsBuilder, ) -> CoreSDK where SG: ServerGatewayApis + Send + Sync +'static, { let mut core = mock_core_with_opts_no_workers(mocks.sg, opts); register_mock_workers(&mut core, mocks.mock_pollers.into_values()); core } pub(crate) fn register_mock_workers( core: &mut CoreSDK, mocks: impl IntoIterator<Item = MockWorker>, ) { for worker in mocks { core.reg_worker_sync(worker); } } pub(crate) fn mock_core_with_opts_no_workers<SG>( sg: SG, mut opts: CoreInitOptionsBuilder, ) -> CoreSDK where SG: ServerGatewayApis + Send + Sync +'static, { CoreSDK::new(sg, opts.gateway_opts(fake_sg_opts()).build().unwrap()) } pub 
struct FakeWfResponses { pub wf_id: String, pub hist: TestHistoryBuilder, pub response_batches: Vec<ResponseType>, pub task_q: String, } // TODO: turn this into a builder or make a new one? to make all these different build fns simpler pub struct MocksHolder<SG> { sg: SG, mock_pollers: HashMap<String, MockWorker>, pub outstanding_task_map: Option<Arc<RwLock<BiMap<String, TaskToken>>>>, } impl<SG> MocksHolder<SG> { pub fn worker_cfg(&mut self, task_q: &str, mutator: impl FnOnce(&mut WorkerConfig)) { if let Some(w) = self.mock_pollers.get_mut(task_q) { mutator(&mut w.config); } } pub fn take_pollers(self) -> HashMap<String, MockWorker> { self.mock_pollers } } pub struct MockWorker { pub wf_poller: BoxedWFPoller, pub act_poller: Option<BoxedActPoller>, pub config: WorkerConfig, } impl Default for MockWorker { fn default() -> Self { MockWorker { wf_poller: Box::from(mock_poller()), act_poller: None, config: WorkerConfig::default_test_q(), } } } impl MockWorker { pub fn new(q: &str, wf_poller: BoxedWFPoller) -> Self { MockWorker { wf_poller, act_poller: None, config: WorkerConfig::default(q), } } pub fn for_queue(q: &str) -> Self { MockWorker { wf_poller: Box::from(mock_poller()), act_poller: None, config: WorkerConfig::default(q), } } } impl<SG> MocksHolder<SG> where SG: ServerGatewayApis + Send + Sync +'static, { pub fn from_mock_workers( sg: SG, mock_workers: impl IntoIterator<Item = MockWorker>, ) -> MocksHolder<SG> { let mock_pollers = mock_workers .into_iter() .map(|w| (w.config.task_queue.clone(), w)) .collect(); MocksHolder { sg, mock_pollers, outstanding_task_map: None, } } /// Uses the provided list of tasks to create a mock poller for the `TEST_Q` pub fn from_gateway_with_responses( sg: SG, wf_tasks: VecDeque<PollWorkflowTaskQueueResponse>, act_tasks: VecDeque<PollActivityTaskQueueResponse>, ) -> MocksHolder<SG> { let mut mock_pollers = HashMap::new(); let mock_poller = mock_poller_from_resps(wf_tasks); let mock_act_poller = mock_poller_from_resps(act_tasks); 
mock_pollers.insert( TEST_Q.to_string(), MockWorker { wf_poller: mock_poller, act_poller: Some(mock_act_poller), config: WorkerConfigBuilder::default() .task_queue(TEST_Q) .build() .unwrap(), }, ); MocksHolder { sg, mock_pollers, outstanding_task_map: None, } } } pub fn mock_poller_from_resps<T>(mut tasks: VecDeque<T>) -> BoxedPoller<T> where T: Send + Sync +'static, { let mut mock_poller = mock_poller(); mock_poller.expect_poll().returning(move || { if let Some(t) = tasks.pop_front() { Some(Ok(t)) } else { Some(Err(tonic::Status::out_of_range( "Ran out of mock responses!", ))) } }); Box::new(mock_poller) as BoxedPoller<T> } pub fn mock_poller<T>() -> MockPoller<T> where T: Send + Sync +'static, { let mut mock_poller = MockPoller::new(); mock_poller.expect_shutdown_box().return_const(()); mock_poller.expect_notify_shutdown().return_const(()); mock_poller } pub fn mock_manual_poller<T>() -> MockManualPoller<T> where T: Send + Sync +'static, { let mut mock_poller = MockManualPoller::new(); mock_poller .expect_shutdown_box() .returning(|| async {}.boxed()); mock_poller.expect_notify_shutdown().return_const(()); mock_poller } /// Build a mock server gateway capable of returning multiple different histories for different /// workflows. It does so by tracking outstanding workflow tasks like is also happening in core /// (which is unfortunately a bit redundant, we could provide hooks in core but that feels a little /// nasty). If there is an outstanding task for a given workflow, new chunks of its history are not /// returned. If there is not, the next batch of history is returned for any workflow without an /// outstanding task. Outstanding tasks are cleared on completion, failure, or eviction. /// /// `num_expected_fails` can be provided to set a specific number of expected failed workflow tasks /// sent to the server. 
pub fn build_multihist_mock_sg( hists: impl IntoIterator<Item = FakeWfResponses>, enforce_correct_number_of_polls: bool, num_expected_fails: Option<usize>, ) -> MocksHolder<MockServerGatewayApis> { let mh = MockPollCfg::new( hists.into_iter().collect(), enforce_correct_number_of_polls, num_expected_fails, ); build_mock_pollers(mh) } /// See [build_multihist_mock_sg] -- one history convenience version pub fn single_hist_mock_sg( wf_id: &str, t: TestHistoryBuilder, response_batches: impl IntoIterator<Item = impl Into<ResponseType>>, mock_gateway: MockServerGatewayApis, enforce_num_polls: bool, ) -> MocksHolder<MockServerGatewayApis> { let mut mh = MockPollCfg::from_resp_batches(wf_id, t, response_batches, mock_gateway); mh.enforce_correct_number_of_polls = enforce_num_polls; build_mock_pollers(mh) } pub struct MockPollCfg { pub hists: Vec<FakeWfResponses>, pub enforce_correct_number_of_polls: bool, pub num_expected_fails: Option<usize>, pub mock_gateway: MockServerGatewayApis, /// All calls to fail WFTs must match this predicate pub expect_fail_wft_matcher: Box<dyn Fn(&TaskToken, &WorkflowTaskFailedCause, &Option<Failure>) -> bool + Send>, } impl MockPollCfg { pub fn new( hists: Vec<FakeWfResponses>, enforce_correct_number_of_polls: bool, num_expected_fails: Option<usize>, ) -> Self { Self { hists, enforce_correct_number_of_polls, num_expected_fails, mock_gateway: MockServerGatewayApis::new(), expect_fail_wft_matcher: Box::new(|_, _, _| true), } } pub fn from_resp_batches( wf_id: &str, t: TestHistoryBuilder, resps: impl IntoIterator<Item = impl Into<ResponseType>>, mock_gateway: MockServerGatewayApis, ) -> Self { Self { hists: vec![FakeWfResponses { wf_id: wf_id.to_owned(), hist: t, response_batches: resps.into_iter().map(Into::into).collect(), task_q: TEST_Q.to_owned(), }], enforce_correct_number_of_polls: true, num_expected_fails: None, mock_gateway, expect_fail_wft_matcher: Box::new(|_, _, _| true), } } } /// Given an iterable of fake responses, return the mocks & 
associated data to work with them pub fn
(mut cfg: MockPollCfg) -> MocksHolder<MockServerGatewayApis> { // Maps task queues to maps of wfid -> responses let mut task_queues_to_resps: HashMap<String, BTreeMap<String, VecDeque<_>>> = HashMap::new(); let outstanding_wf_task_tokens = Arc::new(RwLock::new(BiMap::new())); let mut correct_num_polls = None; for hist in cfg.hists { let full_hist_info = hist.hist.get_full_history_info().unwrap(); // Ensure no response batch is trying to return more tasks than the history contains for respt in &hist.response_batches { if let ResponseType::ToTaskNum(rb_wf_num) = respt { assert!( *rb_wf_num <= full_hist_info.wf_task_count(), "Wf task count {} is not <= total task count {}", rb_wf_num, full_hist_info.wf_task_count() ); } } // TODO: Fix -- or not? Sticky invalidation could make this pointless anyway // Verify response batches only ever return longer histories (IE: Are sorted ascending) // assert!( // hist.response_batches // .as_slice() // .windows(2) // .all(|w| w[0] <= w[1]), // "response batches must have increasing wft numbers" // ); if cfg.enforce_correct_number_of_polls { *correct_num_polls.get_or_insert(0) += hist.response_batches.len(); } // Convert history batches into poll responses, while also tracking how many times a given // history has been returned so we can increment the associated attempt number on the WFT. // NOTE: This is hard to use properly with the `AfterEveryReply` testing eviction mode. // Such usages need a history different from other eviction modes which would include // WFT timeouts or something to simulate the task getting dropped. 
let mut attempts_at_task_num = HashMap::new(); let responses: Vec<_> = hist .response_batches .iter() .map(|to_task_num| { let cur_attempt = attempts_at_task_num.entry(to_task_num).or_insert(1); let mut r = hist_to_poll_resp( &hist.hist, hist.wf_id.to_owned(), *to_task_num, hist.task_q.clone(), ); r.attempt = *cur_attempt; *cur_attempt += 1; r }) .collect(); let tasks = VecDeque::from(responses); task_queues_to_resps .entry(hist.task_q) .or_default() .insert(hist.wf_id, tasks); } let mut mock_pollers = HashMap::new(); for (task_q, mut queue_tasks) in task_queues_to_resps.into_iter() { let mut mock_poller = mock_poller(); // The poller will return history from any workflow runs that do not have currently // outstanding tasks. let outstanding = outstanding_wf_task_tokens.clone(); mock_poller .expect_poll() .times( correct_num_polls .map::<TimesRange, _>(Into::into) .unwrap_or_else(|| RangeFull.into()), ) .returning(move || { for (_, tasks) in queue_tasks.iter_mut() { // Must extract run id from a workflow task associated with this workflow // TODO: Case where run id changes for same workflow id is not handled here if let Some(t) = tasks.get(0) { let rid = t.workflow_execution.as_ref().unwrap().run_id.clone(); if!outstanding.read().contains_left(&rid) { let t = tasks.pop_front().unwrap(); outstanding .write() .insert(rid, TaskToken(t.task_token.clone())); return Some(Ok(t)); } } } Some(Err(tonic::Status::cancelled("No more work to do"))) }); let mw = MockWorker::new(&task_q, Box::from(mock_poller)); mock_pollers.insert(task_q, mw); } let outstanding = outstanding_wf_task_tokens.clone(); cfg.mock_gateway .expect_complete_workflow_task() .returning(move |comp| { outstanding.write().remove_by_right(&comp.task_token); Ok(RespondWorkflowTaskCompletedResponse::default()) }); let outstanding = outstanding_wf_task_tokens.clone(); cfg.mock_gateway .expect_fail_workflow_task() .withf(cfg.expect_fail_wft_matcher) .times( cfg.num_expected_fails .map::<TimesRange, _>(Into::into) 
.unwrap_or_else(|| RangeFull.into()), ) .returning(move |tt, _, _| { outstanding.write().remove_by_right(&tt); Ok(Default::default()) }); cfg.mock_gateway .expect_start_workflow() .returning(|_, _, _, _, _| Ok(Default::default())); MocksHolder { sg: cfg.mock_gateway, mock_pollers, outstanding_task_map: Some(outstanding_wf_task_tokens), } } pub fn hist_to_poll_resp( t: &TestHistoryBuilder, wf_id: String, response_type: ResponseType, task_queue: String, ) -> PollWorkflowTaskQueueResponse { let run_id = t.get_orig_run_id(); let wf = WorkflowExecution { workflow_id: wf_id, run_id: run_id.to_string(), }; let hist_info = match response_type { ResponseType::ToTaskNum(tn) => t.get_history_info(tn).unwrap(), ResponseType::AllHistory => t.get_full_history_info().unwrap(), }; let batch = hist_info.events().to_vec(); let task_token: [u8; 16] = thread_rng().gen(); PollWorkflowTaskQueueResponse { history: Some(History { events: batch }), workflow_execution: Some(wf), task_token: task_token.to_vec(), workflow_type: Some(WorkflowType { name: DEFAULT_WORKFLOW_TYPE.to_owned(), }), workflow_execution_task_queue: Some(TaskQueue { name: task_queue, kind: TaskQueueKind::Normal as i32, }), previous_started_event_id: hist_info.previous_started_event_id, started_event_id: hist_info.workflow_task_started_event_id, ..Default::default() } } pub fn fake_sg_opts() -> ServerGatewayOptions { ServerGatewayOptions { target_url: Url::from_str("https://fake").unwrap(), namespace: "".to_string(), client_name: "".to_string(), client_version: "".to_string(), static_headers: Default::default(), identity: "".to_string(), worker_binary_id: "".to_string(), tls_cfg: None, retry_config: Default::default(), } } type AsserterWithReply<'a> = (&'a dyn Fn(&WfActivation), wf_activation_completion::Status); /// This function accepts a list of asserts and replies to workflow activations to run against the /// provided instance of fake core. 
/// /// It handles the business of re-sending the same activation replies over again in the event /// of eviction or workflow activation failure. Activation failures specifically are only run once, /// since they clearly can't be returned every time we replay the workflow, or it could never /// proceed pub(crate) async fn poll_and_reply<'a>( core: &'a CoreSDK, eviction_mode: WorkflowCachingPolicy, expect_and_reply: &'a [AsserterWithReply<'a>], ) { poll_and_reply_clears_outstanding_evicts(core, None, eviction_mode, expect_and_reply).await } pub(crate) async fn poll_and_reply_clears_outstanding_evicts<'a>( core: &'a CoreSDK, outstanding_map: Option<Arc<RwLock<BiMap<String, TaskToken>>>>, eviction_mode: WorkflowCachingPolicy, expect_and_reply: &'a [AsserterWithReply<'a>], ) { let mut evictions = 0; let expected_evictions = expect_and_reply.len() - 1; let mut executed_failures = HashSet::new(); let expected_fail_count = expect_and_reply .iter() .filter(|(_, reply)|!reply.is_success()) .count(); 'outer: loop { let expect_iter = expect_and_reply.iter(); for (i, interaction) in expect_iter.enumerate() { let (asserter, reply) = interaction; let complete_is_failure =!reply.is_success(); // Only send activation failures once if executed_failures.contains(&i) { continue; } let mut res = core.poll_workflow_activation(TEST_Q).await.unwrap(); let contains_eviction = res.eviction_index(); if let Some(eviction_job_ix) = contains_eviction { // If the job list has an eviction, make sure it was the last item in the list // then remove it, since in the tests we don't explicitly specify evict assertions assert_eq!( eviction_job_ix, res.jobs.len() - 1, "Eviction job was not last job in job list" ); res.jobs.remove(eviction_job_ix); if let Some(omap) = outstanding_map.as_ref() { omap.write().remove_by_left(&res.run_id);
build_mock_pollers
identifier_name
mod.rs
ToTaskNum(usize), AllHistory, } impl From<&usize> for ResponseType { fn from(u: &usize) -> Self { ResponseType::ToTaskNum(*u) } } // :shrug: impl From<&ResponseType> for ResponseType { fn from(r: &ResponseType) -> Self { *r } } /// Given identifiers for a workflow/run, and a test history builder, construct an instance of /// the core SDK with a mock server gateway that will produce the responses as appropriate. /// /// `response_batches` is used to control the fake [PollWorkflowTaskQueueResponse]s returned. For /// each number in the input list, a fake response will be prepared which includes history up to the /// workflow task with that number, as in [TestHistoryBuilder::get_history_info]. pub(crate) fn build_fake_core( wf_id: &str, t: TestHistoryBuilder, response_batches: impl IntoIterator<Item = impl Into<ResponseType>>, ) -> CoreSDK { let response_batches = response_batches.into_iter().map(Into::into).collect(); let mock_gateway = build_multihist_mock_sg( vec![FakeWfResponses { wf_id: wf_id.to_owned(), hist: t, response_batches, task_q: TEST_Q.to_owned(), }], true, None, ); mock_core(mock_gateway) } pub(crate) fn mock_core<SG>(mocks: MocksHolder<SG>) -> CoreSDK where SG: ServerGatewayApis + Send + Sync +'static, { mock_core_with_opts(mocks, CoreInitOptionsBuilder::default()) } pub(crate) fn mock_core_with_opts<SG>( mocks: MocksHolder<SG>, opts: CoreInitOptionsBuilder, ) -> CoreSDK where SG: ServerGatewayApis + Send + Sync +'static, { let mut core = mock_core_with_opts_no_workers(mocks.sg, opts); register_mock_workers(&mut core, mocks.mock_pollers.into_values()); core } pub(crate) fn register_mock_workers( core: &mut CoreSDK, mocks: impl IntoIterator<Item = MockWorker>, ) { for worker in mocks { core.reg_worker_sync(worker); } } pub(crate) fn mock_core_with_opts_no_workers<SG>( sg: SG, mut opts: CoreInitOptionsBuilder, ) -> CoreSDK where SG: ServerGatewayApis + Send + Sync +'static, { CoreSDK::new(sg, opts.gateway_opts(fake_sg_opts()).build().unwrap()) } pub 
struct FakeWfResponses { pub wf_id: String, pub hist: TestHistoryBuilder, pub response_batches: Vec<ResponseType>, pub task_q: String, } // TODO: turn this into a builder or make a new one? to make all these different build fns simpler pub struct MocksHolder<SG> { sg: SG, mock_pollers: HashMap<String, MockWorker>, pub outstanding_task_map: Option<Arc<RwLock<BiMap<String, TaskToken>>>>, } impl<SG> MocksHolder<SG> { pub fn worker_cfg(&mut self, task_q: &str, mutator: impl FnOnce(&mut WorkerConfig)) { if let Some(w) = self.mock_pollers.get_mut(task_q) { mutator(&mut w.config); } } pub fn take_pollers(self) -> HashMap<String, MockWorker> { self.mock_pollers } } pub struct MockWorker { pub wf_poller: BoxedWFPoller, pub act_poller: Option<BoxedActPoller>, pub config: WorkerConfig, } impl Default for MockWorker { fn default() -> Self { MockWorker { wf_poller: Box::from(mock_poller()), act_poller: None, config: WorkerConfig::default_test_q(), } } } impl MockWorker { pub fn new(q: &str, wf_poller: BoxedWFPoller) -> Self { MockWorker { wf_poller, act_poller: None, config: WorkerConfig::default(q), } } pub fn for_queue(q: &str) -> Self { MockWorker { wf_poller: Box::from(mock_poller()), act_poller: None, config: WorkerConfig::default(q), } } } impl<SG> MocksHolder<SG> where SG: ServerGatewayApis + Send + Sync +'static, { pub fn from_mock_workers( sg: SG, mock_workers: impl IntoIterator<Item = MockWorker>, ) -> MocksHolder<SG> { let mock_pollers = mock_workers .into_iter() .map(|w| (w.config.task_queue.clone(), w)) .collect(); MocksHolder { sg, mock_pollers, outstanding_task_map: None, } } /// Uses the provided list of tasks to create a mock poller for the `TEST_Q` pub fn from_gateway_with_responses( sg: SG, wf_tasks: VecDeque<PollWorkflowTaskQueueResponse>, act_tasks: VecDeque<PollActivityTaskQueueResponse>, ) -> MocksHolder<SG> { let mut mock_pollers = HashMap::new(); let mock_poller = mock_poller_from_resps(wf_tasks); let mock_act_poller = mock_poller_from_resps(act_tasks); 
mock_pollers.insert( TEST_Q.to_string(), MockWorker { wf_poller: mock_poller, act_poller: Some(mock_act_poller), config: WorkerConfigBuilder::default() .task_queue(TEST_Q) .build() .unwrap(), }, ); MocksHolder { sg, mock_pollers, outstanding_task_map: None, } } } pub fn mock_poller_from_resps<T>(mut tasks: VecDeque<T>) -> BoxedPoller<T> where T: Send + Sync +'static, { let mut mock_poller = mock_poller(); mock_poller.expect_poll().returning(move || { if let Some(t) = tasks.pop_front() { Some(Ok(t)) } else { Some(Err(tonic::Status::out_of_range( "Ran out of mock responses!", ))) } }); Box::new(mock_poller) as BoxedPoller<T> } pub fn mock_poller<T>() -> MockPoller<T> where T: Send + Sync +'static, { let mut mock_poller = MockPoller::new(); mock_poller.expect_shutdown_box().return_const(()); mock_poller.expect_notify_shutdown().return_const(()); mock_poller } pub fn mock_manual_poller<T>() -> MockManualPoller<T> where T: Send + Sync +'static, { let mut mock_poller = MockManualPoller::new(); mock_poller .expect_shutdown_box() .returning(|| async {}.boxed()); mock_poller.expect_notify_shutdown().return_const(()); mock_poller } /// Build a mock server gateway capable of returning multiple different histories for different /// workflows. It does so by tracking outstanding workflow tasks like is also happening in core /// (which is unfortunately a bit redundant, we could provide hooks in core but that feels a little /// nasty). If there is an outstanding task for a given workflow, new chunks of its history are not /// returned. If there is not, the next batch of history is returned for any workflow without an /// outstanding task. Outstanding tasks are cleared on completion, failure, or eviction. /// /// `num_expected_fails` can be provided to set a specific number of expected failed workflow tasks /// sent to the server. 
pub fn build_multihist_mock_sg( hists: impl IntoIterator<Item = FakeWfResponses>, enforce_correct_number_of_polls: bool, num_expected_fails: Option<usize>, ) -> MocksHolder<MockServerGatewayApis> { let mh = MockPollCfg::new( hists.into_iter().collect(), enforce_correct_number_of_polls, num_expected_fails, ); build_mock_pollers(mh) } /// See [build_multihist_mock_sg] -- one history convenience version pub fn single_hist_mock_sg( wf_id: &str, t: TestHistoryBuilder, response_batches: impl IntoIterator<Item = impl Into<ResponseType>>, mock_gateway: MockServerGatewayApis, enforce_num_polls: bool, ) -> MocksHolder<MockServerGatewayApis> { let mut mh = MockPollCfg::from_resp_batches(wf_id, t, response_batches, mock_gateway); mh.enforce_correct_number_of_polls = enforce_num_polls; build_mock_pollers(mh) } pub struct MockPollCfg { pub hists: Vec<FakeWfResponses>, pub enforce_correct_number_of_polls: bool, pub num_expected_fails: Option<usize>, pub mock_gateway: MockServerGatewayApis, /// All calls to fail WFTs must match this predicate pub expect_fail_wft_matcher: Box<dyn Fn(&TaskToken, &WorkflowTaskFailedCause, &Option<Failure>) -> bool + Send>, } impl MockPollCfg { pub fn new( hists: Vec<FakeWfResponses>, enforce_correct_number_of_polls: bool, num_expected_fails: Option<usize>, ) -> Self { Self { hists, enforce_correct_number_of_polls, num_expected_fails, mock_gateway: MockServerGatewayApis::new(), expect_fail_wft_matcher: Box::new(|_, _, _| true), } } pub fn from_resp_batches( wf_id: &str, t: TestHistoryBuilder, resps: impl IntoIterator<Item = impl Into<ResponseType>>, mock_gateway: MockServerGatewayApis, ) -> Self { Self { hists: vec![FakeWfResponses { wf_id: wf_id.to_owned(), hist: t, response_batches: resps.into_iter().map(Into::into).collect(), task_q: TEST_Q.to_owned(), }], enforce_correct_number_of_polls: true, num_expected_fails: None, mock_gateway, expect_fail_wft_matcher: Box::new(|_, _, _| true), } } } /// Given an iterable of fake responses, return the mocks & 
associated data to work with them pub fn build_mock_pollers(mut cfg: MockPollCfg) -> MocksHolder<MockServerGatewayApis> { // Maps task queues to maps of wfid -> responses let mut task_queues_to_resps: HashMap<String, BTreeMap<String, VecDeque<_>>> = HashMap::new(); let outstanding_wf_task_tokens = Arc::new(RwLock::new(BiMap::new())); let mut correct_num_polls = None; for hist in cfg.hists { let full_hist_info = hist.hist.get_full_history_info().unwrap(); // Ensure no response batch is trying to return more tasks than the history contains for respt in &hist.response_batches { if let ResponseType::ToTaskNum(rb_wf_num) = respt { assert!( *rb_wf_num <= full_hist_info.wf_task_count(), "Wf task count {} is not <= total task count {}", rb_wf_num, full_hist_info.wf_task_count() ); } } // TODO: Fix -- or not? Sticky invalidation could make this pointless anyway // Verify response batches only ever return longer histories (IE: Are sorted ascending) // assert!( // hist.response_batches // .as_slice() // .windows(2) // .all(|w| w[0] <= w[1]), // "response batches must have increasing wft numbers" // ); if cfg.enforce_correct_number_of_polls { *correct_num_polls.get_or_insert(0) += hist.response_batches.len(); } // Convert history batches into poll responses, while also tracking how many times a given // history has been returned so we can increment the associated attempt number on the WFT. // NOTE: This is hard to use properly with the `AfterEveryReply` testing eviction mode. // Such usages need a history different from other eviction modes which would include // WFT timeouts or something to simulate the task getting dropped. 
let mut attempts_at_task_num = HashMap::new(); let responses: Vec<_> = hist .response_batches .iter() .map(|to_task_num| { let cur_attempt = attempts_at_task_num.entry(to_task_num).or_insert(1); let mut r = hist_to_poll_resp( &hist.hist, hist.wf_id.to_owned(), *to_task_num, hist.task_q.clone(), ); r.attempt = *cur_attempt; *cur_attempt += 1; r }) .collect(); let tasks = VecDeque::from(responses); task_queues_to_resps .entry(hist.task_q) .or_default() .insert(hist.wf_id, tasks); } let mut mock_pollers = HashMap::new(); for (task_q, mut queue_tasks) in task_queues_to_resps.into_iter() { let mut mock_poller = mock_poller(); // The poller will return history from any workflow runs that do not have currently // outstanding tasks. let outstanding = outstanding_wf_task_tokens.clone(); mock_poller .expect_poll() .times( correct_num_polls .map::<TimesRange, _>(Into::into) .unwrap_or_else(|| RangeFull.into()), ) .returning(move || { for (_, tasks) in queue_tasks.iter_mut() { // Must extract run id from a workflow task associated with this workflow // TODO: Case where run id changes for same workflow id is not handled here if let Some(t) = tasks.get(0) { let rid = t.workflow_execution.as_ref().unwrap().run_id.clone(); if!outstanding.read().contains_left(&rid) { let t = tasks.pop_front().unwrap(); outstanding .write() .insert(rid, TaskToken(t.task_token.clone())); return Some(Ok(t)); } } } Some(Err(tonic::Status::cancelled("No more work to do"))) }); let mw = MockWorker::new(&task_q, Box::from(mock_poller)); mock_pollers.insert(task_q, mw); } let outstanding = outstanding_wf_task_tokens.clone(); cfg.mock_gateway .expect_complete_workflow_task() .returning(move |comp| { outstanding.write().remove_by_right(&comp.task_token); Ok(RespondWorkflowTaskCompletedResponse::default()) }); let outstanding = outstanding_wf_task_tokens.clone(); cfg.mock_gateway .expect_fail_workflow_task() .withf(cfg.expect_fail_wft_matcher) .times( cfg.num_expected_fails .map::<TimesRange, _>(Into::into) 
.unwrap_or_else(|| RangeFull.into()), ) .returning(move |tt, _, _| { outstanding.write().remove_by_right(&tt); Ok(Default::default()) }); cfg.mock_gateway .expect_start_workflow() .returning(|_, _, _, _, _| Ok(Default::default())); MocksHolder { sg: cfg.mock_gateway, mock_pollers, outstanding_task_map: Some(outstanding_wf_task_tokens), } } pub fn hist_to_poll_resp( t: &TestHistoryBuilder, wf_id: String, response_type: ResponseType, task_queue: String, ) -> PollWorkflowTaskQueueResponse { let run_id = t.get_orig_run_id(); let wf = WorkflowExecution { workflow_id: wf_id, run_id: run_id.to_string(), }; let hist_info = match response_type { ResponseType::ToTaskNum(tn) => t.get_history_info(tn).unwrap(), ResponseType::AllHistory => t.get_full_history_info().unwrap(), }; let batch = hist_info.events().to_vec(); let task_token: [u8; 16] = thread_rng().gen(); PollWorkflowTaskQueueResponse { history: Some(History { events: batch }), workflow_execution: Some(wf), task_token: task_token.to_vec(), workflow_type: Some(WorkflowType { name: DEFAULT_WORKFLOW_TYPE.to_owned(), }), workflow_execution_task_queue: Some(TaskQueue { name: task_queue, kind: TaskQueueKind::Normal as i32, }), previous_started_event_id: hist_info.previous_started_event_id, started_event_id: hist_info.workflow_task_started_event_id, ..Default::default() } } pub fn fake_sg_opts() -> ServerGatewayOptions { ServerGatewayOptions { target_url: Url::from_str("https://fake").unwrap(), namespace: "".to_string(), client_name: "".to_string(), client_version: "".to_string(), static_headers: Default::default(), identity: "".to_string(), worker_binary_id: "".to_string(), tls_cfg: None, retry_config: Default::default(), } } type AsserterWithReply<'a> = (&'a dyn Fn(&WfActivation), wf_activation_completion::Status); /// This function accepts a list of asserts and replies to workflow activations to run against the /// provided instance of fake core. 
/// /// It handles the business of re-sending the same activation replies over again in the event /// of eviction or workflow activation failure. Activation failures specifically are only run once, /// since they clearly can't be returned every time we replay the workflow, or it could never /// proceed pub(crate) async fn poll_and_reply<'a>( core: &'a CoreSDK, eviction_mode: WorkflowCachingPolicy, expect_and_reply: &'a [AsserterWithReply<'a>], ) { poll_and_reply_clears_outstanding_evicts(core, None, eviction_mode, expect_and_reply).await } pub(crate) async fn poll_and_reply_clears_outstanding_evicts<'a>( core: &'a CoreSDK, outstanding_map: Option<Arc<RwLock<BiMap<String, TaskToken>>>>, eviction_mode: WorkflowCachingPolicy, expect_and_reply: &'a [AsserterWithReply<'a>], )
let mut res = core.poll_workflow_activation(TEST_Q).await.unwrap(); let contains_eviction = res.eviction_index(); if let Some(eviction_job_ix) = contains_eviction { // If the job list has an eviction, make sure it was the last item in the list // then remove it, since in the tests we don't explicitly specify evict assertions assert_eq!( eviction_job_ix, res.jobs.len() - 1, "Eviction job was not last job in job list" ); res.jobs.remove(eviction_job_ix); if let Some(omap) = outstanding_map.as_ref() { omap.write().remove_by_left(&res.run_id);
{ let mut evictions = 0; let expected_evictions = expect_and_reply.len() - 1; let mut executed_failures = HashSet::new(); let expected_fail_count = expect_and_reply .iter() .filter(|(_, reply)| !reply.is_success()) .count(); 'outer: loop { let expect_iter = expect_and_reply.iter(); for (i, interaction) in expect_iter.enumerate() { let (asserter, reply) = interaction; let complete_is_failure = !reply.is_success(); // Only send activation failures once if executed_failures.contains(&i) { continue; }
identifier_body
main.rs
#[macro_use] extern crate clap; extern crate curl; extern crate formdata; extern crate hex; extern crate hmac; extern crate hyper; #[macro_use] extern crate log; extern crate pipe; extern crate rand; extern crate sha_1; #[macro_use] extern crate serde_json; #[macro_use] extern crate serde_derive; extern crate stderrlog; extern crate url; use clap::{Arg, App, SubCommand}; use formdata::{FormData, FilePart, write_formdata}; use hmac::{Hmac, Mac}; use log::LogLevel; use rand::{thread_rng, Rng}; use hex::ToHex; use sha_1::Sha1; use std::env; use std::error::Error; use std::fmt;
use std::io::{BufReader, BufWriter, Read}; use std::str; use std::thread::spawn; use url::Url; use curl::easy::{Easy, List}; const FORM_MAX_FILE_SIZE: u64 = 1099511627776; const FORM_MAX_FILE_COUNT: usize = 1048576; const FORM_EXPIRES: u64 = 4102444800; #[derive(Debug)] struct OpenStackConfig { auth_url: String, project_domain: String, project_name: String, user_domain: String, username: String, password: String, region_name: String } #[derive(Debug)] struct SwiftAuthInfo { token: String, url: String } #[derive(Debug, Serialize, Deserialize)] struct FormTemplate { url: String, redirect: String, max_file_size: u64, max_file_count: usize, expires: u64, signature: String } #[derive(Debug)] struct MissingToken; impl fmt::Display for MissingToken { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Token not found in Keystone response headers") } } impl Error for MissingToken { fn description(&self) -> &str { "Token not found in Keystone response headers" } fn cause(&self) -> Option<&Error> { None } } #[derive(Debug)] struct MissingSwiftUrl; impl fmt::Display for MissingSwiftUrl { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Swift service endpoint URL not found in Keystone JSON catalog") } } impl Error for MissingSwiftUrl { fn description(&self) -> &str { "Swift service endpoint URL not found in Keystone JSON catalog" } fn cause(&self) -> Option<&Error> { None } } #[derive(Debug)] struct MissingTempUrlKey; impl fmt::Display for MissingTempUrlKey { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Temp URL key not found in Swift response headers") } } impl Error for MissingTempUrlKey { fn description(&self) -> &str { "Temp URL key not found in Swift response headers" } fn cause(&self) -> Option<&Error> { None } } #[derive(Debug)] struct UnableToCreateContainer; impl fmt::Display for UnableToCreateContainer { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Unable to create Swift container") } } impl 
Error for UnableToCreateContainer { fn description(&self) -> &str { "Unable to create Swift container" } fn cause(&self) -> Option<&Error> { None } } fn main() { let matches = App::new("backup2swift") .version(crate_version!()) .arg(Arg::with_name("verbosity") .short("v") .multiple(true) .help("Increase message verbosity")) .arg(Arg::with_name("quiet") .short("q") .help("Silence all output")) .subcommand(SubCommand::with_name("setup") .about("Setup container and create signed form template") .arg(Arg::with_name("container") .takes_value(true) .required(true) .help("destination container name"))) .subcommand(SubCommand::with_name("backup") .about("Backup files to container") .arg(Arg::with_name("form_template") .short("c") .long("form-template") .required(true) .takes_value(true) .help("signed POST form template (JSON) created with \"setup\"")) .arg(Arg::with_name("delete_after") .short("t") .long("delete-after") .takes_value(true) .help("seconds to keep file for")) .arg(Arg::with_name("files") .takes_value(true) .multiple(true) .required(true) .help("destination container name"))) .get_matches(); let verbose = matches.occurrences_of("verbosity") as usize; let quiet = matches.is_present("quiet"); stderrlog::new() .module(module_path!()) .quiet(quiet) .verbosity(verbose) .init() .unwrap(); if let Some(matches) = matches.subcommand_matches("setup") { setup(matches.value_of("container").unwrap()); } else if let Some(matches) = matches.subcommand_matches("backup") { let form_template_file = Path::new(matches.value_of("form_template").unwrap()); assert!(form_template_file.is_file()); let expire_after = value_t!(matches, "delete_after", u64).ok(); let file_paths = matches.values_of_lossy("files").unwrap(); let files: &Vec<&Path> = & file_paths.iter().map(|f| Path::new(f)).collect::<Vec<&Path>>(); assert!(files.into_iter().all(|f: &&Path| f.is_file())); backup(form_template_file, expire_after, files); } else { println!("try 'backup2swift --help' for more information"); 
::std::process::exit(2) } } fn setup(container_name: &str) -> () { let settings = get_os_settings(); let auth_info = get_token(settings).unwrap(); let temp_url_key = get_temp_url_key(&auth_info) .or_else(|_| set_temp_url_key(&auth_info, &create_random_key())) .unwrap(); ensure_container_exists(&auth_info, container_name).unwrap(); let form_template = backup_config(&auth_info, container_name, &temp_url_key); println!("{}", serde_json::to_string_pretty(&form_template).unwrap()); } fn backup<'a>( form_template_file: &'a Path, delete_after: Option<u64>, files: &'a Vec<&Path>) -> () { let form_template = read_form_template_file(form_template_file).unwrap(); let file_count = files.len(); info!("{:?}", form_template); assert!(form_template.max_file_count >= file_count); let file_parts: Vec<(String, FilePart)> = files.into_iter() .zip(std::ops::Range { start: 0, end: file_count }) .map(|(f,i): (&&Path, usize)| { let mut headers = hyper::header::Headers::new(); headers.append_raw("Content-Type", "application/octet-stream".to_owned().into_bytes()); let output: (String, FilePart) = ( format!("file{}", i), formdata::FilePart::new(headers, f) ); output }) .collect::<Vec<(String, FilePart)>>(); info!("{:?}", file_parts); let mut fields = vec![ ("redirect".to_owned(), form_template.redirect.to_owned()), ("max_file_size".to_owned(), format!("{}", form_template.max_file_size)), ("max_file_count".to_owned(), format!("{}", form_template.max_file_count)), ("expires".to_owned(), format!("{}", form_template.expires)), ("signature".to_owned(), format!("{}", form_template.signature)) ]; match delete_after { Some(n) => fields.push(("x_delete_after".to_owned(), format!("{}", n))), None => () }; let form_data = FormData { fields: fields, files: file_parts }; send_data(form_template, form_data).unwrap(); } fn get_env(name: &str) -> String { env::var(name).expect(& format!("{} environment variable not defined", name)) } fn get_os_settings() -> OpenStackConfig { let auth_url = 
get_env("OS_AUTH_URL"); info!("OS_AUTH_URL: {}", &auth_url); let user_domain = get_env("OS_USER_DOMAIN_NAME"); info!("OS_PROJECT_NAME: {}", &user_domain); let username = get_env("OS_USERNAME"); info!("OS_USERNAME: {}", &username); let project_domain = get_env("OS_PROJECT_DOMAIN_NAME"); info!("OS_PROJECT_NAME: {}", &project_domain); let project_name = get_env("OS_PROJECT_NAME"); info!("OS_PROJECT_NAME: {}", &project_name); let password = get_env("OS_PASSWORD"); info!("OS_PASSWORD: {}", &("*".repeat(password.len()))); let region_name = get_env("OS_REGION_NAME"); info!("OS_REGION_NAME: {}", &region_name); OpenStackConfig { auth_url, user_domain, username, project_domain, project_name, password, region_name } } fn get_token(config: OpenStackConfig) -> Result<SwiftAuthInfo, Box<Error>> { let mut dst = Vec::new(); let mut easy = Easy::new(); let json = json!({ "auth": { "identity": { "methods": [ "password" ], "password": { "user": { "domain": { "name": config.user_domain }, "name": config.username, "password": config.password } } }, "scope": { "project": { "domain": { "name": config.project_domain }, "name": config.project_name } } } }); let json_bytes = serde_json::to_vec_pretty(&json).unwrap(); let mut req_reader = BufReader::new(json_bytes.as_slice()); let mut headers = List::new(); let mut opt_token: Option<String> = None; headers.append("Content-Type: application/json")?; headers.append(format!("Content-Length: {}", json_bytes.len()).as_ref())?; headers.append("Accept: application/json")?; headers.append("Expect: ")?; easy.verbose(log_enabled!(LogLevel::Debug))?; easy.post(true)?; easy.url(& format!("{}auth/tokens", config.auth_url))?; easy.http_headers(headers)?; { let mut transfer = easy.transfer(); transfer.header_function(|header| { let mut splitter = str::from_utf8(header).unwrap().splitn(2, ": "); match splitter.next() { Some(name) if name.to_lowercase() == "x-subject-token" => { splitter.next().map(|s| s.to_owned()).map(|t| { opt_token = 
Some(t.trim().to_owned()); }); () } _ => () } true })?; transfer.read_function(|into| { Ok(req_reader.read(into).unwrap()) })?; transfer.write_function(|data| { dst.extend_from_slice(data); Ok(data.len()) })?; transfer.perform()? } match opt_token { Some(token) => { let response_json: Result<serde_json::Value, serde_json::Error> = serde_json::from_slice(dst.as_slice()); response_json .map(|j| { j.get("token") .and_then(|v| v.get("catalog")) .and_then(|v| v.as_array()) .and_then(|catalog| get_swift_endpoint(catalog.iter(), config.region_name.to_owned())) }) .map_err(|e| From::from(e)) .and_then(|opt_url| { opt_url .map(|url| SwiftAuthInfo { token, url }) .ok_or(MissingSwiftUrl) .map_err(|e| From::from(e)) }) }, None => Err(From::from(MissingToken)) } } fn get_swift_endpoint<'a,I>( catalog: I, region_name: String) -> Option<String> where I: Iterator<Item=&'a serde_json::Value> { catalog .filter_map(|item| { match item.get("type").and_then(|v| v.as_str()) { Some(t) if t == "object-store" => item.get("endpoints").and_then(|v| v.as_array()).map(|v| v.into_iter()), _ => None } }) .flat_map(|endpoints| endpoints) .find(|endpoint| { (match endpoint.get("interface").and_then(|v| v.as_str()) { Some(i) if i == "public" => true, _ => false }) && ( match endpoint.get("region").and_then(|v| v.as_str()) { Some(region) if region == region_name => true, _ => false }) }) .and_then(|endpoint| endpoint.get("url").and_then(|v| v.as_str())).map(|s| s.to_owned()) } fn get_temp_url_key(info: &SwiftAuthInfo) -> Result<String, Box<Error>> { let mut opt_temp_url_key: Option<String> = None; let mut easy = Easy::new(); let mut headers = List::new(); headers.append(& format!("X-Auth-Token: {}", info.token))?; headers.append("Expect: ")?; easy.verbose(log_enabled!(LogLevel::Debug))?; easy.nobody(true)?; easy.url(& format!("{}", info.url))?; easy.http_headers(headers)?; { let mut transfer = easy.transfer(); transfer.header_function(|header| { let mut splitter = 
str::from_utf8(header).unwrap().splitn(2, ": "); match splitter.next() { Some(name) if name.to_lowercase() == "x-account-meta-temp-url-key" => { splitter.next().map(|s| s.to_owned()).map(|t| { opt_temp_url_key = Some(t.trim().to_owned()); }); () } _ => () } true })?; transfer.perform()? } opt_temp_url_key .ok_or(MissingToken) .map_err(|e| From::from(e)) } fn create_random_key() -> String { thread_rng().gen_ascii_chars().take(32).collect() } fn set_temp_url_key(info: &SwiftAuthInfo, temp_url_key: &str) -> Result<String, Box<Error>> { let mut easy = Easy::new(); let mut headers = List::new(); headers.append(& format!("X-Auth-Token: {}", info.token))?; headers.append(& format!("X-Account-Meta-Temp-Url-Key: {}", temp_url_key))?; headers.append("Expect: ")?; easy.verbose(log_enabled!(LogLevel::Debug))?; easy.post(true)?; easy.url(& format!("{}", info.url))?; easy.http_headers(headers)?; easy.perform()?; easy.response_code() .map_err(|e| From::from(e)) .and_then(|code| { match code { 200...299 => Ok(temp_url_key.to_owned()), _ => Err(From::from(MissingTempUrlKey)) } }) } fn ensure_container_exists(info: &SwiftAuthInfo, container: &str) -> Result<(), Box<Error>> { let mut easy = Easy::new(); let mut headers = List::new(); headers.append(& format!("X-Auth-Token: {}", info.token))?; headers.append("Expect: ")?; easy.verbose(log_enabled!(LogLevel::Debug))?; easy.nobody(true)?; easy.url(& format!("{}/{}", info.url, container))?; easy.http_headers(headers)?; easy.perform()?; easy.response_code() .map_err(|e| From::from(e)) .and_then(|code| { match code { 200...299 => Ok(()), _ => create_container(info, container) } }) } fn create_container(info: &SwiftAuthInfo, container: &str) -> Result<(), Box<Error>> { let mut easy = Easy::new(); let mut headers = List::new(); headers.append("Content-Length: 0")?; headers.append(& format!("X-Auth-Token: {}", info.token))?; headers.append("Expect: ")?; easy.verbose(log_enabled!(LogLevel::Debug))?; easy.put(true)?; easy.url(& format!("{}/{}", 
info.url, container))?; easy.http_headers(headers)?; easy.perform()?; easy.response_code() .map_err(|e| From::from(e)) .and_then(|response_code| { match response_code { 200...299 => Ok(()), _ => Err(From::from(UnableToCreateContainer)) } }) } fn form_post_url(info: &SwiftAuthInfo, container: &str) -> Url { Url::parse(& format!("{}/{}/", info.url, container)).unwrap() } fn signature( signature_path: &str, redirect: &str, max_file_size: &u64, max_file_count: &usize, expires: &u64, temp_url_key: &str) -> String { let input = format!( "{}\n{}\n{}\n{}\n{}", signature_path, redirect, max_file_size, max_file_count, expires ); // Create `Mac` trait implementation, namely HMAC-SHA256 let mut mac = Hmac::<Sha1>::new(temp_url_key.as_bytes()); mac.input(input.as_bytes()); mac.result().code().to_hex() } fn backup_config(info: &SwiftAuthInfo, container: &str, temp_url_key: &str) -> FormTemplate { let url: Url = form_post_url(info, container); let redirect = ""; let max_file_size = FORM_MAX_FILE_SIZE; let max_file_count = FORM_MAX_FILE_COUNT; let expires = FORM_EXPIRES; FormTemplate { url: url.as_str().to_owned(), redirect: redirect.to_owned(), max_file_size: max_file_size, max_file_count: max_file_count, expires: expires, signature: signature( url.path(), redirect, &max_file_size, &max_file_count, &expires, temp_url_key) } } fn read_form_template_file<'a>(config_file: &'a Path) -> Result<FormTemplate, Box<Error>> { let f = File::open(config_file)?; let rdr = BufReader::new(f); serde_json::from_reader(rdr).map_err(|e| From::from(e)) } fn send_data(form_template: FormTemplate, form_data: FormData) -> Result<(), Box<Error>> { let mut headers = List::new(); let boundary_str: &str = & { let rand_str: String = thread_rng().gen_ascii_chars().take(20).collect(); "-".repeat(20).to_string() + &rand_str }; let boundary: Vec<u8> = boundary_str.to_owned().into_bytes(); let mut sink = std::io::sink(); let content_length = write_formdata(&mut sink, &boundary, &form_data)?; headers.append(& 
format!("Content-Length: {}", content_length))?; headers.append(& format!("Content-Type: multipart/form-data; boundary={}", boundary_str))?; let mut easy = Easy::new(); easy.verbose(log_enabled!(LogLevel::Debug))?; easy.post(true)?; easy.url(& form_template.url)?; easy.http_headers(headers)?; { const BUFFER_SIZE: usize = 524288; let (r, w) = pipe::pipe(); let mut br = BufReader::with_capacity(BUFFER_SIZE, r); let mut bw = BufWriter::with_capacity(BUFFER_SIZE, w); spawn(move || write_formdata(&mut bw, &boundary, &form_data)); let mut transfer = easy.transfer(); transfer.read_function(|into| { Ok(br.read(into).unwrap_or(0)) })?; transfer.perform()?; } Ok(()) }
use std::fs::File; use std::path::Path;
random_line_split
main.rs
#[macro_use] extern crate clap; extern crate curl; extern crate formdata; extern crate hex; extern crate hmac; extern crate hyper; #[macro_use] extern crate log; extern crate pipe; extern crate rand; extern crate sha_1; #[macro_use] extern crate serde_json; #[macro_use] extern crate serde_derive; extern crate stderrlog; extern crate url; use clap::{Arg, App, SubCommand}; use formdata::{FormData, FilePart, write_formdata}; use hmac::{Hmac, Mac}; use log::LogLevel; use rand::{thread_rng, Rng}; use hex::ToHex; use sha_1::Sha1; use std::env; use std::error::Error; use std::fmt; use std::fs::File; use std::path::Path; use std::io::{BufReader, BufWriter, Read}; use std::str; use std::thread::spawn; use url::Url; use curl::easy::{Easy, List}; const FORM_MAX_FILE_SIZE: u64 = 1099511627776; const FORM_MAX_FILE_COUNT: usize = 1048576; const FORM_EXPIRES: u64 = 4102444800; #[derive(Debug)] struct OpenStackConfig { auth_url: String, project_domain: String, project_name: String, user_domain: String, username: String, password: String, region_name: String } #[derive(Debug)] struct SwiftAuthInfo { token: String, url: String } #[derive(Debug, Serialize, Deserialize)] struct
{ url: String, redirect: String, max_file_size: u64, max_file_count: usize, expires: u64, signature: String } #[derive(Debug)] struct MissingToken; impl fmt::Display for MissingToken { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Token not found in Keystone response headers") } } impl Error for MissingToken { fn description(&self) -> &str { "Token not found in Keystone response headers" } fn cause(&self) -> Option<&Error> { None } } #[derive(Debug)] struct MissingSwiftUrl; impl fmt::Display for MissingSwiftUrl { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Swift service endpoint URL not found in Keystone JSON catalog") } } impl Error for MissingSwiftUrl { fn description(&self) -> &str { "Swift service endpoint URL not found in Keystone JSON catalog" } fn cause(&self) -> Option<&Error> { None } } #[derive(Debug)] struct MissingTempUrlKey; impl fmt::Display for MissingTempUrlKey { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Temp URL key not found in Swift response headers") } } impl Error for MissingTempUrlKey { fn description(&self) -> &str { "Temp URL key not found in Swift response headers" } fn cause(&self) -> Option<&Error> { None } } #[derive(Debug)] struct UnableToCreateContainer; impl fmt::Display for UnableToCreateContainer { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Unable to create Swift container") } } impl Error for UnableToCreateContainer { fn description(&self) -> &str { "Unable to create Swift container" } fn cause(&self) -> Option<&Error> { None } } fn main() { let matches = App::new("backup2swift") .version(crate_version!()) .arg(Arg::with_name("verbosity") .short("v") .multiple(true) .help("Increase message verbosity")) .arg(Arg::with_name("quiet") .short("q") .help("Silence all output")) .subcommand(SubCommand::with_name("setup") .about("Setup container and create signed form template") .arg(Arg::with_name("container") .takes_value(true) .required(true) 
.help("destination container name"))) .subcommand(SubCommand::with_name("backup") .about("Backup files to container") .arg(Arg::with_name("form_template") .short("c") .long("form-template") .required(true) .takes_value(true) .help("signed POST form template (JSON) created with \"setup\"")) .arg(Arg::with_name("delete_after") .short("t") .long("delete-after") .takes_value(true) .help("seconds to keep file for")) .arg(Arg::with_name("files") .takes_value(true) .multiple(true) .required(true) .help("destination container name"))) .get_matches(); let verbose = matches.occurrences_of("verbosity") as usize; let quiet = matches.is_present("quiet"); stderrlog::new() .module(module_path!()) .quiet(quiet) .verbosity(verbose) .init() .unwrap(); if let Some(matches) = matches.subcommand_matches("setup") { setup(matches.value_of("container").unwrap()); } else if let Some(matches) = matches.subcommand_matches("backup") { let form_template_file = Path::new(matches.value_of("form_template").unwrap()); assert!(form_template_file.is_file()); let expire_after = value_t!(matches, "delete_after", u64).ok(); let file_paths = matches.values_of_lossy("files").unwrap(); let files: &Vec<&Path> = & file_paths.iter().map(|f| Path::new(f)).collect::<Vec<&Path>>(); assert!(files.into_iter().all(|f: &&Path| f.is_file())); backup(form_template_file, expire_after, files); } else { println!("try 'backup2swift --help' for more information"); ::std::process::exit(2) } } fn setup(container_name: &str) -> () { let settings = get_os_settings(); let auth_info = get_token(settings).unwrap(); let temp_url_key = get_temp_url_key(&auth_info) .or_else(|_| set_temp_url_key(&auth_info, &create_random_key())) .unwrap(); ensure_container_exists(&auth_info, container_name).unwrap(); let form_template = backup_config(&auth_info, container_name, &temp_url_key); println!("{}", serde_json::to_string_pretty(&form_template).unwrap()); } fn backup<'a>( form_template_file: &'a Path, delete_after: Option<u64>, files: &'a 
Vec<&Path>) -> () { let form_template = read_form_template_file(form_template_file).unwrap(); let file_count = files.len(); info!("{:?}", form_template); assert!(form_template.max_file_count >= file_count); let file_parts: Vec<(String, FilePart)> = files.into_iter() .zip(std::ops::Range { start: 0, end: file_count }) .map(|(f,i): (&&Path, usize)| { let mut headers = hyper::header::Headers::new(); headers.append_raw("Content-Type", "application/octet-stream".to_owned().into_bytes()); let output: (String, FilePart) = ( format!("file{}", i), formdata::FilePart::new(headers, f) ); output }) .collect::<Vec<(String, FilePart)>>(); info!("{:?}", file_parts); let mut fields = vec![ ("redirect".to_owned(), form_template.redirect.to_owned()), ("max_file_size".to_owned(), format!("{}", form_template.max_file_size)), ("max_file_count".to_owned(), format!("{}", form_template.max_file_count)), ("expires".to_owned(), format!("{}", form_template.expires)), ("signature".to_owned(), format!("{}", form_template.signature)) ]; match delete_after { Some(n) => fields.push(("x_delete_after".to_owned(), format!("{}", n))), None => () }; let form_data = FormData { fields: fields, files: file_parts }; send_data(form_template, form_data).unwrap(); } fn get_env(name: &str) -> String { env::var(name).expect(& format!("{} environment variable not defined", name)) } fn get_os_settings() -> OpenStackConfig { let auth_url = get_env("OS_AUTH_URL"); info!("OS_AUTH_URL: {}", &auth_url); let user_domain = get_env("OS_USER_DOMAIN_NAME"); info!("OS_PROJECT_NAME: {}", &user_domain); let username = get_env("OS_USERNAME"); info!("OS_USERNAME: {}", &username); let project_domain = get_env("OS_PROJECT_DOMAIN_NAME"); info!("OS_PROJECT_NAME: {}", &project_domain); let project_name = get_env("OS_PROJECT_NAME"); info!("OS_PROJECT_NAME: {}", &project_name); let password = get_env("OS_PASSWORD"); info!("OS_PASSWORD: {}", &("*".repeat(password.len()))); let region_name = get_env("OS_REGION_NAME"); 
info!("OS_REGION_NAME: {}", &region_name); OpenStackConfig { auth_url, user_domain, username, project_domain, project_name, password, region_name } } fn get_token(config: OpenStackConfig) -> Result<SwiftAuthInfo, Box<Error>> { let mut dst = Vec::new(); let mut easy = Easy::new(); let json = json!({ "auth": { "identity": { "methods": [ "password" ], "password": { "user": { "domain": { "name": config.user_domain }, "name": config.username, "password": config.password } } }, "scope": { "project": { "domain": { "name": config.project_domain }, "name": config.project_name } } } }); let json_bytes = serde_json::to_vec_pretty(&json).unwrap(); let mut req_reader = BufReader::new(json_bytes.as_slice()); let mut headers = List::new(); let mut opt_token: Option<String> = None; headers.append("Content-Type: application/json")?; headers.append(format!("Content-Length: {}", json_bytes.len()).as_ref())?; headers.append("Accept: application/json")?; headers.append("Expect: ")?; easy.verbose(log_enabled!(LogLevel::Debug))?; easy.post(true)?; easy.url(& format!("{}auth/tokens", config.auth_url))?; easy.http_headers(headers)?; { let mut transfer = easy.transfer(); transfer.header_function(|header| { let mut splitter = str::from_utf8(header).unwrap().splitn(2, ": "); match splitter.next() { Some(name) if name.to_lowercase() == "x-subject-token" => { splitter.next().map(|s| s.to_owned()).map(|t| { opt_token = Some(t.trim().to_owned()); }); () } _ => () } true })?; transfer.read_function(|into| { Ok(req_reader.read(into).unwrap()) })?; transfer.write_function(|data| { dst.extend_from_slice(data); Ok(data.len()) })?; transfer.perform()? 
} match opt_token { Some(token) => { let response_json: Result<serde_json::Value, serde_json::Error> = serde_json::from_slice(dst.as_slice()); response_json .map(|j| { j.get("token") .and_then(|v| v.get("catalog")) .and_then(|v| v.as_array()) .and_then(|catalog| get_swift_endpoint(catalog.iter(), config.region_name.to_owned())) }) .map_err(|e| From::from(e)) .and_then(|opt_url| { opt_url .map(|url| SwiftAuthInfo { token, url }) .ok_or(MissingSwiftUrl) .map_err(|e| From::from(e)) }) }, None => Err(From::from(MissingToken)) } } fn get_swift_endpoint<'a,I>( catalog: I, region_name: String) -> Option<String> where I: Iterator<Item=&'a serde_json::Value> { catalog .filter_map(|item| { match item.get("type").and_then(|v| v.as_str()) { Some(t) if t == "object-store" => item.get("endpoints").and_then(|v| v.as_array()).map(|v| v.into_iter()), _ => None } }) .flat_map(|endpoints| endpoints) .find(|endpoint| { (match endpoint.get("interface").and_then(|v| v.as_str()) { Some(i) if i == "public" => true, _ => false }) && ( match endpoint.get("region").and_then(|v| v.as_str()) { Some(region) if region == region_name => true, _ => false }) }) .and_then(|endpoint| endpoint.get("url").and_then(|v| v.as_str())).map(|s| s.to_owned()) } fn get_temp_url_key(info: &SwiftAuthInfo) -> Result<String, Box<Error>> { let mut opt_temp_url_key: Option<String> = None; let mut easy = Easy::new(); let mut headers = List::new(); headers.append(& format!("X-Auth-Token: {}", info.token))?; headers.append("Expect: ")?; easy.verbose(log_enabled!(LogLevel::Debug))?; easy.nobody(true)?; easy.url(& format!("{}", info.url))?; easy.http_headers(headers)?; { let mut transfer = easy.transfer(); transfer.header_function(|header| { let mut splitter = str::from_utf8(header).unwrap().splitn(2, ": "); match splitter.next() { Some(name) if name.to_lowercase() == "x-account-meta-temp-url-key" => { splitter.next().map(|s| s.to_owned()).map(|t| { opt_temp_url_key = Some(t.trim().to_owned()); }); () } _ => () } true 
})?; transfer.perform()? } opt_temp_url_key .ok_or(MissingToken) .map_err(|e| From::from(e)) } fn create_random_key() -> String { thread_rng().gen_ascii_chars().take(32).collect() } fn set_temp_url_key(info: &SwiftAuthInfo, temp_url_key: &str) -> Result<String, Box<Error>> { let mut easy = Easy::new(); let mut headers = List::new(); headers.append(& format!("X-Auth-Token: {}", info.token))?; headers.append(& format!("X-Account-Meta-Temp-Url-Key: {}", temp_url_key))?; headers.append("Expect: ")?; easy.verbose(log_enabled!(LogLevel::Debug))?; easy.post(true)?; easy.url(& format!("{}", info.url))?; easy.http_headers(headers)?; easy.perform()?; easy.response_code() .map_err(|e| From::from(e)) .and_then(|code| { match code { 200...299 => Ok(temp_url_key.to_owned()), _ => Err(From::from(MissingTempUrlKey)) } }) } fn ensure_container_exists(info: &SwiftAuthInfo, container: &str) -> Result<(), Box<Error>> { let mut easy = Easy::new(); let mut headers = List::new(); headers.append(& format!("X-Auth-Token: {}", info.token))?; headers.append("Expect: ")?; easy.verbose(log_enabled!(LogLevel::Debug))?; easy.nobody(true)?; easy.url(& format!("{}/{}", info.url, container))?; easy.http_headers(headers)?; easy.perform()?; easy.response_code() .map_err(|e| From::from(e)) .and_then(|code| { match code { 200...299 => Ok(()), _ => create_container(info, container) } }) } fn create_container(info: &SwiftAuthInfo, container: &str) -> Result<(), Box<Error>> { let mut easy = Easy::new(); let mut headers = List::new(); headers.append("Content-Length: 0")?; headers.append(& format!("X-Auth-Token: {}", info.token))?; headers.append("Expect: ")?; easy.verbose(log_enabled!(LogLevel::Debug))?; easy.put(true)?; easy.url(& format!("{}/{}", info.url, container))?; easy.http_headers(headers)?; easy.perform()?; easy.response_code() .map_err(|e| From::from(e)) .and_then(|response_code| { match response_code { 200...299 => Ok(()), _ => Err(From::from(UnableToCreateContainer)) } }) } fn 
form_post_url(info: &SwiftAuthInfo, container: &str) -> Url { Url::parse(& format!("{}/{}/", info.url, container)).unwrap() } fn signature( signature_path: &str, redirect: &str, max_file_size: &u64, max_file_count: &usize, expires: &u64, temp_url_key: &str) -> String { let input = format!( "{}\n{}\n{}\n{}\n{}", signature_path, redirect, max_file_size, max_file_count, expires ); // Create `Mac` trait implementation, namely HMAC-SHA256 let mut mac = Hmac::<Sha1>::new(temp_url_key.as_bytes()); mac.input(input.as_bytes()); mac.result().code().to_hex() } fn backup_config(info: &SwiftAuthInfo, container: &str, temp_url_key: &str) -> FormTemplate { let url: Url = form_post_url(info, container); let redirect = ""; let max_file_size = FORM_MAX_FILE_SIZE; let max_file_count = FORM_MAX_FILE_COUNT; let expires = FORM_EXPIRES; FormTemplate { url: url.as_str().to_owned(), redirect: redirect.to_owned(), max_file_size: max_file_size, max_file_count: max_file_count, expires: expires, signature: signature( url.path(), redirect, &max_file_size, &max_file_count, &expires, temp_url_key) } } fn read_form_template_file<'a>(config_file: &'a Path) -> Result<FormTemplate, Box<Error>> { let f = File::open(config_file)?; let rdr = BufReader::new(f); serde_json::from_reader(rdr).map_err(|e| From::from(e)) } fn send_data(form_template: FormTemplate, form_data: FormData) -> Result<(), Box<Error>> { let mut headers = List::new(); let boundary_str: &str = & { let rand_str: String = thread_rng().gen_ascii_chars().take(20).collect(); "-".repeat(20).to_string() + &rand_str }; let boundary: Vec<u8> = boundary_str.to_owned().into_bytes(); let mut sink = std::io::sink(); let content_length = write_formdata(&mut sink, &boundary, &form_data)?; headers.append(& format!("Content-Length: {}", content_length))?; headers.append(& format!("Content-Type: multipart/form-data; boundary={}", boundary_str))?; let mut easy = Easy::new(); easy.verbose(log_enabled!(LogLevel::Debug))?; easy.post(true)?; easy.url(& 
form_template.url)?; easy.http_headers(headers)?; { const BUFFER_SIZE: usize = 524288; let (r, w) = pipe::pipe(); let mut br = BufReader::with_capacity(BUFFER_SIZE, r); let mut bw = BufWriter::with_capacity(BUFFER_SIZE, w); spawn(move || write_formdata(&mut bw, &boundary, &form_data)); let mut transfer = easy.transfer(); transfer.read_function(|into| { Ok(br.read(into).unwrap_or(0)) })?; transfer.perform()?; } Ok(()) }
FormTemplate
identifier_name
interpolation.rs
/* * Copyright 2018 The Starlark in Rust Authors. * Copyright (c) Facebook, Inc. and its affiliates. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ //! String interpolation-related code. //! Based on <https://docs.python.org/3/library/stdtypes.html#printf-style-string-formatting> use crate::values::{tuple::Tuple, Heap, Value, ValueLike}; use gazebo::prelude::*; use std::{fmt::Write, iter}; use thiserror::Error; const AFTER_PERCENT: &str = "'%' must be followed by an optional name and a specifier ('s', 'r', 'd', 'i', 'o', 'x', 'X', 'c') or '%'"; /// Operator `%` format or evaluation errors #[derive(Clone, Dupe, Debug, Error)] enum StringInterpolationError { #[error( "Unexpected EOF in format string. Could not find ')' when parsing '%(name)f' expression" )] UnexpectedEOFClosingParen, /// `%` must be followed by specifier. #[error("Unexpected EOF in format string. {}", AFTER_PERCENT)] UnexpectedEOFPercent, #[error("Unknown format string specifier '{}'. {}",.0.escape_default(), AFTER_PERCENT)] UnknownSpecifier(char), #[error("Invalid UTF-8 codepoint 0x{:x} passed for %c formatter",.0)] ValueNotInUTFRange(u32), /// Interpolation parameter is too big for the format string. #[error("Too many arguments for format string")] TooManyParameters, /// Interpolation parameter is too small for the format string. 
#[error("Not enough arguments for format string")] NotEnoughParameters, #[error("'%c' formatter requires a single-character string")] ValueNotChar, } /// Format char enum ArgFormat { // str(x) Str, // repr(x) Repr, // signed integer decimal Dec, // signed octal Oct, // signed hexadecimal, lowercase HexLower, // signed hexadecimal, uppercase HexUpper, // x for string, chr(x) for int Char, // `%` sign Percent, } impl ArgFormat { fn format_arg(&self, out: &mut String, arg: Value) -> anyhow::Result<()> { match self { // Equivalent to `write!(out, "{}", arg.to_str()).unwrap()`, but avoid // allocating a separate `String` on the way. ArgFormat::Str => match arg.unpack_str() { None => arg.collect_repr(out), Some(v) => out.push_str(v), }, ArgFormat::Repr => arg.collect_repr(out), ArgFormat::Dec => write!(out, "{}", arg.to_int()?).unwrap(), ArgFormat::Oct => { let v = arg.to_int()?; write!( out, "{}{:o}", if v < 0 { "-" } else { "" }, v.wrapping_abs() as u64 ) .unwrap(); } ArgFormat::HexLower => { let v = arg.to_int()?; write!( out, "{}{:x}", if v < 0 { "-" } else { "" }, v.wrapping_abs() as u64 ) .unwrap(); } ArgFormat::HexUpper => { let v = arg.to_int()?; write!( out, "{}{:X}", if v < 0 { "-" } else { "" }, v.wrapping_abs() as u64 ) .unwrap(); } ArgFormat::Char => match arg.unpack_str() { Some(arg) => { let mut chars = arg.chars(); let c = chars.next(); match c { Some(c) if chars.next().is_none() => out.push(c), _ => return Err(StringInterpolationError::ValueNotChar.into()), } } None => { let i = arg.to_int()? as u32; match std::char::from_u32(i) { Some(c) => write!(out, "{}", c).unwrap(), None => { return Err(StringInterpolationError::ValueNotInUTFRange(i).into()); } } } }, ArgFormat::Percent => { out.push('%'); } } Ok(()) } } // %(name)s or %s enum NamedOrPositional { Named(String), Positional, } /// Implement Python `%` format strings. 
pub struct Interpolation { /// String before first parameter init: String, /// Number of positional arguments positional_count: usize, /// Number of named arguments named_count: usize, /// Arguments followed by uninterpreted strings parameters: Vec<(NamedOrPositional, ArgFormat, String)>, } impl Interpolation { fn append_literal(&mut self, c: char) { if let Some(p) = self.parameters.last_mut() { p.2.push(c); } else { self.init.push(c) } } /// Parse a percent-interpolation string, returning an `Err` if the string is invalid. pub fn parse(format: &str) -> anyhow::Result<Self> { let mut result = Self { init: String::new(), positional_count: 0, named_count: 0, parameters: Vec::new(), }; let mut chars = format.chars(); while let Some(c) = chars.next() { if c!= '%' { result.append_literal(c); } else { let next = chars .next() .ok_or(StringInterpolationError::UnexpectedEOFPercent)?; let (named_or_positional, format_char) = if next == '(' { let mut name = String::new(); loop { match chars.next() { None => { return Err( StringInterpolationError::UnexpectedEOFClosingParen.into() ); } Some(')') => { break; } Some(c) => name.push(c), } } ( NamedOrPositional::Named(name), chars .next() .ok_or(StringInterpolationError::UnexpectedEOFPercent)?, ) } else { (NamedOrPositional::Positional, next) }; let format = match format_char { 's' => ArgFormat::Str, 'r' => ArgFormat::Repr, 'd' | 'i' => ArgFormat::Dec, 'o' => ArgFormat::Oct, 'x' => ArgFormat::HexLower, 'X' => ArgFormat::HexUpper, 'c' => ArgFormat::Char, '%' => match named_or_positional { NamedOrPositional::Positional => { result.append_literal('%'); continue; } NamedOrPositional::Named(_) => { // In both Python and Starlark Go implementations // `%(n)%` consumes named argument, but // `%%` does not consume positional argument. // So `Percent` variant is added only when `ArgFormat` is `Named`. 
ArgFormat::Percent } }, c => return Err(StringInterpolationError::UnknownSpecifier(c).into()), }; match named_or_positional { NamedOrPositional::Positional =>
NamedOrPositional::Named(..) => { result.named_count += 1; } } result .parameters .push((named_or_positional, format, String::new())); } } Ok(result) } /// Apply a percent-interpolation string to a value. pub fn apply<'v>(self, argument: Value<'v>, heap: &'v Heap) -> anyhow::Result<String> { let mut r = self.init; let owned_tuple; let mut arg_iter: Box<dyn Iterator<Item = Value>> = if self.named_count > 0 && self.positional_count == 0 { box iter::empty() } else { match Tuple::from_value(argument) { Some(x) => { owned_tuple = x; box owned_tuple.iter() } None => box iter::once(argument), } }; for (named_or_positional, format, tail) in self.parameters { let arg = match named_or_positional { NamedOrPositional::Positional => match arg_iter.next() { Some(a) => a, None => return Err(StringInterpolationError::NotEnoughParameters.into()), }, NamedOrPositional::Named(name) => argument.at(heap.alloc(name), heap)?, }; format.format_arg(&mut r, arg)?; r.push_str(&tail); } if arg_iter.next().is_some() { return Err(StringInterpolationError::TooManyParameters.into()); } Ok(r) } }
{ result.positional_count += 1; }
conditional_block
interpolation.rs
/* * Copyright 2018 The Starlark in Rust Authors. * Copyright (c) Facebook, Inc. and its affiliates. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ //! String interpolation-related code. //! Based on <https://docs.python.org/3/library/stdtypes.html#printf-style-string-formatting> use crate::values::{tuple::Tuple, Heap, Value, ValueLike}; use gazebo::prelude::*; use std::{fmt::Write, iter}; use thiserror::Error; const AFTER_PERCENT: &str = "'%' must be followed by an optional name and a specifier ('s', 'r', 'd', 'i', 'o', 'x', 'X', 'c') or '%'"; /// Operator `%` format or evaluation errors #[derive(Clone, Dupe, Debug, Error)] enum StringInterpolationError { #[error( "Unexpected EOF in format string. Could not find ')' when parsing '%(name)f' expression" )] UnexpectedEOFClosingParen, /// `%` must be followed by specifier. #[error("Unexpected EOF in format string. {}", AFTER_PERCENT)] UnexpectedEOFPercent, #[error("Unknown format string specifier '{}'. {}",.0.escape_default(), AFTER_PERCENT)] UnknownSpecifier(char), #[error("Invalid UTF-8 codepoint 0x{:x} passed for %c formatter",.0)] ValueNotInUTFRange(u32), /// Interpolation parameter is too big for the format string. #[error("Too many arguments for format string")] TooManyParameters, /// Interpolation parameter is too small for the format string. 
#[error("Not enough arguments for format string")] NotEnoughParameters, #[error("'%c' formatter requires a single-character string")] ValueNotChar, } /// Format char enum ArgFormat { // str(x) Str, // repr(x) Repr, // signed integer decimal Dec, // signed octal Oct, // signed hexadecimal, lowercase HexLower, // signed hexadecimal, uppercase HexUpper, // x for string, chr(x) for int Char, // `%` sign Percent, } impl ArgFormat { fn format_arg(&self, out: &mut String, arg: Value) -> anyhow::Result<()> { match self { // Equivalent to `write!(out, "{}", arg.to_str()).unwrap()`, but avoid // allocating a separate `String` on the way. ArgFormat::Str => match arg.unpack_str() { None => arg.collect_repr(out), Some(v) => out.push_str(v), }, ArgFormat::Repr => arg.collect_repr(out), ArgFormat::Dec => write!(out, "{}", arg.to_int()?).unwrap(), ArgFormat::Oct => { let v = arg.to_int()?; write!( out, "{}{:o}", if v < 0 { "-" } else { "" }, v.wrapping_abs() as u64 ) .unwrap(); } ArgFormat::HexLower => { let v = arg.to_int()?; write!( out, "{}{:x}", if v < 0 { "-" } else { "" }, v.wrapping_abs() as u64 ) .unwrap(); } ArgFormat::HexUpper => { let v = arg.to_int()?; write!( out, "{}{:X}", if v < 0 { "-" } else { "" }, v.wrapping_abs() as u64 ) .unwrap(); } ArgFormat::Char => match arg.unpack_str() { Some(arg) => { let mut chars = arg.chars(); let c = chars.next(); match c { Some(c) if chars.next().is_none() => out.push(c), _ => return Err(StringInterpolationError::ValueNotChar.into()), } } None => { let i = arg.to_int()? as u32; match std::char::from_u32(i) { Some(c) => write!(out, "{}", c).unwrap(), None => { return Err(StringInterpolationError::ValueNotInUTFRange(i).into()); } } } }, ArgFormat::Percent => { out.push('%'); } } Ok(()) } } // %(name)s or %s enum
{ Named(String), Positional, } /// Implement Python `%` format strings. pub struct Interpolation { /// String before first parameter init: String, /// Number of positional arguments positional_count: usize, /// Number of named arguments named_count: usize, /// Arguments followed by uninterpreted strings parameters: Vec<(NamedOrPositional, ArgFormat, String)>, } impl Interpolation { fn append_literal(&mut self, c: char) { if let Some(p) = self.parameters.last_mut() { p.2.push(c); } else { self.init.push(c) } } /// Parse a percent-interpolation string, returning an `Err` if the string is invalid. pub fn parse(format: &str) -> anyhow::Result<Self> { let mut result = Self { init: String::new(), positional_count: 0, named_count: 0, parameters: Vec::new(), }; let mut chars = format.chars(); while let Some(c) = chars.next() { if c!= '%' { result.append_literal(c); } else { let next = chars .next() .ok_or(StringInterpolationError::UnexpectedEOFPercent)?; let (named_or_positional, format_char) = if next == '(' { let mut name = String::new(); loop { match chars.next() { None => { return Err( StringInterpolationError::UnexpectedEOFClosingParen.into() ); } Some(')') => { break; } Some(c) => name.push(c), } } ( NamedOrPositional::Named(name), chars .next() .ok_or(StringInterpolationError::UnexpectedEOFPercent)?, ) } else { (NamedOrPositional::Positional, next) }; let format = match format_char { 's' => ArgFormat::Str, 'r' => ArgFormat::Repr, 'd' | 'i' => ArgFormat::Dec, 'o' => ArgFormat::Oct, 'x' => ArgFormat::HexLower, 'X' => ArgFormat::HexUpper, 'c' => ArgFormat::Char, '%' => match named_or_positional { NamedOrPositional::Positional => { result.append_literal('%'); continue; } NamedOrPositional::Named(_) => { // In both Python and Starlark Go implementations // `%(n)%` consumes named argument, but // `%%` does not consume positional argument. // So `Percent` variant is added only when `ArgFormat` is `Named`. 
ArgFormat::Percent } }, c => return Err(StringInterpolationError::UnknownSpecifier(c).into()), }; match named_or_positional { NamedOrPositional::Positional => { result.positional_count += 1; } NamedOrPositional::Named(..) => { result.named_count += 1; } } result .parameters .push((named_or_positional, format, String::new())); } } Ok(result) } /// Apply a percent-interpolation string to a value. pub fn apply<'v>(self, argument: Value<'v>, heap: &'v Heap) -> anyhow::Result<String> { let mut r = self.init; let owned_tuple; let mut arg_iter: Box<dyn Iterator<Item = Value>> = if self.named_count > 0 && self.positional_count == 0 { box iter::empty() } else { match Tuple::from_value(argument) { Some(x) => { owned_tuple = x; box owned_tuple.iter() } None => box iter::once(argument), } }; for (named_or_positional, format, tail) in self.parameters { let arg = match named_or_positional { NamedOrPositional::Positional => match arg_iter.next() { Some(a) => a, None => return Err(StringInterpolationError::NotEnoughParameters.into()), }, NamedOrPositional::Named(name) => argument.at(heap.alloc(name), heap)?, }; format.format_arg(&mut r, arg)?; r.push_str(&tail); } if arg_iter.next().is_some() { return Err(StringInterpolationError::TooManyParameters.into()); } Ok(r) } }
NamedOrPositional
identifier_name
interpolation.rs
/* * Copyright 2018 The Starlark in Rust Authors. * Copyright (c) Facebook, Inc. and its affiliates. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ //! String interpolation-related code. //! Based on <https://docs.python.org/3/library/stdtypes.html#printf-style-string-formatting> use crate::values::{tuple::Tuple, Heap, Value, ValueLike}; use gazebo::prelude::*; use std::{fmt::Write, iter}; use thiserror::Error; const AFTER_PERCENT: &str = "'%' must be followed by an optional name and a specifier ('s', 'r', 'd', 'i', 'o', 'x', 'X', 'c') or '%'"; /// Operator `%` format or evaluation errors #[derive(Clone, Dupe, Debug, Error)] enum StringInterpolationError { #[error( "Unexpected EOF in format string. Could not find ')' when parsing '%(name)f' expression" )] UnexpectedEOFClosingParen, /// `%` must be followed by specifier. #[error("Unexpected EOF in format string. {}", AFTER_PERCENT)] UnexpectedEOFPercent, #[error("Unknown format string specifier '{}'. {}",.0.escape_default(), AFTER_PERCENT)] UnknownSpecifier(char), #[error("Invalid UTF-8 codepoint 0x{:x} passed for %c formatter",.0)] ValueNotInUTFRange(u32), /// Interpolation parameter is too big for the format string. #[error("Too many arguments for format string")] TooManyParameters, /// Interpolation parameter is too small for the format string. 
#[error("Not enough arguments for format string")] NotEnoughParameters, #[error("'%c' formatter requires a single-character string")] ValueNotChar, } /// Format char enum ArgFormat { // str(x) Str, // repr(x) Repr, // signed integer decimal Dec, // signed octal Oct, // signed hexadecimal, lowercase HexLower, // signed hexadecimal, uppercase HexUpper, // x for string, chr(x) for int Char, // `%` sign Percent, } impl ArgFormat { fn format_arg(&self, out: &mut String, arg: Value) -> anyhow::Result<()> { match self { // Equivalent to `write!(out, "{}", arg.to_str()).unwrap()`, but avoid // allocating a separate `String` on the way. ArgFormat::Str => match arg.unpack_str() { None => arg.collect_repr(out), Some(v) => out.push_str(v), }, ArgFormat::Repr => arg.collect_repr(out), ArgFormat::Dec => write!(out, "{}", arg.to_int()?).unwrap(), ArgFormat::Oct => { let v = arg.to_int()?; write!( out, "{}{:o}", if v < 0 { "-" } else { "" }, v.wrapping_abs() as u64 ) .unwrap(); } ArgFormat::HexLower => { let v = arg.to_int()?; write!( out, "{}{:x}", if v < 0 { "-" } else { "" }, v.wrapping_abs() as u64 ) .unwrap(); } ArgFormat::HexUpper => { let v = arg.to_int()?; write!( out, "{}{:X}", if v < 0 { "-" } else { "" }, v.wrapping_abs() as u64 ) .unwrap(); } ArgFormat::Char => match arg.unpack_str() { Some(arg) => { let mut chars = arg.chars(); let c = chars.next(); match c { Some(c) if chars.next().is_none() => out.push(c), _ => return Err(StringInterpolationError::ValueNotChar.into()), } } None => { let i = arg.to_int()? as u32; match std::char::from_u32(i) { Some(c) => write!(out, "{}", c).unwrap(), None => { return Err(StringInterpolationError::ValueNotInUTFRange(i).into()); } } } }, ArgFormat::Percent => { out.push('%'); } } Ok(()) } } // %(name)s or %s enum NamedOrPositional { Named(String), Positional, } /// Implement Python `%` format strings. 
pub struct Interpolation { /// String before first parameter init: String, /// Number of positional arguments positional_count: usize, /// Number of named arguments named_count: usize, /// Arguments followed by uninterpreted strings parameters: Vec<(NamedOrPositional, ArgFormat, String)>, } impl Interpolation { fn append_literal(&mut self, c: char) { if let Some(p) = self.parameters.last_mut() { p.2.push(c); } else { self.init.push(c) } } /// Parse a percent-interpolation string, returning an `Err` if the string is invalid. pub fn parse(format: &str) -> anyhow::Result<Self> { let mut result = Self { init: String::new(), positional_count: 0, named_count: 0, parameters: Vec::new(), }; let mut chars = format.chars(); while let Some(c) = chars.next() { if c!= '%' { result.append_literal(c); } else { let next = chars .next() .ok_or(StringInterpolationError::UnexpectedEOFPercent)?; let (named_or_positional, format_char) = if next == '(' { let mut name = String::new(); loop { match chars.next() { None => { return Err( StringInterpolationError::UnexpectedEOFClosingParen.into() ); } Some(')') => { break; } Some(c) => name.push(c), } } ( NamedOrPositional::Named(name), chars .next() .ok_or(StringInterpolationError::UnexpectedEOFPercent)?, ) } else { (NamedOrPositional::Positional, next) }; let format = match format_char { 's' => ArgFormat::Str, 'r' => ArgFormat::Repr, 'd' | 'i' => ArgFormat::Dec, 'o' => ArgFormat::Oct, 'x' => ArgFormat::HexLower, 'X' => ArgFormat::HexUpper, 'c' => ArgFormat::Char, '%' => match named_or_positional { NamedOrPositional::Positional => { result.append_literal('%'); continue; } NamedOrPositional::Named(_) => { // In both Python and Starlark Go implementations // `%(n)%` consumes named argument, but // `%%` does not consume positional argument. // So `Percent` variant is added only when `ArgFormat` is `Named`. 
ArgFormat::Percent } }, c => return Err(StringInterpolationError::UnknownSpecifier(c).into()), }; match named_or_positional { NamedOrPositional::Positional => { result.positional_count += 1; } NamedOrPositional::Named(..) => { result.named_count += 1; } } result .parameters .push((named_or_positional, format, String::new())); } } Ok(result) } /// Apply a percent-interpolation string to a value. pub fn apply<'v>(self, argument: Value<'v>, heap: &'v Heap) -> anyhow::Result<String> { let mut r = self.init; let owned_tuple; let mut arg_iter: Box<dyn Iterator<Item = Value>> = if self.named_count > 0 && self.positional_count == 0 { box iter::empty() } else { match Tuple::from_value(argument) { Some(x) => { owned_tuple = x;
box owned_tuple.iter() } None => box iter::once(argument), } }; for (named_or_positional, format, tail) in self.parameters { let arg = match named_or_positional { NamedOrPositional::Positional => match arg_iter.next() { Some(a) => a, None => return Err(StringInterpolationError::NotEnoughParameters.into()), }, NamedOrPositional::Named(name) => argument.at(heap.alloc(name), heap)?, }; format.format_arg(&mut r, arg)?; r.push_str(&tail); } if arg_iter.next().is_some() { return Err(StringInterpolationError::TooManyParameters.into()); } Ok(r) } }
random_line_split
interpolation.rs
/* * Copyright 2018 The Starlark in Rust Authors. * Copyright (c) Facebook, Inc. and its affiliates. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ //! String interpolation-related code. //! Based on <https://docs.python.org/3/library/stdtypes.html#printf-style-string-formatting> use crate::values::{tuple::Tuple, Heap, Value, ValueLike}; use gazebo::prelude::*; use std::{fmt::Write, iter}; use thiserror::Error; const AFTER_PERCENT: &str = "'%' must be followed by an optional name and a specifier ('s', 'r', 'd', 'i', 'o', 'x', 'X', 'c') or '%'"; /// Operator `%` format or evaluation errors #[derive(Clone, Dupe, Debug, Error)] enum StringInterpolationError { #[error( "Unexpected EOF in format string. Could not find ')' when parsing '%(name)f' expression" )] UnexpectedEOFClosingParen, /// `%` must be followed by specifier. #[error("Unexpected EOF in format string. {}", AFTER_PERCENT)] UnexpectedEOFPercent, #[error("Unknown format string specifier '{}'. {}",.0.escape_default(), AFTER_PERCENT)] UnknownSpecifier(char), #[error("Invalid UTF-8 codepoint 0x{:x} passed for %c formatter",.0)] ValueNotInUTFRange(u32), /// Interpolation parameter is too big for the format string. #[error("Too many arguments for format string")] TooManyParameters, /// Interpolation parameter is too small for the format string. 
#[error("Not enough arguments for format string")] NotEnoughParameters, #[error("'%c' formatter requires a single-character string")] ValueNotChar, } /// Format char enum ArgFormat { // str(x) Str, // repr(x) Repr, // signed integer decimal Dec, // signed octal Oct, // signed hexadecimal, lowercase HexLower, // signed hexadecimal, uppercase HexUpper, // x for string, chr(x) for int Char, // `%` sign Percent, } impl ArgFormat { fn format_arg(&self, out: &mut String, arg: Value) -> anyhow::Result<()> { match self { // Equivalent to `write!(out, "{}", arg.to_str()).unwrap()`, but avoid // allocating a separate `String` on the way. ArgFormat::Str => match arg.unpack_str() { None => arg.collect_repr(out), Some(v) => out.push_str(v), }, ArgFormat::Repr => arg.collect_repr(out), ArgFormat::Dec => write!(out, "{}", arg.to_int()?).unwrap(), ArgFormat::Oct => { let v = arg.to_int()?; write!( out, "{}{:o}", if v < 0 { "-" } else { "" }, v.wrapping_abs() as u64 ) .unwrap(); } ArgFormat::HexLower => { let v = arg.to_int()?; write!( out, "{}{:x}", if v < 0 { "-" } else { "" }, v.wrapping_abs() as u64 ) .unwrap(); } ArgFormat::HexUpper => { let v = arg.to_int()?; write!( out, "{}{:X}", if v < 0 { "-" } else { "" }, v.wrapping_abs() as u64 ) .unwrap(); } ArgFormat::Char => match arg.unpack_str() { Some(arg) => { let mut chars = arg.chars(); let c = chars.next(); match c { Some(c) if chars.next().is_none() => out.push(c), _ => return Err(StringInterpolationError::ValueNotChar.into()), } } None => { let i = arg.to_int()? as u32; match std::char::from_u32(i) { Some(c) => write!(out, "{}", c).unwrap(), None => { return Err(StringInterpolationError::ValueNotInUTFRange(i).into()); } } } }, ArgFormat::Percent => { out.push('%'); } } Ok(()) } } // %(name)s or %s enum NamedOrPositional { Named(String), Positional, } /// Implement Python `%` format strings. 
pub struct Interpolation { /// String before first parameter init: String, /// Number of positional arguments positional_count: usize, /// Number of named arguments named_count: usize, /// Arguments followed by uninterpreted strings parameters: Vec<(NamedOrPositional, ArgFormat, String)>, } impl Interpolation { fn append_literal(&mut self, c: char) { if let Some(p) = self.parameters.last_mut() { p.2.push(c); } else { self.init.push(c) } } /// Parse a percent-interpolation string, returning an `Err` if the string is invalid. pub fn parse(format: &str) -> anyhow::Result<Self>
return Err( StringInterpolationError::UnexpectedEOFClosingParen.into() ); } Some(')') => { break; } Some(c) => name.push(c), } } ( NamedOrPositional::Named(name), chars .next() .ok_or(StringInterpolationError::UnexpectedEOFPercent)?, ) } else { (NamedOrPositional::Positional, next) }; let format = match format_char { 's' => ArgFormat::Str, 'r' => ArgFormat::Repr, 'd' | 'i' => ArgFormat::Dec, 'o' => ArgFormat::Oct, 'x' => ArgFormat::HexLower, 'X' => ArgFormat::HexUpper, 'c' => ArgFormat::Char, '%' => match named_or_positional { NamedOrPositional::Positional => { result.append_literal('%'); continue; } NamedOrPositional::Named(_) => { // In both Python and Starlark Go implementations // `%(n)%` consumes named argument, but // `%%` does not consume positional argument. // So `Percent` variant is added only when `ArgFormat` is `Named`. ArgFormat::Percent } }, c => return Err(StringInterpolationError::UnknownSpecifier(c).into()), }; match named_or_positional { NamedOrPositional::Positional => { result.positional_count += 1; } NamedOrPositional::Named(..) => { result.named_count += 1; } } result .parameters .push((named_or_positional, format, String::new())); } } Ok(result) } /// Apply a percent-interpolation string to a value. 
pub fn apply<'v>(self, argument: Value<'v>, heap: &'v Heap) -> anyhow::Result<String> { let mut r = self.init; let owned_tuple; let mut arg_iter: Box<dyn Iterator<Item = Value>> = if self.named_count > 0 && self.positional_count == 0 { box iter::empty() } else { match Tuple::from_value(argument) { Some(x) => { owned_tuple = x; box owned_tuple.iter() } None => box iter::once(argument), } }; for (named_or_positional, format, tail) in self.parameters { let arg = match named_or_positional { NamedOrPositional::Positional => match arg_iter.next() { Some(a) => a, None => return Err(StringInterpolationError::NotEnoughParameters.into()), }, NamedOrPositional::Named(name) => argument.at(heap.alloc(name), heap)?, }; format.format_arg(&mut r, arg)?; r.push_str(&tail); } if arg_iter.next().is_some() { return Err(StringInterpolationError::TooManyParameters.into()); } Ok(r) } }
{ let mut result = Self { init: String::new(), positional_count: 0, named_count: 0, parameters: Vec::new(), }; let mut chars = format.chars(); while let Some(c) = chars.next() { if c != '%' { result.append_literal(c); } else { let next = chars .next() .ok_or(StringInterpolationError::UnexpectedEOFPercent)?; let (named_or_positional, format_char) = if next == '(' { let mut name = String::new(); loop { match chars.next() { None => {
identifier_body
loading.rs
extern crate quicksilver; extern crate json; use quicksilver::prelude::*; use std::collections::HashMap; use itertools::Itertools; use std::iter; use crate::game_logic::{BoardState}; use crate::game_control::*; use crate::game_objects::*; use crate::ai::AI; use crate::automaton::{AutomatonState, GameEvent}; use std::mem::take; use futures::{Async}; use derivative::*; use quicksilver::Error as QuicksilverError; use quicksilver::combinators::{join_all, JoinAll, Join}; use std::rc::Rc; pub const CARD_TITLE_FONT: &'static str = "Teko-Regular.ttf"; pub const CARD_BACKGROUND_IMG: &'static str = "card_bg.png"; #[derive(Derivative, Default)] #[derivative(Debug)] pub struct Assets { #[derivative(Debug = "ignore")] pub fonts: HashMap<String, Box<Font>>, // we borrow fonts to create new data: there's no reason to hold it pub images: HashMap<String, Rc<Image>>, // UI cards do hold reference to images } type CardFactory = HashMap<String, Card>; fn cards_by_counts(factory: &CardFactory, counts: HashMap<String, usize>) -> Vec<Card> { counts.iter() .flat_map(|(key, num)| iter::repeat(key).take(*num)) .filter_map(|key| factory.get(key)) .cloned() .collect() } fn
(json: &serde_json::value::Value, node_name: &str, card_factory: &CardFactory) -> Deck { let deck_node = { json.get(node_name) .expect(format!("Deck node \"{}\" not found", node_name).as_str()) .clone() }; let data: HashMap<String, usize> = serde_json::from_value(deck_node) .expect("Malformed deck list"); let mut deck = Deck::from(cards_by_counts(card_factory, data)); deck.shuffle(); deck } fn parse_store(zone: BoardZone, json: &serde_json::value::Value, node: &str, factory: &CardFactory) -> Store { let source_node = json.get(node).expect(format!("store node {} not found", node).as_str()).clone(); let store_type: StoreType = serde_json::from_value(source_node).expect("Malformed store description"); match store_type.clone() { StoreType::Fixed { items } => { let cards = items.iter() .filter_map(|name| factory.get(name)) .map(|card| card.clone()) .collect(); Store { store_type: store_type, menu: CardContainer { zone: zone, cards: cards, size: None }, deck: None, } } StoreType::Drafted { size, from_deck } => { let mut deck = parse_deck(json, &from_deck, factory); deck.shuffle(); let cards = (0..size).filter_map(|_| deck.draw()).collect(); Store { store_type: store_type, menu: CardContainer { zone: zone, cards: cards, size: Some(size) }, deck: Some(Box::new(deck)), } } } } fn container_counts(zone: BoardZone, json: &serde_json::value::Value, node: &str, factory: &CardFactory) -> CardContainer { let source_node = json.get(node).expect(format!("count node {} not found", node).as_str()).clone(); let data: HashMap<String, usize> = serde_json::from_value(source_node) .expect("Malformed node"); CardContainer { zone: zone, cards: cards_by_counts(factory, data), size: None, } } pub fn load_players(json: &serde_json::Value) -> Vec<Player> { let player_node = json.get("players") .expect("file should have \"players\" node.") .clone(); let mut players: Vec<Player> = serde_json::from_value(player_node) .expect("Malformed player node"); let game_type = json.get("game_type") 
.expect("game type not specified") .as_str() .expect("game type not string"); match game_type.to_lowercase().as_str() { "vs" => { assert_eq!(players.len(), 2, "For VS game, only 2 players are possible"); players[0].opponent_idx = 1; players[1].opponent_idx = 0; }, _ => panic!("Unknown game type") } players } pub fn load_board(json: &serde_json::Value, card_factory: &CardFactory, player: Player) -> BoardState { let store_node = "build_store"; let trade_row = "kaiju_store"; let hand_size = 5; let draw_deck = parse_deck(&json, &player.starting_deck, card_factory); //let bs_node = { json.get("build_store").expect("build_store node not found").clone() }; let build_store = parse_store(BoardZone::BuildStore, &json, store_node, card_factory); //let ks_node = { json.get("kaiju_store").expect("kaiju_store node not found").clone() }; let kaiju_store = parse_store(BoardZone::KaijuStore, &json, trade_row, card_factory); let hand = CardContainer::new_sized(BoardZone::Hand, hand_size); let buildings = container_counts(BoardZone::Buildings, &json, &player.starting_buildings, card_factory); let kaiju = CardContainer::new(BoardZone::Kaiju); let ai = match player.control { PlayerControl::Human => None, PlayerControl::AI => Some(AI::new()) }; println!("Loading done"); BoardState { player: player, turn: 1, hand: Box::new(hand), deck: Box::new(draw_deck), globals: NumberMap::new(), stores: Box::new(vec!(build_store, kaiju_store)), buildings: Box::new(buildings), kaiju_zone: Box::new(kaiju), ai: ai } } /// Loading state: loads all assets to memory and passes them to GameplayState. /// /// The asset loading in Quicksilver (as described in tutorial) is awkward: it requires conditional /// execution whenever any asset is used. As we don't have large amount of data, it is more ergonomic /// to just load them all to RAM and use them directly. /// /// Loading in Quicksilver is internally done using Futures (that can, but don't have to /// be wrapped in Assets). 
Futures can be nested using combinators (that themselves are Futures). /// Every Future has a poll() method that returns Async::NotReady when it is not yet done, and /// Async::Ready when its data are ready (i. e. loading is done). /// It must not be called afterwards: it would panic. /// /// It turns out this is perfect fit for our application: we combine all assets into single Future, /// hook it into our event loop, polling it every update, while drawing a loading screen. When it /// becomes ready, we construct a new State, pass it all the assets extracted from the Future and continue. /// /// Sadly, it is complicated by the fact that Quicksilver re-exports Future trait and combinators, but /// not the Async enum. As this enum comes from "futures" crate, we just install it in the exact same /// version that Quicksilver uses and use that. #[derive(Derivative, Default)] #[derivative(Debug)] pub struct LoadingState { board_states: Vec<BoardState>, image_names: Vec<String>, font_names: Vec<String>, #[derivative(Debug = "ignore")] // Option just to get Default loading: Option< Join< JoinAll< Vec<Box<dyn Future<Item=Font, Error=QuicksilverError>>> >, JoinAll< Vec<Box<dyn Future<Item=Image, Error=QuicksilverError>>> > > >, } impl LoadingState { pub fn new() -> Box<Self> { let font_names = vec![CARD_TITLE_FONT.to_string()]; let file = load_file("cards_expanded.json") .wait() .expect("file should open read only"); // TODO: do this asynchronously, too let json: serde_json::Value = serde_json::from_slice(file.as_slice()) .expect("file should be proper JSON"); let cards: CardFactory = serde_json::from_value( json.get("cards").expect("file should have \"cards\" node").clone() ).expect("malformed card list"); let mut image_names = cards.values() .map(|v| v.image.clone()) .unique() .collect::<Vec<String>>(); image_names.push(CARD_BACKGROUND_IMG.to_string()); println!("Loading fonts {:?} and images: {:?}", font_names, image_names); let loading_images = join_all( 
font_names.iter() .map(|i| Box::new(Font::load(i.clone())) as Box<dyn Future<Item=Font, Error=QuicksilverError>>) .collect::<Vec<Box<_>>>() ).join( join_all( image_names.iter() .map(|i| Box::new(Image::load(i.clone())) as Box<dyn Future<Item=Image, Error=QuicksilverError>>) .collect::<Vec<Box<_>>>() ) ); let players = load_players(&json); let board_states = players.iter() .map(|p| load_board(&json, &cards, p.clone())) .collect(); //let board_state = load_board(json); Box::new(Self { board_states, image_names, font_names, loading: Some(loading_images), }) } } impl AutomatonState for LoadingState { fn event(&mut self, event: GameEvent) -> Box<dyn AutomatonState> { Box::new(take(self)) } fn update(&mut self) -> Box<dyn AutomatonState> { let result = self.loading.as_mut().unwrap().poll(); match result { // We use draining iterators to take ownership Ok(Async::Ready((mut fonts, mut images))) => { let mut loaded_fonts = HashMap::new(); for (k, v) in self.font_names.drain(..).zip(fonts.drain(..)) { loaded_fonts.insert(k, Box::new(v)); } let mut loaded_images = HashMap::new(); for (k, v) in self.image_names.drain(..).zip(images.drain(..)) { loaded_images.insert(k, Rc::new(v)); } let mut control_state = Box::new(GameControlState::new( self.board_states.clone(), Assets { fonts: loaded_fonts, images: loaded_images, }, )); // TODO async load board control_state.overtake() } Ok(Async::NotReady) => { Box::new(take(self)) } Err(_) => { panic!("Can't load images") } // Value in Err is from another thread, and is not Sync. Yes, really. } } fn draw(&self, window: &mut Window) -> () { window.draw(&Circle::new((300, 300), 32), Col(Color::BLUE)); } }
parse_deck
identifier_name
loading.rs
extern crate quicksilver; extern crate json; use quicksilver::prelude::*; use std::collections::HashMap; use itertools::Itertools; use std::iter; use crate::game_logic::{BoardState}; use crate::game_control::*; use crate::game_objects::*; use crate::ai::AI; use crate::automaton::{AutomatonState, GameEvent}; use std::mem::take; use futures::{Async}; use derivative::*; use quicksilver::Error as QuicksilverError; use quicksilver::combinators::{join_all, JoinAll, Join}; use std::rc::Rc; pub const CARD_TITLE_FONT: &'static str = "Teko-Regular.ttf"; pub const CARD_BACKGROUND_IMG: &'static str = "card_bg.png"; #[derive(Derivative, Default)] #[derivative(Debug)] pub struct Assets { #[derivative(Debug = "ignore")] pub fonts: HashMap<String, Box<Font>>, // we borrow fonts to create new data: there's no reason to hold it pub images: HashMap<String, Rc<Image>>, // UI cards do hold reference to images } type CardFactory = HashMap<String, Card>; fn cards_by_counts(factory: &CardFactory, counts: HashMap<String, usize>) -> Vec<Card> { counts.iter() .flat_map(|(key, num)| iter::repeat(key).take(*num)) .filter_map(|key| factory.get(key)) .cloned() .collect() } fn parse_deck(json: &serde_json::value::Value, node_name: &str, card_factory: &CardFactory) -> Deck { let deck_node = { json.get(node_name) .expect(format!("Deck node \"{}\" not found", node_name).as_str()) .clone() }; let data: HashMap<String, usize> = serde_json::from_value(deck_node) .expect("Malformed deck list"); let mut deck = Deck::from(cards_by_counts(card_factory, data)); deck.shuffle(); deck } fn parse_store(zone: BoardZone, json: &serde_json::value::Value, node: &str, factory: &CardFactory) -> Store { let source_node = json.get(node).expect(format!("store node {} not found", node).as_str()).clone(); let store_type: StoreType = serde_json::from_value(source_node).expect("Malformed store description"); match store_type.clone() { StoreType::Fixed { items } => { let cards = items.iter() .filter_map(|name| 
factory.get(name)) .map(|card| card.clone()) .collect(); Store { store_type: store_type, menu: CardContainer { zone: zone, cards: cards, size: None }, deck: None, } } StoreType::Drafted { size, from_deck } => { let mut deck = parse_deck(json, &from_deck, factory); deck.shuffle(); let cards = (0..size).filter_map(|_| deck.draw()).collect(); Store { store_type: store_type, menu: CardContainer { zone: zone, cards: cards, size: Some(size) }, deck: Some(Box::new(deck)), } } } } fn container_counts(zone: BoardZone, json: &serde_json::value::Value, node: &str, factory: &CardFactory) -> CardContainer { let source_node = json.get(node).expect(format!("count node {} not found", node).as_str()).clone(); let data: HashMap<String, usize> = serde_json::from_value(source_node) .expect("Malformed node"); CardContainer { zone: zone, cards: cards_by_counts(factory, data), size: None, } } pub fn load_players(json: &serde_json::Value) -> Vec<Player> { let player_node = json.get("players") .expect("file should have \"players\" node.") .clone(); let mut players: Vec<Player> = serde_json::from_value(player_node) .expect("Malformed player node"); let game_type = json.get("game_type")
match game_type.to_lowercase().as_str() { "vs" => { assert_eq!(players.len(), 2, "For VS game, only 2 players are possible"); players[0].opponent_idx = 1; players[1].opponent_idx = 0; }, _ => panic!("Unknown game type") } players } pub fn load_board(json: &serde_json::Value, card_factory: &CardFactory, player: Player) -> BoardState { let store_node = "build_store"; let trade_row = "kaiju_store"; let hand_size = 5; let draw_deck = parse_deck(&json, &player.starting_deck, card_factory); //let bs_node = { json.get("build_store").expect("build_store node not found").clone() }; let build_store = parse_store(BoardZone::BuildStore, &json, store_node, card_factory); //let ks_node = { json.get("kaiju_store").expect("kaiju_store node not found").clone() }; let kaiju_store = parse_store(BoardZone::KaijuStore, &json, trade_row, card_factory); let hand = CardContainer::new_sized(BoardZone::Hand, hand_size); let buildings = container_counts(BoardZone::Buildings, &json, &player.starting_buildings, card_factory); let kaiju = CardContainer::new(BoardZone::Kaiju); let ai = match player.control { PlayerControl::Human => None, PlayerControl::AI => Some(AI::new()) }; println!("Loading done"); BoardState { player: player, turn: 1, hand: Box::new(hand), deck: Box::new(draw_deck), globals: NumberMap::new(), stores: Box::new(vec!(build_store, kaiju_store)), buildings: Box::new(buildings), kaiju_zone: Box::new(kaiju), ai: ai } } /// Loading state: loads all assets to memory and passes them to GameplayState. /// /// The asset loading in Quicksilver (as described in tutorial) is awkward: it requires conditional /// execution whenever any asset is used. As we don't have large amount of data, it is more ergonomic /// to just load them all to RAM and use them directly. /// /// Loading in Quicksilver is internally done using Futures (that can, but don't have to /// be wrapped in Assets). Futures can be nested using combinators (that themselves are Futures). 
/// Every Future has a poll() method that returns Async::NotReady when it is not yet done, and /// Async::Ready when its data are ready (i. e. loading is done). /// It must not be called afterwards: it would panic. /// /// It turns out this is perfect fit for our application: we combine all assets into single Future, /// hook it into our event loop, polling it every update, while drawing a loading screen. When it /// becomes ready, we construct a new State, pass it all the assets extracted from the Future and continue. /// /// Sadly, it is complicated by the fact that Quicksilver re-exports Future trait and combinators, but /// not the Async enum. As this enum comes from "futures" crate, we just install it in the exact same /// version that Quicksilver uses and use that. #[derive(Derivative, Default)] #[derivative(Debug)] pub struct LoadingState { board_states: Vec<BoardState>, image_names: Vec<String>, font_names: Vec<String>, #[derivative(Debug = "ignore")] // Option just to get Default loading: Option< Join< JoinAll< Vec<Box<dyn Future<Item=Font, Error=QuicksilverError>>> >, JoinAll< Vec<Box<dyn Future<Item=Image, Error=QuicksilverError>>> > > >, } impl LoadingState { pub fn new() -> Box<Self> { let font_names = vec![CARD_TITLE_FONT.to_string()]; let file = load_file("cards_expanded.json") .wait() .expect("file should open read only"); // TODO: do this asynchronously, too let json: serde_json::Value = serde_json::from_slice(file.as_slice()) .expect("file should be proper JSON"); let cards: CardFactory = serde_json::from_value( json.get("cards").expect("file should have \"cards\" node").clone() ).expect("malformed card list"); let mut image_names = cards.values() .map(|v| v.image.clone()) .unique() .collect::<Vec<String>>(); image_names.push(CARD_BACKGROUND_IMG.to_string()); println!("Loading fonts {:?} and images: {:?}", font_names, image_names); let loading_images = join_all( font_names.iter() .map(|i| Box::new(Font::load(i.clone())) as Box<dyn 
Future<Item=Font, Error=QuicksilverError>>) .collect::<Vec<Box<_>>>() ).join( join_all( image_names.iter() .map(|i| Box::new(Image::load(i.clone())) as Box<dyn Future<Item=Image, Error=QuicksilverError>>) .collect::<Vec<Box<_>>>() ) ); let players = load_players(&json); let board_states = players.iter() .map(|p| load_board(&json, &cards, p.clone())) .collect(); //let board_state = load_board(json); Box::new(Self { board_states, image_names, font_names, loading: Some(loading_images), }) } } impl AutomatonState for LoadingState { fn event(&mut self, event: GameEvent) -> Box<dyn AutomatonState> { Box::new(take(self)) } fn update(&mut self) -> Box<dyn AutomatonState> { let result = self.loading.as_mut().unwrap().poll(); match result { // We use draining iterators to take ownership Ok(Async::Ready((mut fonts, mut images))) => { let mut loaded_fonts = HashMap::new(); for (k, v) in self.font_names.drain(..).zip(fonts.drain(..)) { loaded_fonts.insert(k, Box::new(v)); } let mut loaded_images = HashMap::new(); for (k, v) in self.image_names.drain(..).zip(images.drain(..)) { loaded_images.insert(k, Rc::new(v)); } let mut control_state = Box::new(GameControlState::new( self.board_states.clone(), Assets { fonts: loaded_fonts, images: loaded_images, }, )); // TODO async load board control_state.overtake() } Ok(Async::NotReady) => { Box::new(take(self)) } Err(_) => { panic!("Can't load images") } // Value in Err is from another thread, and is not Sync. Yes, really. } } fn draw(&self, window: &mut Window) -> () { window.draw(&Circle::new((300, 300), 32), Col(Color::BLUE)); } }
.expect("game type not specified") .as_str() .expect("game type not string");
random_line_split
loading.rs
extern crate quicksilver; extern crate json; use quicksilver::prelude::*; use std::collections::HashMap; use itertools::Itertools; use std::iter; use crate::game_logic::{BoardState}; use crate::game_control::*; use crate::game_objects::*; use crate::ai::AI; use crate::automaton::{AutomatonState, GameEvent}; use std::mem::take; use futures::{Async}; use derivative::*; use quicksilver::Error as QuicksilverError; use quicksilver::combinators::{join_all, JoinAll, Join}; use std::rc::Rc; pub const CARD_TITLE_FONT: &'static str = "Teko-Regular.ttf"; pub const CARD_BACKGROUND_IMG: &'static str = "card_bg.png"; #[derive(Derivative, Default)] #[derivative(Debug)] pub struct Assets { #[derivative(Debug = "ignore")] pub fonts: HashMap<String, Box<Font>>, // we borrow fonts to create new data: there's no reason to hold it pub images: HashMap<String, Rc<Image>>, // UI cards do hold reference to images } type CardFactory = HashMap<String, Card>; fn cards_by_counts(factory: &CardFactory, counts: HashMap<String, usize>) -> Vec<Card> { counts.iter() .flat_map(|(key, num)| iter::repeat(key).take(*num)) .filter_map(|key| factory.get(key)) .cloned() .collect() } fn parse_deck(json: &serde_json::value::Value, node_name: &str, card_factory: &CardFactory) -> Deck { let deck_node = { json.get(node_name) .expect(format!("Deck node \"{}\" not found", node_name).as_str()) .clone() }; let data: HashMap<String, usize> = serde_json::from_value(deck_node) .expect("Malformed deck list"); let mut deck = Deck::from(cards_by_counts(card_factory, data)); deck.shuffle(); deck } fn parse_store(zone: BoardZone, json: &serde_json::value::Value, node: &str, factory: &CardFactory) -> Store { let source_node = json.get(node).expect(format!("store node {} not found", node).as_str()).clone(); let store_type: StoreType = serde_json::from_value(source_node).expect("Malformed store description"); match store_type.clone() { StoreType::Fixed { items } => { let cards = items.iter() .filter_map(|name| 
factory.get(name)) .map(|card| card.clone()) .collect(); Store { store_type: store_type, menu: CardContainer { zone: zone, cards: cards, size: None }, deck: None, } } StoreType::Drafted { size, from_deck } => { let mut deck = parse_deck(json, &from_deck, factory); deck.shuffle(); let cards = (0..size).filter_map(|_| deck.draw()).collect(); Store { store_type: store_type, menu: CardContainer { zone: zone, cards: cards, size: Some(size) }, deck: Some(Box::new(deck)), } } } } fn container_counts(zone: BoardZone, json: &serde_json::value::Value, node: &str, factory: &CardFactory) -> CardContainer { let source_node = json.get(node).expect(format!("count node {} not found", node).as_str()).clone(); let data: HashMap<String, usize> = serde_json::from_value(source_node) .expect("Malformed node"); CardContainer { zone: zone, cards: cards_by_counts(factory, data), size: None, } } pub fn load_players(json: &serde_json::Value) -> Vec<Player> { let player_node = json.get("players") .expect("file should have \"players\" node.") .clone(); let mut players: Vec<Player> = serde_json::from_value(player_node) .expect("Malformed player node"); let game_type = json.get("game_type") .expect("game type not specified") .as_str() .expect("game type not string"); match game_type.to_lowercase().as_str() { "vs" => { assert_eq!(players.len(), 2, "For VS game, only 2 players are possible"); players[0].opponent_idx = 1; players[1].opponent_idx = 0; }, _ => panic!("Unknown game type") } players } pub fn load_board(json: &serde_json::Value, card_factory: &CardFactory, player: Player) -> BoardState
PlayerControl::Human => None, PlayerControl::AI => Some(AI::new()) }; println!("Loading done"); BoardState { player: player, turn: 1, hand: Box::new(hand), deck: Box::new(draw_deck), globals: NumberMap::new(), stores: Box::new(vec!(build_store, kaiju_store)), buildings: Box::new(buildings), kaiju_zone: Box::new(kaiju), ai: ai } } /// Loading state: loads all assets to memory and passes them to GameplayState. /// /// The asset loading in Quicksilver (as described in tutorial) is awkward: it requires conditional /// execution whenever any asset is used. As we don't have large amount of data, it is more ergonomic /// to just load them all to RAM and use them directly. /// /// Loading in Quicksilver is internally done using Futures (that can, but don't have to /// be wrapped in Assets). Futures can be nested using combinators (that themselves are Futures). /// Every Future has a poll() method that returns Async::NotReady when it is not yet done, and /// Async::Ready when its data are ready (i. e. loading is done). /// It must not be called afterwards: it would panic. /// /// It turns out this is perfect fit for our application: we combine all assets into single Future, /// hook it into our event loop, polling it every update, while drawing a loading screen. When it /// becomes ready, we construct a new State, pass it all the assets extracted from the Future and continue. /// /// Sadly, it is complicated by the fact that Quicksilver re-exports Future trait and combinators, but /// not the Async enum. As this enum comes from "futures" crate, we just install it in the exact same /// version that Quicksilver uses and use that. 
#[derive(Derivative, Default)] #[derivative(Debug)] pub struct LoadingState { board_states: Vec<BoardState>, image_names: Vec<String>, font_names: Vec<String>, #[derivative(Debug = "ignore")] // Option just to get Default loading: Option< Join< JoinAll< Vec<Box<dyn Future<Item=Font, Error=QuicksilverError>>> >, JoinAll< Vec<Box<dyn Future<Item=Image, Error=QuicksilverError>>> > > >, } impl LoadingState { pub fn new() -> Box<Self> { let font_names = vec![CARD_TITLE_FONT.to_string()]; let file = load_file("cards_expanded.json") .wait() .expect("file should open read only"); // TODO: do this asynchronously, too let json: serde_json::Value = serde_json::from_slice(file.as_slice()) .expect("file should be proper JSON"); let cards: CardFactory = serde_json::from_value( json.get("cards").expect("file should have \"cards\" node").clone() ).expect("malformed card list"); let mut image_names = cards.values() .map(|v| v.image.clone()) .unique() .collect::<Vec<String>>(); image_names.push(CARD_BACKGROUND_IMG.to_string()); println!("Loading fonts {:?} and images: {:?}", font_names, image_names); let loading_images = join_all( font_names.iter() .map(|i| Box::new(Font::load(i.clone())) as Box<dyn Future<Item=Font, Error=QuicksilverError>>) .collect::<Vec<Box<_>>>() ).join( join_all( image_names.iter() .map(|i| Box::new(Image::load(i.clone())) as Box<dyn Future<Item=Image, Error=QuicksilverError>>) .collect::<Vec<Box<_>>>() ) ); let players = load_players(&json); let board_states = players.iter() .map(|p| load_board(&json, &cards, p.clone())) .collect(); //let board_state = load_board(json); Box::new(Self { board_states, image_names, font_names, loading: Some(loading_images), }) } } impl AutomatonState for LoadingState { fn event(&mut self, event: GameEvent) -> Box<dyn AutomatonState> { Box::new(take(self)) } fn update(&mut self) -> Box<dyn AutomatonState> { let result = self.loading.as_mut().unwrap().poll(); match result { // We use draining iterators to take ownership 
Ok(Async::Ready((mut fonts, mut images))) => { let mut loaded_fonts = HashMap::new(); for (k, v) in self.font_names.drain(..).zip(fonts.drain(..)) { loaded_fonts.insert(k, Box::new(v)); } let mut loaded_images = HashMap::new(); for (k, v) in self.image_names.drain(..).zip(images.drain(..)) { loaded_images.insert(k, Rc::new(v)); } let mut control_state = Box::new(GameControlState::new( self.board_states.clone(), Assets { fonts: loaded_fonts, images: loaded_images, }, )); // TODO async load board control_state.overtake() } Ok(Async::NotReady) => { Box::new(take(self)) } Err(_) => { panic!("Can't load images") } // Value in Err is from another thread, and is not Sync. Yes, really. } } fn draw(&self, window: &mut Window) -> () { window.draw(&Circle::new((300, 300), 32), Col(Color::BLUE)); } }
{ let store_node = "build_store"; let trade_row = "kaiju_store"; let hand_size = 5; let draw_deck = parse_deck(&json, &player.starting_deck, card_factory); //let bs_node = { json.get("build_store").expect("build_store node not found").clone() }; let build_store = parse_store(BoardZone::BuildStore, &json, store_node, card_factory); //let ks_node = { json.get("kaiju_store").expect("kaiju_store node not found").clone() }; let kaiju_store = parse_store(BoardZone::KaijuStore, &json, trade_row, card_factory); let hand = CardContainer::new_sized(BoardZone::Hand, hand_size); let buildings = container_counts(BoardZone::Buildings, &json, &player.starting_buildings, card_factory); let kaiju = CardContainer::new(BoardZone::Kaiju); let ai = match player.control {
identifier_body
episode.rs
use std::cmp; use std::path::Path; use std::u32; use byteorder::{ByteOrder, BE}; use fst::{self, IntoStreamer, Streamer}; use crate::error::{Error, Result}; use crate::index::csv_file; use crate::record::Episode; use crate::util::{IMDB_EPISODE, fst_set_builder_file, fst_set_file}; /// The name of the episode index file. /// /// The episode index maps TV show ids to episodes. The index is constructed /// in a way where either of the following things can be used as look up keys: /// /// tvshow IMDb title ID /// (tvshow IMDb title ID, season number) /// /// In particular, the index itself stores the entire episode record, and it /// can be re-constituted without re-visiting the original episode data file. const SEASONS: &str = "episode.seasons.fst"; /// The name of the TV show index file. /// /// The TV show index maps episode IMDb title IDs to tvshow IMDb title IDs. /// This allows us to quickly look up the TV show corresponding to an episode /// in search results. /// /// The format of this index is an FST set, where each key corresponds to the /// episode ID joined with the TV show ID by a `NUL` byte. This lets us do /// a range query on the set when given the episode ID to find the TV show ID. const TVSHOWS: &str = "episode.tvshows.fst"; /// An episode index that supports retrieving season and episode information /// quickly. #[derive(Debug)] pub struct Index { seasons: fst::Set, tvshows: fst::Set, } impl Index { /// Open an episode index from the given index directory. pub fn open<P: AsRef<Path>>(index_dir: P) -> Result<Index> { let index_dir = index_dir.as_ref(); // We claim it is safe to open the following memory map because we // don't mutate them and no other process (should) either. let seasons = unsafe { fst_set_file(index_dir.join(SEASONS))? }; let tvshows = unsafe { fst_set_file(index_dir.join(TVSHOWS))? 
}; Ok(Index { seasons: seasons, tvshows: tvshows, }) } /// Create an episode index from the given IMDb data directory and write /// it to the given index directory. If an episode index already exists, /// then it is overwritten. pub fn create<P1: AsRef<Path>, P2: AsRef<Path>>( data_dir: P1, index_dir: P2, ) -> Result<Index> { let data_dir = data_dir.as_ref(); let index_dir = index_dir.as_ref(); let mut buf = vec![]; let mut seasons = fst_set_builder_file(index_dir.join(SEASONS))?; let mut tvshows = fst_set_builder_file(index_dir.join(TVSHOWS))?; let mut episodes = read_sorted_episodes(data_dir)?; for episode in &episodes { buf.clear(); write_episode(episode, &mut buf)?; seasons.insert(&buf).map_err(Error::fst)?; } episodes.sort_by(|e1, e2| { (&e1.id, &e1.tvshow_id).cmp(&(&e2.id, &e2.tvshow_id)) }); for episode in &episodes { buf.clear(); write_tvshow(&episode, &mut buf)?; tvshows.insert(&buf).map_err(Error::fst)?; } seasons.finish().map_err(Error::fst)?; tvshows.finish().map_err(Error::fst)?; log::info!("{} episodes indexed", episodes.len()); Index::open(index_dir) } /// Return a sequence of episodes for the given TV show IMDb identifier. /// /// The episodes are sorted in order of season number and episode number. /// Episodes without season/episode numbers are sorted after episodes with /// numbers. pub fn seasons(&self, tvshow_id: &[u8]) -> Result<Vec<Episode>> { let mut upper = tvshow_id.to_vec(); upper.push(0xFF); let mut episodes = vec![]; let mut stream = self.seasons.range() .ge(tvshow_id) .le(upper) .into_stream(); while let Some(episode_bytes) = stream.next() { episodes.push(read_episode(episode_bytes)?); } Ok(episodes) } /// Return a sequence of episodes for the given TV show IMDb identifier and /// season number. /// /// The episodes are sorted in order of episode number. Episodes without /// episode numbers are sorted after episodes with numbers. 
pub fn episodes( &self, tvshow_id: &[u8], season: u32, ) -> Result<Vec<Episode>> { let mut lower = tvshow_id.to_vec(); lower.push(0x00); lower.extend_from_slice(&u32_to_bytes(season)); lower.extend_from_slice(&u32_to_bytes(0)); let mut upper = tvshow_id.to_vec(); upper.push(0x00); upper.extend_from_slice(&u32_to_bytes(season)); upper.extend_from_slice(&u32_to_bytes(u32::MAX)); let mut episodes = vec![]; let mut stream = self.seasons.range() .ge(lower) .le(upper) .into_stream(); while let Some(episode_bytes) = stream.next() { episodes.push(read_episode(episode_bytes)?); } Ok(episodes) } /// Return the episode information for the given episode IMDb identifier. /// /// If no episode information for the given ID exists, then `None` is /// returned. pub fn episode(&self, episode_id: &[u8]) -> Result<Option<Episode>> { let mut upper = episode_id.to_vec(); upper.push(0xFF); let mut stream = self.tvshows.range() .ge(episode_id) .le(upper) .into_stream(); while let Some(tvshow_bytes) = stream.next() { return Ok(Some(read_tvshow(tvshow_bytes)?)); } Ok(None) } } fn read_sorted_episodes(data_dir: &Path) -> Result<Vec<Episode>> { // We claim it is safe to open the following memory map because we don't // mutate them and no other process (should) either. 
let mut rdr = csv_file(data_dir.join(IMDB_EPISODE))?; let mut records = vec![]; for result in rdr.deserialize() { let record: Episode = result.map_err(Error::csv)?; records.push(record); } records.sort_by(cmp_episode); Ok(records) } fn cmp_episode(ep1: &Episode, ep2: &Episode) -> cmp::Ordering { let k1 = ( &ep1.tvshow_id, ep1.season.unwrap_or(u32::MAX), ep1.episode.unwrap_or(u32::MAX), &ep1.id, ); let k2 = ( &ep2.tvshow_id, ep2.season.unwrap_or(u32::MAX), ep2.episode.unwrap_or(u32::MAX), &ep2.id, ); k1.cmp(&k2) } fn read_episode(bytes: &[u8]) -> Result<Episode> { let nul = match bytes.iter().position(|&b| b == 0) { Some(nul) => nul, None => bug!("could not find nul byte"), }; let tvshow_id = match String::from_utf8(bytes[..nul].to_vec()) { Err(err) => bug!("tvshow_id invalid UTF-8: {}", err), Ok(tvshow_id) => tvshow_id, }; let mut i = nul + 1; let season = from_optional_u32(&bytes[i..]); i += 4; let epnum = from_optional_u32(&bytes[i..]); i += 4; let id = match String::from_utf8(bytes[i..].to_vec()) { Err(err) => bug!("episode id invalid UTF-8: {}", err), Ok(id) => id, }; Ok(Episode { id: id, tvshow_id: tvshow_id, season: season, episode: epnum, }) } fn write_episode(ep: &Episode, buf: &mut Vec<u8>) -> Result<()> { if ep.tvshow_id.as_bytes().iter().any(|&b| b == 0) { bug!("unsupported tvshow id (with NUL byte) for {:?}", ep); } buf.extend_from_slice(ep.tvshow_id.as_bytes()); buf.push(0x00); buf.extend_from_slice(&u32_to_bytes(to_optional_season(ep)?)); buf.extend_from_slice(&u32_to_bytes(to_optional_epnum(ep)?)); buf.extend_from_slice(ep.id.as_bytes()); Ok(()) } fn read_tvshow(bytes: &[u8]) -> Result<Episode> { let nul = match bytes.iter().position(|&b| b == 0) { Some(nul) => nul,
Err(err) => bug!("episode id invalid UTF-8: {}", err), Ok(tvshow_id) => tvshow_id, }; let mut i = nul + 1; let season = from_optional_u32(&bytes[i..]); i += 4; let epnum = from_optional_u32(&bytes[i..]); i += 4; let tvshow_id = match String::from_utf8(bytes[i..].to_vec()) { Err(err) => bug!("tvshow_id invalid UTF-8: {}", err), Ok(tvshow_id) => tvshow_id, }; Ok(Episode { id: id, tvshow_id: tvshow_id, season: season, episode: epnum, }) } fn write_tvshow(ep: &Episode, buf: &mut Vec<u8>) -> Result<()> { if ep.id.as_bytes().iter().any(|&b| b == 0) { bug!("unsupported episode id (with NUL byte) for {:?}", ep); } buf.extend_from_slice(ep.id.as_bytes()); buf.push(0x00); buf.extend_from_slice(&u32_to_bytes(to_optional_season(ep)?)); buf.extend_from_slice(&u32_to_bytes(to_optional_epnum(ep)?)); buf.extend_from_slice(ep.tvshow_id.as_bytes()); Ok(()) } fn from_optional_u32(bytes: &[u8]) -> Option<u32> { match BE::read_u32(bytes) { u32::MAX => None, x => Some(x), } } fn to_optional_season(ep: &Episode) -> Result<u32> { match ep.season { None => Ok(u32::MAX), Some(x) => { if x == u32::MAX { bug!("unsupported season number {} for {:?}", x, ep); } Ok(x) } } } fn to_optional_epnum(ep: &Episode) -> Result<u32> { match ep.episode { None => Ok(u32::MAX), Some(x) => { if x == u32::MAX { bug!("unsupported episode number {} for {:?}", x, ep); } Ok(x) } } } fn u32_to_bytes(n: u32) -> [u8; 4] { let mut buf = [0u8; 4]; BE::write_u32(&mut buf, n); buf } #[cfg(test)] mod tests { use std::collections::HashMap; use crate::index::tests::TestContext; use super::Index; #[test] fn basics() { let ctx = TestContext::new("small"); let idx = Index::create(ctx.data_dir(), ctx.index_dir()).unwrap(); let eps = idx.seasons(b"tt0096697").unwrap(); let mut counts: HashMap<u32, u32> = HashMap::new(); for ep in eps { *counts.entry(ep.season.unwrap()).or_insert(0) += 1; } assert_eq!(counts.len(), 3); assert_eq!(counts[&1], 13); assert_eq!(counts[&2], 22); assert_eq!(counts[&3], 24); } #[test] fn by_season() { 
let ctx = TestContext::new("small"); let idx = Index::create(ctx.data_dir(), ctx.index_dir()).unwrap(); let eps = idx.episodes(b"tt0096697", 2).unwrap(); let mut counts: HashMap<u32, u32> = HashMap::new(); for ep in eps { *counts.entry(ep.season.unwrap()).or_insert(0) += 1; } println!("{:?}", counts); assert_eq!(counts.len(), 1); assert_eq!(counts[&2], 22); } #[test] fn tvshow() { let ctx = TestContext::new("small"); let idx = Index::create(ctx.data_dir(), ctx.index_dir()).unwrap(); let ep = idx.episode(b"tt0701063").unwrap().unwrap(); assert_eq!(ep.tvshow_id, "tt0096697"); } }
None => bug!("could not find nul byte"), }; let id = match String::from_utf8(bytes[..nul].to_vec()) {
random_line_split
episode.rs
use std::cmp; use std::path::Path; use std::u32; use byteorder::{ByteOrder, BE}; use fst::{self, IntoStreamer, Streamer}; use crate::error::{Error, Result}; use crate::index::csv_file; use crate::record::Episode; use crate::util::{IMDB_EPISODE, fst_set_builder_file, fst_set_file}; /// The name of the episode index file. /// /// The episode index maps TV show ids to episodes. The index is constructed /// in a way where either of the following things can be used as look up keys: /// /// tvshow IMDb title ID /// (tvshow IMDb title ID, season number) /// /// In particular, the index itself stores the entire episode record, and it /// can be re-constituted without re-visiting the original episode data file. const SEASONS: &str = "episode.seasons.fst"; /// The name of the TV show index file. /// /// The TV show index maps episode IMDb title IDs to tvshow IMDb title IDs. /// This allows us to quickly look up the TV show corresponding to an episode /// in search results. /// /// The format of this index is an FST set, where each key corresponds to the /// episode ID joined with the TV show ID by a `NUL` byte. This lets us do /// a range query on the set when given the episode ID to find the TV show ID. const TVSHOWS: &str = "episode.tvshows.fst"; /// An episode index that supports retrieving season and episode information /// quickly. #[derive(Debug)] pub struct Index { seasons: fst::Set, tvshows: fst::Set, } impl Index { /// Open an episode index from the given index directory. pub fn open<P: AsRef<Path>>(index_dir: P) -> Result<Index> { let index_dir = index_dir.as_ref(); // We claim it is safe to open the following memory map because we // don't mutate them and no other process (should) either. let seasons = unsafe { fst_set_file(index_dir.join(SEASONS))? }; let tvshows = unsafe { fst_set_file(index_dir.join(TVSHOWS))? 
}; Ok(Index { seasons: seasons, tvshows: tvshows, }) } /// Create an episode index from the given IMDb data directory and write /// it to the given index directory. If an episode index already exists, /// then it is overwritten. pub fn create<P1: AsRef<Path>, P2: AsRef<Path>>( data_dir: P1, index_dir: P2, ) -> Result<Index> { let data_dir = data_dir.as_ref(); let index_dir = index_dir.as_ref(); let mut buf = vec![]; let mut seasons = fst_set_builder_file(index_dir.join(SEASONS))?; let mut tvshows = fst_set_builder_file(index_dir.join(TVSHOWS))?; let mut episodes = read_sorted_episodes(data_dir)?; for episode in &episodes { buf.clear(); write_episode(episode, &mut buf)?; seasons.insert(&buf).map_err(Error::fst)?; } episodes.sort_by(|e1, e2| { (&e1.id, &e1.tvshow_id).cmp(&(&e2.id, &e2.tvshow_id)) }); for episode in &episodes { buf.clear(); write_tvshow(&episode, &mut buf)?; tvshows.insert(&buf).map_err(Error::fst)?; } seasons.finish().map_err(Error::fst)?; tvshows.finish().map_err(Error::fst)?; log::info!("{} episodes indexed", episodes.len()); Index::open(index_dir) } /// Return a sequence of episodes for the given TV show IMDb identifier. /// /// The episodes are sorted in order of season number and episode number. /// Episodes without season/episode numbers are sorted after episodes with /// numbers. pub fn seasons(&self, tvshow_id: &[u8]) -> Result<Vec<Episode>> { let mut upper = tvshow_id.to_vec(); upper.push(0xFF); let mut episodes = vec![]; let mut stream = self.seasons.range() .ge(tvshow_id) .le(upper) .into_stream(); while let Some(episode_bytes) = stream.next() { episodes.push(read_episode(episode_bytes)?); } Ok(episodes) } /// Return a sequence of episodes for the given TV show IMDb identifier and /// season number. /// /// The episodes are sorted in order of episode number. Episodes without /// episode numbers are sorted after episodes with numbers. 
pub fn episodes( &self, tvshow_id: &[u8], season: u32, ) -> Result<Vec<Episode>> { let mut lower = tvshow_id.to_vec(); lower.push(0x00); lower.extend_from_slice(&u32_to_bytes(season)); lower.extend_from_slice(&u32_to_bytes(0)); let mut upper = tvshow_id.to_vec(); upper.push(0x00); upper.extend_from_slice(&u32_to_bytes(season)); upper.extend_from_slice(&u32_to_bytes(u32::MAX)); let mut episodes = vec![]; let mut stream = self.seasons.range() .ge(lower) .le(upper) .into_stream(); while let Some(episode_bytes) = stream.next() { episodes.push(read_episode(episode_bytes)?); } Ok(episodes) } /// Return the episode information for the given episode IMDb identifier. /// /// If no episode information for the given ID exists, then `None` is /// returned. pub fn episode(&self, episode_id: &[u8]) -> Result<Option<Episode>> { let mut upper = episode_id.to_vec(); upper.push(0xFF); let mut stream = self.tvshows.range() .ge(episode_id) .le(upper) .into_stream(); while let Some(tvshow_bytes) = stream.next() { return Ok(Some(read_tvshow(tvshow_bytes)?)); } Ok(None) } } fn read_sorted_episodes(data_dir: &Path) -> Result<Vec<Episode>> { // We claim it is safe to open the following memory map because we don't // mutate them and no other process (should) either. 
let mut rdr = csv_file(data_dir.join(IMDB_EPISODE))?; let mut records = vec![]; for result in rdr.deserialize() { let record: Episode = result.map_err(Error::csv)?; records.push(record); } records.sort_by(cmp_episode); Ok(records) } fn cmp_episode(ep1: &Episode, ep2: &Episode) -> cmp::Ordering { let k1 = ( &ep1.tvshow_id, ep1.season.unwrap_or(u32::MAX), ep1.episode.unwrap_or(u32::MAX), &ep1.id, ); let k2 = ( &ep2.tvshow_id, ep2.season.unwrap_or(u32::MAX), ep2.episode.unwrap_or(u32::MAX), &ep2.id, ); k1.cmp(&k2) } fn read_episode(bytes: &[u8]) -> Result<Episode> { let nul = match bytes.iter().position(|&b| b == 0) { Some(nul) => nul, None => bug!("could not find nul byte"), }; let tvshow_id = match String::from_utf8(bytes[..nul].to_vec()) { Err(err) => bug!("tvshow_id invalid UTF-8: {}", err), Ok(tvshow_id) => tvshow_id, }; let mut i = nul + 1; let season = from_optional_u32(&bytes[i..]); i += 4; let epnum = from_optional_u32(&bytes[i..]); i += 4; let id = match String::from_utf8(bytes[i..].to_vec()) { Err(err) => bug!("episode id invalid UTF-8: {}", err), Ok(id) => id, }; Ok(Episode { id: id, tvshow_id: tvshow_id, season: season, episode: epnum, }) } fn write_episode(ep: &Episode, buf: &mut Vec<u8>) -> Result<()> { if ep.tvshow_id.as_bytes().iter().any(|&b| b == 0) { bug!("unsupported tvshow id (with NUL byte) for {:?}", ep); } buf.extend_from_slice(ep.tvshow_id.as_bytes()); buf.push(0x00); buf.extend_from_slice(&u32_to_bytes(to_optional_season(ep)?)); buf.extend_from_slice(&u32_to_bytes(to_optional_epnum(ep)?)); buf.extend_from_slice(ep.id.as_bytes()); Ok(()) } fn read_tvshow(bytes: &[u8]) -> Result<Episode> { let nul = match bytes.iter().position(|&b| b == 0) { Some(nul) => nul, None => bug!("could not find nul byte"), }; let id = match String::from_utf8(bytes[..nul].to_vec()) { Err(err) => bug!("episode id invalid UTF-8: {}", err), Ok(tvshow_id) => tvshow_id, }; let mut i = nul + 1; let season = from_optional_u32(&bytes[i..]); i += 4; let epnum = 
from_optional_u32(&bytes[i..]); i += 4; let tvshow_id = match String::from_utf8(bytes[i..].to_vec()) { Err(err) => bug!("tvshow_id invalid UTF-8: {}", err), Ok(tvshow_id) => tvshow_id, }; Ok(Episode { id: id, tvshow_id: tvshow_id, season: season, episode: epnum, }) } fn write_tvshow(ep: &Episode, buf: &mut Vec<u8>) -> Result<()> { if ep.id.as_bytes().iter().any(|&b| b == 0) { bug!("unsupported episode id (with NUL byte) for {:?}", ep); } buf.extend_from_slice(ep.id.as_bytes()); buf.push(0x00); buf.extend_from_slice(&u32_to_bytes(to_optional_season(ep)?)); buf.extend_from_slice(&u32_to_bytes(to_optional_epnum(ep)?)); buf.extend_from_slice(ep.tvshow_id.as_bytes()); Ok(()) } fn from_optional_u32(bytes: &[u8]) -> Option<u32> { match BE::read_u32(bytes) { u32::MAX => None, x => Some(x), } } fn to_optional_season(ep: &Episode) -> Result<u32> { match ep.season { None => Ok(u32::MAX), Some(x) => { if x == u32::MAX { bug!("unsupported season number {} for {:?}", x, ep); } Ok(x) } } } fn to_optional_epnum(ep: &Episode) -> Result<u32> { match ep.episode { None => Ok(u32::MAX), Some(x) => { if x == u32::MAX { bug!("unsupported episode number {} for {:?}", x, ep); } Ok(x) } } } fn u32_to_bytes(n: u32) -> [u8; 4] { let mut buf = [0u8; 4]; BE::write_u32(&mut buf, n); buf } #[cfg(test)] mod tests { use std::collections::HashMap; use crate::index::tests::TestContext; use super::Index; #[test] fn basics() { let ctx = TestContext::new("small"); let idx = Index::create(ctx.data_dir(), ctx.index_dir()).unwrap(); let eps = idx.seasons(b"tt0096697").unwrap(); let mut counts: HashMap<u32, u32> = HashMap::new(); for ep in eps { *counts.entry(ep.season.unwrap()).or_insert(0) += 1; } assert_eq!(counts.len(), 3); assert_eq!(counts[&1], 13); assert_eq!(counts[&2], 22); assert_eq!(counts[&3], 24); } #[test] fn by_season() { let ctx = TestContext::new("small"); let idx = Index::create(ctx.data_dir(), ctx.index_dir()).unwrap(); let eps = idx.episodes(b"tt0096697", 2).unwrap(); let mut counts: 
HashMap<u32, u32> = HashMap::new(); for ep in eps { *counts.entry(ep.season.unwrap()).or_insert(0) += 1; } println!("{:?}", counts); assert_eq!(counts.len(), 1); assert_eq!(counts[&2], 22); } #[test] fn tvshow()
}
{ let ctx = TestContext::new("small"); let idx = Index::create(ctx.data_dir(), ctx.index_dir()).unwrap(); let ep = idx.episode(b"tt0701063").unwrap().unwrap(); assert_eq!(ep.tvshow_id, "tt0096697"); }
identifier_body
episode.rs
use std::cmp; use std::path::Path; use std::u32; use byteorder::{ByteOrder, BE}; use fst::{self, IntoStreamer, Streamer}; use crate::error::{Error, Result}; use crate::index::csv_file; use crate::record::Episode; use crate::util::{IMDB_EPISODE, fst_set_builder_file, fst_set_file}; /// The name of the episode index file. /// /// The episode index maps TV show ids to episodes. The index is constructed /// in a way where either of the following things can be used as look up keys: /// /// tvshow IMDb title ID /// (tvshow IMDb title ID, season number) /// /// In particular, the index itself stores the entire episode record, and it /// can be re-constituted without re-visiting the original episode data file. const SEASONS: &str = "episode.seasons.fst"; /// The name of the TV show index file. /// /// The TV show index maps episode IMDb title IDs to tvshow IMDb title IDs. /// This allows us to quickly look up the TV show corresponding to an episode /// in search results. /// /// The format of this index is an FST set, where each key corresponds to the /// episode ID joined with the TV show ID by a `NUL` byte. This lets us do /// a range query on the set when given the episode ID to find the TV show ID. const TVSHOWS: &str = "episode.tvshows.fst"; /// An episode index that supports retrieving season and episode information /// quickly. #[derive(Debug)] pub struct Index { seasons: fst::Set, tvshows: fst::Set, } impl Index { /// Open an episode index from the given index directory. pub fn open<P: AsRef<Path>>(index_dir: P) -> Result<Index> { let index_dir = index_dir.as_ref(); // We claim it is safe to open the following memory map because we // don't mutate them and no other process (should) either. let seasons = unsafe { fst_set_file(index_dir.join(SEASONS))? }; let tvshows = unsafe { fst_set_file(index_dir.join(TVSHOWS))? 
}; Ok(Index { seasons: seasons, tvshows: tvshows, }) } /// Create an episode index from the given IMDb data directory and write /// it to the given index directory. If an episode index already exists, /// then it is overwritten. pub fn create<P1: AsRef<Path>, P2: AsRef<Path>>( data_dir: P1, index_dir: P2, ) -> Result<Index> { let data_dir = data_dir.as_ref(); let index_dir = index_dir.as_ref(); let mut buf = vec![]; let mut seasons = fst_set_builder_file(index_dir.join(SEASONS))?; let mut tvshows = fst_set_builder_file(index_dir.join(TVSHOWS))?; let mut episodes = read_sorted_episodes(data_dir)?; for episode in &episodes { buf.clear(); write_episode(episode, &mut buf)?; seasons.insert(&buf).map_err(Error::fst)?; } episodes.sort_by(|e1, e2| { (&e1.id, &e1.tvshow_id).cmp(&(&e2.id, &e2.tvshow_id)) }); for episode in &episodes { buf.clear(); write_tvshow(&episode, &mut buf)?; tvshows.insert(&buf).map_err(Error::fst)?; } seasons.finish().map_err(Error::fst)?; tvshows.finish().map_err(Error::fst)?; log::info!("{} episodes indexed", episodes.len()); Index::open(index_dir) } /// Return a sequence of episodes for the given TV show IMDb identifier. /// /// The episodes are sorted in order of season number and episode number. /// Episodes without season/episode numbers are sorted after episodes with /// numbers. pub fn seasons(&self, tvshow_id: &[u8]) -> Result<Vec<Episode>> { let mut upper = tvshow_id.to_vec(); upper.push(0xFF); let mut episodes = vec![]; let mut stream = self.seasons.range() .ge(tvshow_id) .le(upper) .into_stream(); while let Some(episode_bytes) = stream.next() { episodes.push(read_episode(episode_bytes)?); } Ok(episodes) } /// Return a sequence of episodes for the given TV show IMDb identifier and /// season number. /// /// The episodes are sorted in order of episode number. Episodes without /// episode numbers are sorted after episodes with numbers. 
pub fn episodes( &self, tvshow_id: &[u8], season: u32, ) -> Result<Vec<Episode>> { let mut lower = tvshow_id.to_vec(); lower.push(0x00); lower.extend_from_slice(&u32_to_bytes(season)); lower.extend_from_slice(&u32_to_bytes(0)); let mut upper = tvshow_id.to_vec(); upper.push(0x00); upper.extend_from_slice(&u32_to_bytes(season)); upper.extend_from_slice(&u32_to_bytes(u32::MAX)); let mut episodes = vec![]; let mut stream = self.seasons.range() .ge(lower) .le(upper) .into_stream(); while let Some(episode_bytes) = stream.next() { episodes.push(read_episode(episode_bytes)?); } Ok(episodes) } /// Return the episode information for the given episode IMDb identifier. /// /// If no episode information for the given ID exists, then `None` is /// returned. pub fn episode(&self, episode_id: &[u8]) -> Result<Option<Episode>> { let mut upper = episode_id.to_vec(); upper.push(0xFF); let mut stream = self.tvshows.range() .ge(episode_id) .le(upper) .into_stream(); while let Some(tvshow_bytes) = stream.next() { return Ok(Some(read_tvshow(tvshow_bytes)?)); } Ok(None) } } fn read_sorted_episodes(data_dir: &Path) -> Result<Vec<Episode>> { // We claim it is safe to open the following memory map because we don't // mutate them and no other process (should) either. 
let mut rdr = csv_file(data_dir.join(IMDB_EPISODE))?; let mut records = vec![]; for result in rdr.deserialize() { let record: Episode = result.map_err(Error::csv)?; records.push(record); } records.sort_by(cmp_episode); Ok(records) } fn cmp_episode(ep1: &Episode, ep2: &Episode) -> cmp::Ordering { let k1 = ( &ep1.tvshow_id, ep1.season.unwrap_or(u32::MAX), ep1.episode.unwrap_or(u32::MAX), &ep1.id, ); let k2 = ( &ep2.tvshow_id, ep2.season.unwrap_or(u32::MAX), ep2.episode.unwrap_or(u32::MAX), &ep2.id, ); k1.cmp(&k2) } fn read_episode(bytes: &[u8]) -> Result<Episode> { let nul = match bytes.iter().position(|&b| b == 0) { Some(nul) => nul, None => bug!("could not find nul byte"), }; let tvshow_id = match String::from_utf8(bytes[..nul].to_vec()) { Err(err) => bug!("tvshow_id invalid UTF-8: {}", err), Ok(tvshow_id) => tvshow_id, }; let mut i = nul + 1; let season = from_optional_u32(&bytes[i..]); i += 4; let epnum = from_optional_u32(&bytes[i..]); i += 4; let id = match String::from_utf8(bytes[i..].to_vec()) { Err(err) => bug!("episode id invalid UTF-8: {}", err), Ok(id) => id, }; Ok(Episode { id: id, tvshow_id: tvshow_id, season: season, episode: epnum, }) } fn write_episode(ep: &Episode, buf: &mut Vec<u8>) -> Result<()> { if ep.tvshow_id.as_bytes().iter().any(|&b| b == 0) { bug!("unsupported tvshow id (with NUL byte) for {:?}", ep); } buf.extend_from_slice(ep.tvshow_id.as_bytes()); buf.push(0x00); buf.extend_from_slice(&u32_to_bytes(to_optional_season(ep)?)); buf.extend_from_slice(&u32_to_bytes(to_optional_epnum(ep)?)); buf.extend_from_slice(ep.id.as_bytes()); Ok(()) } fn read_tvshow(bytes: &[u8]) -> Result<Episode> { let nul = match bytes.iter().position(|&b| b == 0) { Some(nul) => nul, None => bug!("could not find nul byte"), }; let id = match String::from_utf8(bytes[..nul].to_vec()) { Err(err) => bug!("episode id invalid UTF-8: {}", err), Ok(tvshow_id) => tvshow_id, }; let mut i = nul + 1; let season = from_optional_u32(&bytes[i..]); i += 4; let epnum = 
from_optional_u32(&bytes[i..]); i += 4; let tvshow_id = match String::from_utf8(bytes[i..].to_vec()) { Err(err) => bug!("tvshow_id invalid UTF-8: {}", err), Ok(tvshow_id) => tvshow_id, }; Ok(Episode { id: id, tvshow_id: tvshow_id, season: season, episode: epnum, }) } fn write_tvshow(ep: &Episode, buf: &mut Vec<u8>) -> Result<()> { if ep.id.as_bytes().iter().any(|&b| b == 0) { bug!("unsupported episode id (with NUL byte) for {:?}", ep); } buf.extend_from_slice(ep.id.as_bytes()); buf.push(0x00); buf.extend_from_slice(&u32_to_bytes(to_optional_season(ep)?)); buf.extend_from_slice(&u32_to_bytes(to_optional_epnum(ep)?)); buf.extend_from_slice(ep.tvshow_id.as_bytes()); Ok(()) } fn from_optional_u32(bytes: &[u8]) -> Option<u32> { match BE::read_u32(bytes) { u32::MAX => None, x => Some(x), } } fn to_optional_season(ep: &Episode) -> Result<u32> { match ep.season { None => Ok(u32::MAX), Some(x) => { if x == u32::MAX { bug!("unsupported season number {} for {:?}", x, ep); } Ok(x) } } } fn
(ep: &Episode) -> Result<u32> { match ep.episode { None => Ok(u32::MAX), Some(x) => { if x == u32::MAX { bug!("unsupported episode number {} for {:?}", x, ep); } Ok(x) } } } fn u32_to_bytes(n: u32) -> [u8; 4] { let mut buf = [0u8; 4]; BE::write_u32(&mut buf, n); buf } #[cfg(test)] mod tests { use std::collections::HashMap; use crate::index::tests::TestContext; use super::Index; #[test] fn basics() { let ctx = TestContext::new("small"); let idx = Index::create(ctx.data_dir(), ctx.index_dir()).unwrap(); let eps = idx.seasons(b"tt0096697").unwrap(); let mut counts: HashMap<u32, u32> = HashMap::new(); for ep in eps { *counts.entry(ep.season.unwrap()).or_insert(0) += 1; } assert_eq!(counts.len(), 3); assert_eq!(counts[&1], 13); assert_eq!(counts[&2], 22); assert_eq!(counts[&3], 24); } #[test] fn by_season() { let ctx = TestContext::new("small"); let idx = Index::create(ctx.data_dir(), ctx.index_dir()).unwrap(); let eps = idx.episodes(b"tt0096697", 2).unwrap(); let mut counts: HashMap<u32, u32> = HashMap::new(); for ep in eps { *counts.entry(ep.season.unwrap()).or_insert(0) += 1; } println!("{:?}", counts); assert_eq!(counts.len(), 1); assert_eq!(counts[&2], 22); } #[test] fn tvshow() { let ctx = TestContext::new("small"); let idx = Index::create(ctx.data_dir(), ctx.index_dir()).unwrap(); let ep = idx.episode(b"tt0701063").unwrap().unwrap(); assert_eq!(ep.tvshow_id, "tt0096697"); } }
to_optional_epnum
identifier_name
main.rs
extern crate docopt; #[macro_use] extern crate serde_derive; use docopt::Docopt; #[macro_use] extern crate log; use log::{Level, LevelFilter, Metadata, Record}; use std::collections::{HashMap, HashSet}; use std::fs::File; use std::io::{BufRead, BufReader, Seek, SeekFrom}; use std::path::Path; static MY_LOGGER: SimpleLogger = SimpleLogger; struct SimpleLogger; impl log::Log for SimpleLogger { fn enabled(&self, metadata: &Metadata) -> bool { metadata.level() <= Level::Info } fn log(&self, record: &Record) { if self.enabled(record.metadata()) { println!("{} - {}", record.level(), record.args()); } } fn flush(&self) {} } struct CsvDesc<'a> { file_path: &'a Path, delimiter: char, quote: Option<char>, } impl<'a> std::fmt::Display for CsvDesc<'a> { fn
(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { write!( f, "{} {} {:?}", self.file_path.display(), self.delimiter, self.quote ) } } fn parse_args<'a>( path_arg: &'a String, delimiter_arg: &'a String, quote_arg: &'a String, ) -> Result<CsvDesc<'a>, &'static str> { let csv_file_path = Path::new(path_arg); let csv_delimiter = match delimiter_arg.chars().next() { Some(result) => result, None => return Err("incorrect delimiter"), }; let csv_quote = quote_arg.chars().next(); Ok(CsvDesc { file_path: &csv_file_path, delimiter: csv_delimiter, quote: csv_quote, }) } fn get_csv_cols(csv_desc: &CsvDesc) -> Result<Vec<String>, String> { let csv_file = match File::open(csv_desc.file_path) { Err(why) => panic!( "couldn't open csv @ {}: {}", csv_desc.file_path.display(), why ), Ok(file) => file, }; let csv_reader = BufReader::new(csv_file); let mut csv_line_iter = csv_reader.lines(); let csv_header: String = match csv_line_iter.next() { Some(result) => match result { Err(why) => return Err(format!("error getting csv header: {}", why)), Ok(header) => header, }, None => return Err("csv header reading failed".to_string()), }; let csv_cols: Vec<String> = { let cols_iter = csv_header.split(csv_desc.delimiter); match csv_desc.quote { Some(q) => cols_iter.map(|s| s.trim_matches(q).to_string()).collect(), None => cols_iter.map(|s| s.to_string()).collect(), } }; Ok(csv_cols) } fn build_index(csv_desc: &CsvDesc) -> Result<HashMap<String, u64>, String> { let mut csv_index = HashMap::new(); let csv_file = match File::open(csv_desc.file_path) { Err(why) => panic!( "couldn't open csv @ {}: {}", csv_desc.file_path.display(), why ), Ok(file) => file, }; let csv_reader = BufReader::new(csv_file); let mut csv_line_iter = csv_reader.lines(); let mut offset_in_file: u64 = 0; let mut expected_col_count = 0; let mut row_idx = 0; loop { let csv_row: String = match csv_line_iter.next() { Some(result) => match result { Err(why) => return Err(format!("error getting csv row: {}", why)), 
Ok(header) => header, }, None => break, }; let csv_cols: Vec<String> = { let cols_iter = csv_row.split(csv_desc.delimiter); match csv_desc.quote { Some(q) => cols_iter.map(|s| s.trim_matches(q).to_string()).collect(), None => cols_iter.map(|s| s.to_string()).collect(), } }; let curr_col_count = csv_cols.len(); if curr_col_count == 0 { return Err(format!("zero columns in row{}", row_idx)); } if expected_col_count!= 0 && expected_col_count!= curr_col_count { return Err(format!( "{} columns in row #{}, {} expected", curr_col_count, row_idx, expected_col_count )); } expected_col_count = curr_col_count; row_idx += 1; let key = format!("{}{}", csv_cols[0], csv_cols[1]); csv_index.insert(key, offset_in_file); offset_in_file += (csv_row.len() + 1) as u64; } Ok(csv_index) } fn get_csv_row(csv_desc: &CsvDesc, line_offset: u64) -> Result<Vec<String>, String> { let mut csv_file = match File::open(csv_desc.file_path) { Err(why) => panic!( "couldn't open csv @ {}: {}", csv_desc.file_path.display(), why ), Ok(file) => file, }; if let Err(e) = csv_file.seek(SeekFrom::Start(line_offset)) { return Err(format!("error seeking offset: {}", e)); } let mut csv_reader = BufReader::new(csv_file); let mut row_buff = String::new(); match csv_reader.read_line(&mut row_buff) { Ok(_n) => { if row_buff.ends_with("\n") { row_buff.pop(); } } Err(e) => return Err(format!("error gettig csv row: {}", e)), }; let result: Vec<String> = { let cols_iter = row_buff.split(csv_desc.delimiter); match csv_desc.quote { Some(q) => cols_iter.map(|s| s.trim_matches(q).to_string()).collect(), None => cols_iter.map(|s| s.to_string()).collect(), } }; Ok(result) } static USAGE: &'static str = " Usage: rusty-csv-diff <csv1> <delim1> <quote1> <csv2> <delim2> <quote2> rusty-csv-diff --help Options: -h, --help Show this message. "; #[derive(Debug, Deserialize)] struct Args { arg_csv1: String, arg_delim1: String, arg_quote1: String, arg_csv2: String, arg_delim2: String, arg_quote2: String, } fn main() { /* 1. 
Parse arguments 2. Open CSV files 3. Get columns (cols_N) 4. Get intersection of those two sets of columns(cols_to_compare) 5. Create {column name : column index in cols_N} dicts 6. Create {CSV_col_value : CSV row index in file} dicts, where CSV_col_value is a unique key made of the value of several CSV columns. For example, {Alex38 : 76}. Here the name and age form a unique key for the 76th CSV row. 7. Get intersection of key sets of dicts from step 6 (row_keys_to_compare) 8. Loop through row_keys_to_compare, use dicts from step 6 to get line numbers for CSV files 8.1 Loop through cols_to_compare, use dicts from step 5 to extract column values from CSV rows 8.2 Compare values Input parameters: CSV paths, delimiters, quotes For example,./main file_1.csv "," "'" file_2.csv " " "" */ /*** 0 ***/ log::set_logger(&MY_LOGGER).unwrap(); log::set_max_level(LevelFilter::Error); /*** 1 ***/ let args: Args = Docopt::new(USAGE) .and_then(|d| d.deserialize()) .unwrap_or_else(|e| e.exit()); let csv_desc_1: CsvDesc = match parse_args(&args.arg_csv1, &args.arg_delim1, &args.arg_quote1) { Err(why) => panic!("error parsing arguments for CSV #1: {}", why), Ok(result) => result, }; let csv_desc_2: CsvDesc = match parse_args(&args.arg_csv2, &args.arg_delim2, &args.arg_quote2) { Err(why) => panic!("error parsing arguments for CSV #2: {}", why), Ok(result) => result, }; /*** 2&3 ***/ let csv_cols_1: Vec<String> = match get_csv_cols(&csv_desc_1) { Err(why) => panic!("couldn't get columns: {}", why), Ok(cols) => cols, }; let csv_cols_2: Vec<String> = match get_csv_cols(&csv_desc_2) { Err(why) => panic!("couldn't get columns: {}", why), Ok(cols) => cols, }; /*** 5 ***/ let mut csv_col_index_1 = HashMap::new(); for i in 0..csv_cols_1.len() { let key = csv_cols_1[i].clone(); if csv_col_index_1.contains_key(&key) { panic!("duplicate column found in CSV #1: {}", key); }; csv_col_index_1.insert(key, i); } info!("{:?}", csv_col_index_1); let mut csv_col_index_2 = HashMap::new(); for i in 
0..csv_cols_2.len() { let key = csv_cols_2[i].clone(); if csv_col_index_2.contains_key(&key) { panic!("duplicate column found in CSV #1: {}", key); }; csv_col_index_2.insert(key, i); } info!("{:?}", csv_col_index_2); /*** 4 ***/ let mut cols_to_compare = HashSet::new(); for col_1 in csv_col_index_1.keys() { if csv_col_index_2.contains_key(col_1) { cols_to_compare.insert(col_1); }; } info!("{:?}", cols_to_compare); /*** 6 ***/ // let's assume that the unique key is (col_0 + col_1) let csv_index_1 = match build_index(&csv_desc_1) { Err(why) => panic!("failed building index #1: {}", why), Ok(index) => index, }; let csv_index_2 = match build_index(&csv_desc_2) { Err(why) => panic!("failed building index #2: {}", why), Ok(index) => index, }; /*** 7 ***/ let mut row_keys_to_compare = HashSet::new(); for key_1 in csv_index_1.keys() { if csv_index_2.contains_key(key_1) { row_keys_to_compare.insert(key_1); }; } info!("{:?}", row_keys_to_compare); /*** 8 ***/ for row_key in row_keys_to_compare { let index_1 = *csv_index_1.get(row_key).unwrap(); let index_2 = *csv_index_2.get(row_key).unwrap(); let row_1 = match get_csv_row(&csv_desc_1, index_1) { Ok(row) => row, Err(e) => panic!("failed getting csv row #1: {}", e), }; let row_2 = match get_csv_row(&csv_desc_2, index_2) { Ok(row) => row, Err(e) => panic!("failed getting csv row #2: {}", e), }; info!("comparing {}:", row_key); info!("line #1: {:?}", row_1); info!("line #2: {:?}", row_2); for col in &cols_to_compare { let col_index_1 = *csv_col_index_1.get(*col).unwrap(); let col_index_2 = *csv_col_index_2.get(*col).unwrap(); info!( "column {}, index_1={}, index_2={}", col, col_index_1, col_index_2 ); if row_1[col_index_1]!= row_2[col_index_2] { println!( "found a difference for {}, {}: {} / {}", row_key, col, row_1[col_index_1], row_2[col_index_2] ); } } } }
fmt
identifier_name
main.rs
extern crate docopt; #[macro_use] extern crate serde_derive; use docopt::Docopt; #[macro_use] extern crate log; use log::{Level, LevelFilter, Metadata, Record}; use std::collections::{HashMap, HashSet}; use std::fs::File; use std::io::{BufRead, BufReader, Seek, SeekFrom}; use std::path::Path; static MY_LOGGER: SimpleLogger = SimpleLogger; struct SimpleLogger; impl log::Log for SimpleLogger { fn enabled(&self, metadata: &Metadata) -> bool { metadata.level() <= Level::Info } fn log(&self, record: &Record) { if self.enabled(record.metadata()) { println!("{} - {}", record.level(), record.args()); } } fn flush(&self)
} struct CsvDesc<'a> { file_path: &'a Path, delimiter: char, quote: Option<char>, } impl<'a> std::fmt::Display for CsvDesc<'a> { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { write!( f, "{} {} {:?}", self.file_path.display(), self.delimiter, self.quote ) } } fn parse_args<'a>( path_arg: &'a String, delimiter_arg: &'a String, quote_arg: &'a String, ) -> Result<CsvDesc<'a>, &'static str> { let csv_file_path = Path::new(path_arg); let csv_delimiter = match delimiter_arg.chars().next() { Some(result) => result, None => return Err("incorrect delimiter"), }; let csv_quote = quote_arg.chars().next(); Ok(CsvDesc { file_path: &csv_file_path, delimiter: csv_delimiter, quote: csv_quote, }) } fn get_csv_cols(csv_desc: &CsvDesc) -> Result<Vec<String>, String> { let csv_file = match File::open(csv_desc.file_path) { Err(why) => panic!( "couldn't open csv @ {}: {}", csv_desc.file_path.display(), why ), Ok(file) => file, }; let csv_reader = BufReader::new(csv_file); let mut csv_line_iter = csv_reader.lines(); let csv_header: String = match csv_line_iter.next() { Some(result) => match result { Err(why) => return Err(format!("error getting csv header: {}", why)), Ok(header) => header, }, None => return Err("csv header reading failed".to_string()), }; let csv_cols: Vec<String> = { let cols_iter = csv_header.split(csv_desc.delimiter); match csv_desc.quote { Some(q) => cols_iter.map(|s| s.trim_matches(q).to_string()).collect(), None => cols_iter.map(|s| s.to_string()).collect(), } }; Ok(csv_cols) } fn build_index(csv_desc: &CsvDesc) -> Result<HashMap<String, u64>, String> { let mut csv_index = HashMap::new(); let csv_file = match File::open(csv_desc.file_path) { Err(why) => panic!( "couldn't open csv @ {}: {}", csv_desc.file_path.display(), why ), Ok(file) => file, }; let csv_reader = BufReader::new(csv_file); let mut csv_line_iter = csv_reader.lines(); let mut offset_in_file: u64 = 0; let mut expected_col_count = 0; let mut row_idx = 0; loop { let csv_row: String = 
match csv_line_iter.next() { Some(result) => match result { Err(why) => return Err(format!("error getting csv row: {}", why)), Ok(header) => header, }, None => break, }; let csv_cols: Vec<String> = { let cols_iter = csv_row.split(csv_desc.delimiter); match csv_desc.quote { Some(q) => cols_iter.map(|s| s.trim_matches(q).to_string()).collect(), None => cols_iter.map(|s| s.to_string()).collect(), } }; let curr_col_count = csv_cols.len(); if curr_col_count == 0 { return Err(format!("zero columns in row{}", row_idx)); } if expected_col_count!= 0 && expected_col_count!= curr_col_count { return Err(format!( "{} columns in row #{}, {} expected", curr_col_count, row_idx, expected_col_count )); } expected_col_count = curr_col_count; row_idx += 1; let key = format!("{}{}", csv_cols[0], csv_cols[1]); csv_index.insert(key, offset_in_file); offset_in_file += (csv_row.len() + 1) as u64; } Ok(csv_index) } fn get_csv_row(csv_desc: &CsvDesc, line_offset: u64) -> Result<Vec<String>, String> { let mut csv_file = match File::open(csv_desc.file_path) { Err(why) => panic!( "couldn't open csv @ {}: {}", csv_desc.file_path.display(), why ), Ok(file) => file, }; if let Err(e) = csv_file.seek(SeekFrom::Start(line_offset)) { return Err(format!("error seeking offset: {}", e)); } let mut csv_reader = BufReader::new(csv_file); let mut row_buff = String::new(); match csv_reader.read_line(&mut row_buff) { Ok(_n) => { if row_buff.ends_with("\n") { row_buff.pop(); } } Err(e) => return Err(format!("error gettig csv row: {}", e)), }; let result: Vec<String> = { let cols_iter = row_buff.split(csv_desc.delimiter); match csv_desc.quote { Some(q) => cols_iter.map(|s| s.trim_matches(q).to_string()).collect(), None => cols_iter.map(|s| s.to_string()).collect(), } }; Ok(result) } static USAGE: &'static str = " Usage: rusty-csv-diff <csv1> <delim1> <quote1> <csv2> <delim2> <quote2> rusty-csv-diff --help Options: -h, --help Show this message. 
"; #[derive(Debug, Deserialize)] struct Args { arg_csv1: String, arg_delim1: String, arg_quote1: String, arg_csv2: String, arg_delim2: String, arg_quote2: String, } fn main() { /* 1. Parse arguments 2. Open CSV files 3. Get columns (cols_N) 4. Get intersection of those two sets of columns(cols_to_compare) 5. Create {column name : column index in cols_N} dicts 6. Create {CSV_col_value : CSV row index in file} dicts, where CSV_col_value is a unique key made of the value of several CSV columns. For example, {Alex38 : 76}. Here the name and age form a unique key for the 76th CSV row. 7. Get intersection of key sets of dicts from step 6 (row_keys_to_compare) 8. Loop through row_keys_to_compare, use dicts from step 6 to get line numbers for CSV files 8.1 Loop through cols_to_compare, use dicts from step 5 to extract column values from CSV rows 8.2 Compare values Input parameters: CSV paths, delimiters, quotes For example,./main file_1.csv "," "'" file_2.csv " " "" */ /*** 0 ***/ log::set_logger(&MY_LOGGER).unwrap(); log::set_max_level(LevelFilter::Error); /*** 1 ***/ let args: Args = Docopt::new(USAGE) .and_then(|d| d.deserialize()) .unwrap_or_else(|e| e.exit()); let csv_desc_1: CsvDesc = match parse_args(&args.arg_csv1, &args.arg_delim1, &args.arg_quote1) { Err(why) => panic!("error parsing arguments for CSV #1: {}", why), Ok(result) => result, }; let csv_desc_2: CsvDesc = match parse_args(&args.arg_csv2, &args.arg_delim2, &args.arg_quote2) { Err(why) => panic!("error parsing arguments for CSV #2: {}", why), Ok(result) => result, }; /*** 2&3 ***/ let csv_cols_1: Vec<String> = match get_csv_cols(&csv_desc_1) { Err(why) => panic!("couldn't get columns: {}", why), Ok(cols) => cols, }; let csv_cols_2: Vec<String> = match get_csv_cols(&csv_desc_2) { Err(why) => panic!("couldn't get columns: {}", why), Ok(cols) => cols, }; /*** 5 ***/ let mut csv_col_index_1 = HashMap::new(); for i in 0..csv_cols_1.len() { let key = csv_cols_1[i].clone(); if csv_col_index_1.contains_key(&key) 
{ panic!("duplicate column found in CSV #1: {}", key); }; csv_col_index_1.insert(key, i); } info!("{:?}", csv_col_index_1); let mut csv_col_index_2 = HashMap::new(); for i in 0..csv_cols_2.len() { let key = csv_cols_2[i].clone(); if csv_col_index_2.contains_key(&key) { panic!("duplicate column found in CSV #1: {}", key); }; csv_col_index_2.insert(key, i); } info!("{:?}", csv_col_index_2); /*** 4 ***/ let mut cols_to_compare = HashSet::new(); for col_1 in csv_col_index_1.keys() { if csv_col_index_2.contains_key(col_1) { cols_to_compare.insert(col_1); }; } info!("{:?}", cols_to_compare); /*** 6 ***/ // let's assume that the unique key is (col_0 + col_1) let csv_index_1 = match build_index(&csv_desc_1) { Err(why) => panic!("failed building index #1: {}", why), Ok(index) => index, }; let csv_index_2 = match build_index(&csv_desc_2) { Err(why) => panic!("failed building index #2: {}", why), Ok(index) => index, }; /*** 7 ***/ let mut row_keys_to_compare = HashSet::new(); for key_1 in csv_index_1.keys() { if csv_index_2.contains_key(key_1) { row_keys_to_compare.insert(key_1); }; } info!("{:?}", row_keys_to_compare); /*** 8 ***/ for row_key in row_keys_to_compare { let index_1 = *csv_index_1.get(row_key).unwrap(); let index_2 = *csv_index_2.get(row_key).unwrap(); let row_1 = match get_csv_row(&csv_desc_1, index_1) { Ok(row) => row, Err(e) => panic!("failed getting csv row #1: {}", e), }; let row_2 = match get_csv_row(&csv_desc_2, index_2) { Ok(row) => row, Err(e) => panic!("failed getting csv row #2: {}", e), }; info!("comparing {}:", row_key); info!("line #1: {:?}", row_1); info!("line #2: {:?}", row_2); for col in &cols_to_compare { let col_index_1 = *csv_col_index_1.get(*col).unwrap(); let col_index_2 = *csv_col_index_2.get(*col).unwrap(); info!( "column {}, index_1={}, index_2={}", col, col_index_1, col_index_2 ); if row_1[col_index_1]!= row_2[col_index_2] { println!( "found a difference for {}, {}: {} / {}", row_key, col, row_1[col_index_1], row_2[col_index_2] ); } } 
} }
{}
identifier_body
main.rs
extern crate docopt; #[macro_use] extern crate serde_derive; use docopt::Docopt; #[macro_use] extern crate log; use log::{Level, LevelFilter, Metadata, Record}; use std::collections::{HashMap, HashSet}; use std::fs::File; use std::io::{BufRead, BufReader, Seek, SeekFrom}; use std::path::Path; static MY_LOGGER: SimpleLogger = SimpleLogger; struct SimpleLogger; impl log::Log for SimpleLogger { fn enabled(&self, metadata: &Metadata) -> bool { metadata.level() <= Level::Info } fn log(&self, record: &Record) { if self.enabled(record.metadata()) { println!("{} - {}", record.level(), record.args()); } } fn flush(&self) {} } struct CsvDesc<'a> { file_path: &'a Path, delimiter: char, quote: Option<char>, } impl<'a> std::fmt::Display for CsvDesc<'a> { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { write!( f, "{} {} {:?}", self.file_path.display(), self.delimiter, self.quote ) } } fn parse_args<'a>( path_arg: &'a String, delimiter_arg: &'a String, quote_arg: &'a String, ) -> Result<CsvDesc<'a>, &'static str> { let csv_file_path = Path::new(path_arg); let csv_delimiter = match delimiter_arg.chars().next() { Some(result) => result, None => return Err("incorrect delimiter"), }; let csv_quote = quote_arg.chars().next(); Ok(CsvDesc { file_path: &csv_file_path, delimiter: csv_delimiter, quote: csv_quote, }) } fn get_csv_cols(csv_desc: &CsvDesc) -> Result<Vec<String>, String> { let csv_file = match File::open(csv_desc.file_path) { Err(why) => panic!( "couldn't open csv @ {}: {}", csv_desc.file_path.display(), why ), Ok(file) => file, }; let csv_reader = BufReader::new(csv_file); let mut csv_line_iter = csv_reader.lines(); let csv_header: String = match csv_line_iter.next() { Some(result) => match result { Err(why) => return Err(format!("error getting csv header: {}", why)), Ok(header) => header, }, None => return Err("csv header reading failed".to_string()), }; let csv_cols: Vec<String> = { let cols_iter = csv_header.split(csv_desc.delimiter); match csv_desc.quote 
{ Some(q) => cols_iter.map(|s| s.trim_matches(q).to_string()).collect(), None => cols_iter.map(|s| s.to_string()).collect(), } }; Ok(csv_cols) } fn build_index(csv_desc: &CsvDesc) -> Result<HashMap<String, u64>, String> { let mut csv_index = HashMap::new(); let csv_file = match File::open(csv_desc.file_path) { Err(why) => panic!( "couldn't open csv @ {}: {}", csv_desc.file_path.display(), why ), Ok(file) => file, }; let csv_reader = BufReader::new(csv_file); let mut csv_line_iter = csv_reader.lines(); let mut offset_in_file: u64 = 0; let mut expected_col_count = 0; let mut row_idx = 0; loop { let csv_row: String = match csv_line_iter.next() { Some(result) => match result { Err(why) => return Err(format!("error getting csv row: {}", why)), Ok(header) => header, }, None => break, }; let csv_cols: Vec<String> = { let cols_iter = csv_row.split(csv_desc.delimiter); match csv_desc.quote { Some(q) => cols_iter.map(|s| s.trim_matches(q).to_string()).collect(), None => cols_iter.map(|s| s.to_string()).collect(), } }; let curr_col_count = csv_cols.len(); if curr_col_count == 0 { return Err(format!("zero columns in row{}", row_idx)); } if expected_col_count!= 0 && expected_col_count!= curr_col_count { return Err(format!( "{} columns in row #{}, {} expected", curr_col_count, row_idx, expected_col_count )); } expected_col_count = curr_col_count; row_idx += 1; let key = format!("{}{}", csv_cols[0], csv_cols[1]); csv_index.insert(key, offset_in_file); offset_in_file += (csv_row.len() + 1) as u64; } Ok(csv_index) } fn get_csv_row(csv_desc: &CsvDesc, line_offset: u64) -> Result<Vec<String>, String> { let mut csv_file = match File::open(csv_desc.file_path) { Err(why) => panic!( "couldn't open csv @ {}: {}", csv_desc.file_path.display(), why ), Ok(file) => file, }; if let Err(e) = csv_file.seek(SeekFrom::Start(line_offset)) { return Err(format!("error seeking offset: {}", e)); } let mut csv_reader = BufReader::new(csv_file); let mut row_buff = String::new(); match 
csv_reader.read_line(&mut row_buff) { Ok(_n) => { if row_buff.ends_with("\n") { row_buff.pop(); } } Err(e) => return Err(format!("error gettig csv row: {}", e)), }; let result: Vec<String> = { let cols_iter = row_buff.split(csv_desc.delimiter); match csv_desc.quote { Some(q) => cols_iter.map(|s| s.trim_matches(q).to_string()).collect(), None => cols_iter.map(|s| s.to_string()).collect(), } }; Ok(result) } static USAGE: &'static str = " Usage: rusty-csv-diff <csv1> <delim1> <quote1> <csv2> <delim2> <quote2> rusty-csv-diff --help Options: -h, --help Show this message. "; #[derive(Debug, Deserialize)] struct Args { arg_csv1: String, arg_delim1: String, arg_quote1: String, arg_csv2: String, arg_delim2: String, arg_quote2: String, } fn main() { /* 1. Parse arguments 2. Open CSV files 3. Get columns (cols_N) 4. Get intersection of those two sets of columns(cols_to_compare) 5. Create {column name : column index in cols_N} dicts 6. Create {CSV_col_value : CSV row index in file} dicts, where CSV_col_value is a unique key made of the value of several CSV columns. For example, {Alex38 : 76}. Here the name and age form a unique key for the 76th CSV row. 7. Get intersection of key sets of dicts from step 6 (row_keys_to_compare) 8. 
Loop through row_keys_to_compare, use dicts from step 6 to get line numbers for CSV files 8.1 Loop through cols_to_compare, use dicts from step 5 to extract column values from CSV rows 8.2 Compare values Input parameters: CSV paths, delimiters, quotes For example,./main file_1.csv "," "'" file_2.csv " " "" */ /*** 0 ***/ log::set_logger(&MY_LOGGER).unwrap(); log::set_max_level(LevelFilter::Error); /*** 1 ***/ let args: Args = Docopt::new(USAGE) .and_then(|d| d.deserialize()) .unwrap_or_else(|e| e.exit()); let csv_desc_1: CsvDesc = match parse_args(&args.arg_csv1, &args.arg_delim1, &args.arg_quote1) { Err(why) => panic!("error parsing arguments for CSV #1: {}", why), Ok(result) => result, }; let csv_desc_2: CsvDesc = match parse_args(&args.arg_csv2, &args.arg_delim2, &args.arg_quote2) { Err(why) => panic!("error parsing arguments for CSV #2: {}", why), Ok(result) => result, }; /*** 2&3 ***/ let csv_cols_1: Vec<String> = match get_csv_cols(&csv_desc_1) { Err(why) => panic!("couldn't get columns: {}", why), Ok(cols) => cols, }; let csv_cols_2: Vec<String> = match get_csv_cols(&csv_desc_2) { Err(why) => panic!("couldn't get columns: {}", why), Ok(cols) => cols, }; /*** 5 ***/ let mut csv_col_index_1 = HashMap::new(); for i in 0..csv_cols_1.len() { let key = csv_cols_1[i].clone(); if csv_col_index_1.contains_key(&key) { panic!("duplicate column found in CSV #1: {}", key); }; csv_col_index_1.insert(key, i); } info!("{:?}", csv_col_index_1); let mut csv_col_index_2 = HashMap::new(); for i in 0..csv_cols_2.len() { let key = csv_cols_2[i].clone(); if csv_col_index_2.contains_key(&key) { panic!("duplicate column found in CSV #1: {}", key); }; csv_col_index_2.insert(key, i); } info!("{:?}", csv_col_index_2); /*** 4 ***/ let mut cols_to_compare = HashSet::new(); for col_1 in csv_col_index_1.keys() { if csv_col_index_2.contains_key(col_1) { cols_to_compare.insert(col_1); }; } info!("{:?}", cols_to_compare); /*** 6 ***/ // let's assume that the unique key is (col_0 + col_1) let 
csv_index_1 = match build_index(&csv_desc_1) { Err(why) => panic!("failed building index #1: {}", why), Ok(index) => index, }; let csv_index_2 = match build_index(&csv_desc_2) { Err(why) => panic!("failed building index #2: {}", why), Ok(index) => index, }; /*** 7 ***/ let mut row_keys_to_compare = HashSet::new(); for key_1 in csv_index_1.keys() { if csv_index_2.contains_key(key_1) { row_keys_to_compare.insert(key_1); }; } info!("{:?}", row_keys_to_compare); /*** 8 ***/ for row_key in row_keys_to_compare { let index_1 = *csv_index_1.get(row_key).unwrap(); let index_2 = *csv_index_2.get(row_key).unwrap(); let row_1 = match get_csv_row(&csv_desc_1, index_1) { Ok(row) => row, Err(e) => panic!("failed getting csv row #1: {}", e), };
Err(e) => panic!("failed getting csv row #2: {}", e), }; info!("comparing {}:", row_key); info!("line #1: {:?}", row_1); info!("line #2: {:?}", row_2); for col in &cols_to_compare { let col_index_1 = *csv_col_index_1.get(*col).unwrap(); let col_index_2 = *csv_col_index_2.get(*col).unwrap(); info!( "column {}, index_1={}, index_2={}", col, col_index_1, col_index_2 ); if row_1[col_index_1]!= row_2[col_index_2] { println!( "found a difference for {}, {}: {} / {}", row_key, col, row_1[col_index_1], row_2[col_index_2] ); } } } }
let row_2 = match get_csv_row(&csv_desc_2, index_2) { Ok(row) => row,
random_line_split
verify.rs
use std::ffi::OsStr; use std::fmt::Write as _; use std::fs::File; use std::path::{Path, PathBuf}; use crate::cache::caches::RegistrySuperCache; use crate::cache::*; use crate::remove::remove_file; use flate2::read::GzDecoder; use rayon::iter::*; use tar::Archive; use walkdir::WalkDir; #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] struct FileWithSize { path: PathBuf, size: u64, } // #113'verify' incorrectly determines paths as missing due to different unicode representations. fn normalized(path: PathBuf) -> PathBuf { use unicode_normalization::{is_nfkc, UnicodeNormalization}; match path.to_str() { Some(path) if!is_nfkc(path) => path.chars().nfc().collect::<String>().into(), _ => path, } } impl FileWithSize { fn from_disk(path_orig: &Path, krate_root: &OsStr) -> Self { // we need to cut off.cargo/registry/src/github.com-1ecc6299db9ec823/ let index = path_orig .iter() .enumerate() .position(|e| e.1 == krate_root) .expect("must find cargo root in path contained within it"); let path = path_orig.iter().skip(index).collect::<PathBuf>(); FileWithSize { path: normalized(path), size: std::fs::metadata(path_orig).unwrap().len(), } } // TODO: understand this R: Read stuff fn from_archive<R: std::io::Read>(entry: &tar::Entry<'_, R>) -> Self { FileWithSize { path: normalized(entry.path().unwrap().into_owned()), size: entry.size(), } } } /// Size difference of a file in the.gz archive and extracted source #[derive(Debug, Clone)] pub(crate) struct FileSizeDifference { path: PathBuf, size_archive: u64, size_source: u64, } /// The Difference between extracted crate sources and an.crate tar.gz archive #[derive(Debug, Clone)] pub(crate) struct Diff { // the crate we are diffing krate_name: String, files_missing_in_checkout: Vec<PathBuf>, additional_files_in_checkout: Vec<PathBuf>, files_size_difference: Vec<FileSizeDifference>, source_path: Option<PathBuf>, } impl Diff { fn new() -> Self { Self { krate_name: String::new(), files_missing_in_checkout: Vec::new(), 
additional_files_in_checkout: Vec::new(), files_size_difference: Vec::new(), source_path: None, } } /// returns true if there is no diff fn is_ok(&self) -> bool { self.files_missing_in_checkout.is_empty() && self.additional_files_in_checkout.is_empty() && self.files_size_difference.is_empty() } pub(crate) fn details(&self) -> String { let mut s = format!("Crate: {}\n", self.krate_name); if!self.files_missing_in_checkout.is_empty() { write!( s, "Missing from source:\n{}", self.files_missing_in_checkout .iter() .map(|path| path.display().to_string()) .collect::<Vec<String>>() .join(", ") ) .unwrap(); s.push('\n'); } if!self.additional_files_in_checkout.is_empty() { write!( s, "Not found in archive/additional:\n{}", self.additional_files_in_checkout .iter() .map(|path| path.display().to_string()) .collect::<Vec<String>>() .join(", ") ) .unwrap(); s.push('\n'); } if!self.files_size_difference.is_empty() { self.files_size_difference .iter() .map(|fsd| { format!( "File: {}, size in archive: {}b, size in checkout: {}b\n", fsd.path.display(), fsd.size_archive, fsd.size_source ) }) .for_each(|strg| s.push_str(&strg)); } s } } /// take a path to an extracted.crate source and map it to the corresponding.carte archive path fn map_src_path_to_cache_path(src_path: &Path) -> PathBuf
dir.push(&comp1_with_crate_ext); // bytes-0.4.12.crate dir.into_iter().collect::<PathBuf>() } /// look into the.gz archive and get all the contained files+sizes fn sizes_of_archive_files(path: &Path) -> Vec<FileWithSize> { let tar_gz = File::open(path).unwrap(); // extract the tar let tar = GzDecoder::new(tar_gz); let mut archive = Archive::new(tar); let archive_files = archive.entries().unwrap(); // println!("files inside the archive"); // archive_files.for_each(|x| println!("{:?}", x.unwrap().path())); archive_files .into_iter() .map(|entry| FileWithSize::from_archive(&entry.unwrap())) .collect::<Vec<FileWithSize>>() } /// get the files and their sizes of the extracted.crate sources fn sizes_of_src_dir(source: &Path) -> Vec<FileWithSize> { let krate_root = source.iter().last().unwrap(); WalkDir::new(source) .into_iter() .map(Result::unwrap) // need to skip directories since the are only implicitly inside the tar (via file paths) .filter(|de| de.file_type().is_file()) .map(|direntry| { let p = direntry.path(); p.to_owned() }) .map(|p| FileWithSize::from_disk(&p, krate_root)) .collect() } /// compare files of a.crate gz archive and extracted sources and return a Diff object which describes those changes fn diff_crate_and_source(krate: &Path, source: &Path) -> Diff { let files_of_archive: Vec<FileWithSize> = sizes_of_archive_files(krate); let files_of_source: Vec<FileWithSize> = sizes_of_src_dir(source); let mut diff = Diff::new(); diff.source_path = Some(source.to_path_buf()); diff.krate_name = source.iter().last().unwrap().to_str().unwrap().to_string(); let files_of_source_paths: Vec<&PathBuf> = files_of_source.iter().map(|fws| &fws.path).collect(); for archive_file in &files_of_archive { let archive_f_path = &archive_file.path; if!files_of_source_paths.contains(&archive_f_path) { // the file is contaied in the archive but not in the extracted source diff.files_missing_in_checkout.push(archive_f_path.clone()); } else if 
files_of_source_paths.contains(&archive_f_path) { // file is contained in both, but sizes differ match files_of_source .iter() .find(|fws| fws.path == archive_file.path) { Some(fws) => { if fws.size!= archive_file.size { diff.files_size_difference.push(FileSizeDifference { path: fws.path.clone(), size_archive: archive_file.size, size_source: fws.size, }); } } None => unreachable!(), // we already checked this }; } } let files_of_archive: Vec<&PathBuf> = files_of_archive.iter().map(|fws| &fws.path).collect(); for source_file in files_of_source_paths .iter() .filter(|path| path.file_name().unwrap()!= ".cargo-ok") .filter(|path|!path.is_dir() /* skip dirs */) { // dbg!(source_file); #[allow(clippy::implicit_clone)] if!files_of_archive.iter().any(|path| path == source_file) { diff.additional_files_in_checkout .push(source_file.to_path_buf()); } } diff } pub(crate) fn verify_crates( registry_sources_caches: &mut registry_sources::RegistrySourceCaches, ) -> Result<(), Vec<Diff>> { // iterate over all the extracted sources that we have let bad_sources: Vec<_> = registry_sources_caches .items() .par_iter() // get the paths to the source and the.crate for all extracted crates .map(|source| (source, map_src_path_to_cache_path(source))) // we need both the.crate and the directory to exist for verification .filter(|(source, krate)| source.exists() && krate.exists()) // look into the.gz archive and get all the contained files+sizes .map(|(source, krate)| diff_crate_and_source(&krate, source)) // save only the "bad" packages .filter(|diff|!diff.is_ok()) .map(|diff| { eprintln!("Possibly corrupted source: {}", diff.krate_name); diff }) .collect::<Vec<_>>(); if bad_sources.is_empty() { Ok(()) } else { Err(bad_sources) } } pub(crate) fn clean_corrupted( registry_sources_caches: &mut registry_sources::RegistrySourceCaches, diff_list: &[Diff], dry_run: bool, ) { // hack because we need a &mut bool in remove_file() let mut bool = false; diff_list .iter() .filter_map(|diff| 
diff.source_path.as_ref()) .filter(|path| path.is_dir()) .for_each(|path| { remove_file( path, dry_run, &mut bool, Some(format!("removing corrupted source: {}", path.display())), &crate::remove::DryRunMessage::Default, // we don't print a summary or anything (yet..) None, ); }); // just in case registry_sources_caches.invalidate(); } #[cfg(test)] mod verification_tests { use super::*; use pretty_assertions::assert_eq; #[test] fn test_map_src_path_to_cache_path() { let old_src_path = PathBuf::from( "/home/matthias/.cargo/registry/src/github.com-1ecc6299db9ec823/bytes-0.4.12", ); let new_archive_path = PathBuf::from( "/home/matthias/.cargo/registry/cache/github.com-1ecc6299db9ec823/bytes-0.4.12.crate", ); let new = map_src_path_to_cache_path(&old_src_path); assert_eq!(new, new_archive_path); } }
{ // for each directory, find the path to the corresponding .crate archive // .cargo/registry/src/github.com-1ecc6299db9ec823/bytes-0.4.12 // corresponds to // .cargo/registry/cache/github.com-1ecc6299db9ec823/bytes-0.4.12.crate // reverse, and "pop" the front components let mut dir = src_path.iter().collect::<Vec<&OsStr>>(); let comp1 = dir.pop().unwrap(); // /bytes-0.4.12 let comp2 = dir.pop().unwrap(); // github.com-1ecc6299db9ec823 let _src = dir.pop().unwrap(); // throw this away and add "cache" instead // reconstruct the fixed path in reverse order dir.push(OsStr::new("cache")); dir.push(comp2); // github.com... // we need to add the .crate extension (path to the gzip archive) let mut comp1_with_crate_ext = comp1.to_os_string(); comp1_with_crate_ext.push(".crate");
identifier_body
verify.rs
use std::ffi::OsStr; use std::fmt::Write as _; use std::fs::File; use std::path::{Path, PathBuf}; use crate::cache::caches::RegistrySuperCache; use crate::cache::*; use crate::remove::remove_file; use flate2::read::GzDecoder; use rayon::iter::*; use tar::Archive; use walkdir::WalkDir; #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] struct FileWithSize { path: PathBuf, size: u64, } // #113'verify' incorrectly determines paths as missing due to different unicode representations. fn normalized(path: PathBuf) -> PathBuf { use unicode_normalization::{is_nfkc, UnicodeNormalization}; match path.to_str() { Some(path) if!is_nfkc(path) => path.chars().nfc().collect::<String>().into(), _ => path, } } impl FileWithSize { fn from_disk(path_orig: &Path, krate_root: &OsStr) -> Self { // we need to cut off.cargo/registry/src/github.com-1ecc6299db9ec823/ let index = path_orig .iter() .enumerate() .position(|e| e.1 == krate_root) .expect("must find cargo root in path contained within it"); let path = path_orig.iter().skip(index).collect::<PathBuf>(); FileWithSize { path: normalized(path), size: std::fs::metadata(path_orig).unwrap().len(), } } // TODO: understand this R: Read stuff fn from_archive<R: std::io::Read>(entry: &tar::Entry<'_, R>) -> Self { FileWithSize { path: normalized(entry.path().unwrap().into_owned()), size: entry.size(), } } } /// Size difference of a file in the.gz archive and extracted source #[derive(Debug, Clone)] pub(crate) struct FileSizeDifference { path: PathBuf, size_archive: u64, size_source: u64, } /// The Difference between extracted crate sources and an.crate tar.gz archive #[derive(Debug, Clone)] pub(crate) struct Diff { // the crate we are diffing krate_name: String, files_missing_in_checkout: Vec<PathBuf>, additional_files_in_checkout: Vec<PathBuf>, files_size_difference: Vec<FileSizeDifference>, source_path: Option<PathBuf>, } impl Diff { fn new() -> Self { Self { krate_name: String::new(), files_missing_in_checkout: Vec::new(), 
additional_files_in_checkout: Vec::new(), files_size_difference: Vec::new(), source_path: None, } } /// returns true if there is no diff fn is_ok(&self) -> bool { self.files_missing_in_checkout.is_empty() && self.additional_files_in_checkout.is_empty() && self.files_size_difference.is_empty() } pub(crate) fn details(&self) -> String { let mut s = format!("Crate: {}\n", self.krate_name); if!self.files_missing_in_checkout.is_empty() { write!( s, "Missing from source:\n{}", self.files_missing_in_checkout .iter() .map(|path| path.display().to_string()) .collect::<Vec<String>>() .join(", ") ) .unwrap(); s.push('\n'); } if!self.additional_files_in_checkout.is_empty() { write!( s, "Not found in archive/additional:\n{}", self.additional_files_in_checkout .iter() .map(|path| path.display().to_string()) .collect::<Vec<String>>() .join(", ") ) .unwrap(); s.push('\n'); } if!self.files_size_difference.is_empty() { self.files_size_difference .iter() .map(|fsd| { format!( "File: {}, size in archive: {}b, size in checkout: {}b\n", fsd.path.display(), fsd.size_archive, fsd.size_source ) }) .for_each(|strg| s.push_str(&strg)); } s } } /// take a path to an extracted.crate source and map it to the corresponding.carte archive path fn map_src_path_to_cache_path(src_path: &Path) -> PathBuf { // for each directory, find the path to the corresponding.crate archive //.cargo/registry/src/github.com-1ecc6299db9ec823/bytes-0.4.12 // corresponds to //.cargo/registry/cache/github.com-1ecc6299db9ec823/bytes-0.4.12.crate // reverse, and "pop" the front components let mut dir = src_path.iter().collect::<Vec<&OsStr>>(); let comp1 = dir.pop().unwrap(); // /bytes-0.4.12 let comp2 = dir.pop().unwrap(); // github.com-1ecc6299db9ec823 let _src = dir.pop().unwrap(); // throw this away and add "cache" instead // reconstruct the fixed path in reverse order dir.push(OsStr::new("cache")); dir.push(comp2); // github.com... 
// we need to add the.crate extension (path to the gzip archive) let mut comp1_with_crate_ext = comp1.to_os_string(); comp1_with_crate_ext.push(".crate"); dir.push(&comp1_with_crate_ext); // bytes-0.4.12.crate dir.into_iter().collect::<PathBuf>() } /// look into the.gz archive and get all the contained files+sizes fn sizes_of_archive_files(path: &Path) -> Vec<FileWithSize> { let tar_gz = File::open(path).unwrap(); // extract the tar let tar = GzDecoder::new(tar_gz); let mut archive = Archive::new(tar); let archive_files = archive.entries().unwrap(); // println!("files inside the archive"); // archive_files.for_each(|x| println!("{:?}", x.unwrap().path())); archive_files .into_iter() .map(|entry| FileWithSize::from_archive(&entry.unwrap())) .collect::<Vec<FileWithSize>>() } /// get the files and their sizes of the extracted.crate sources fn sizes_of_src_dir(source: &Path) -> Vec<FileWithSize> { let krate_root = source.iter().last().unwrap(); WalkDir::new(source) .into_iter() .map(Result::unwrap) // need to skip directories since the are only implicitly inside the tar (via file paths) .filter(|de| de.file_type().is_file()) .map(|direntry| { let p = direntry.path(); p.to_owned() }) .map(|p| FileWithSize::from_disk(&p, krate_root)) .collect() } /// compare files of a.crate gz archive and extracted sources and return a Diff object which describes those changes fn diff_crate_and_source(krate: &Path, source: &Path) -> Diff { let files_of_archive: Vec<FileWithSize> = sizes_of_archive_files(krate); let files_of_source: Vec<FileWithSize> = sizes_of_src_dir(source); let mut diff = Diff::new(); diff.source_path = Some(source.to_path_buf()); diff.krate_name = source.iter().last().unwrap().to_str().unwrap().to_string(); let files_of_source_paths: Vec<&PathBuf> = files_of_source.iter().map(|fws| &fws.path).collect(); for archive_file in &files_of_archive { let archive_f_path = &archive_file.path; if!files_of_source_paths.contains(&archive_f_path) { // the file is contaied in the 
archive but not in the extracted source diff.files_missing_in_checkout.push(archive_f_path.clone()); } else if files_of_source_paths.contains(&archive_f_path) { // file is contained in both, but sizes differ match files_of_source .iter() .find(|fws| fws.path == archive_file.path)
diff.files_size_difference.push(FileSizeDifference { path: fws.path.clone(), size_archive: archive_file.size, size_source: fws.size, }); } } None => unreachable!(), // we already checked this }; } } let files_of_archive: Vec<&PathBuf> = files_of_archive.iter().map(|fws| &fws.path).collect(); for source_file in files_of_source_paths .iter() .filter(|path| path.file_name().unwrap()!= ".cargo-ok") .filter(|path|!path.is_dir() /* skip dirs */) { // dbg!(source_file); #[allow(clippy::implicit_clone)] if!files_of_archive.iter().any(|path| path == source_file) { diff.additional_files_in_checkout .push(source_file.to_path_buf()); } } diff } pub(crate) fn verify_crates( registry_sources_caches: &mut registry_sources::RegistrySourceCaches, ) -> Result<(), Vec<Diff>> { // iterate over all the extracted sources that we have let bad_sources: Vec<_> = registry_sources_caches .items() .par_iter() // get the paths to the source and the.crate for all extracted crates .map(|source| (source, map_src_path_to_cache_path(source))) // we need both the.crate and the directory to exist for verification .filter(|(source, krate)| source.exists() && krate.exists()) // look into the.gz archive and get all the contained files+sizes .map(|(source, krate)| diff_crate_and_source(&krate, source)) // save only the "bad" packages .filter(|diff|!diff.is_ok()) .map(|diff| { eprintln!("Possibly corrupted source: {}", diff.krate_name); diff }) .collect::<Vec<_>>(); if bad_sources.is_empty() { Ok(()) } else { Err(bad_sources) } } pub(crate) fn clean_corrupted( registry_sources_caches: &mut registry_sources::RegistrySourceCaches, diff_list: &[Diff], dry_run: bool, ) { // hack because we need a &mut bool in remove_file() let mut bool = false; diff_list .iter() .filter_map(|diff| diff.source_path.as_ref()) .filter(|path| path.is_dir()) .for_each(|path| { remove_file( path, dry_run, &mut bool, Some(format!("removing corrupted source: {}", path.display())), &crate::remove::DryRunMessage::Default, // we don't 
print a summary or anything (yet..) None, ); }); // just in case registry_sources_caches.invalidate(); } #[cfg(test)] mod verification_tests { use super::*; use pretty_assertions::assert_eq; #[test] fn test_map_src_path_to_cache_path() { let old_src_path = PathBuf::from( "/home/matthias/.cargo/registry/src/github.com-1ecc6299db9ec823/bytes-0.4.12", ); let new_archive_path = PathBuf::from( "/home/matthias/.cargo/registry/cache/github.com-1ecc6299db9ec823/bytes-0.4.12.crate", ); let new = map_src_path_to_cache_path(&old_src_path); assert_eq!(new, new_archive_path); } }
{ Some(fws) => { if fws.size != archive_file.size {
random_line_split
verify.rs
use std::ffi::OsStr; use std::fmt::Write as _; use std::fs::File; use std::path::{Path, PathBuf}; use crate::cache::caches::RegistrySuperCache; use crate::cache::*; use crate::remove::remove_file; use flate2::read::GzDecoder; use rayon::iter::*; use tar::Archive; use walkdir::WalkDir; #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] struct FileWithSize { path: PathBuf, size: u64, } // #113'verify' incorrectly determines paths as missing due to different unicode representations. fn normalized(path: PathBuf) -> PathBuf { use unicode_normalization::{is_nfkc, UnicodeNormalization}; match path.to_str() { Some(path) if!is_nfkc(path) => path.chars().nfc().collect::<String>().into(), _ => path, } } impl FileWithSize { fn from_disk(path_orig: &Path, krate_root: &OsStr) -> Self { // we need to cut off.cargo/registry/src/github.com-1ecc6299db9ec823/ let index = path_orig .iter() .enumerate() .position(|e| e.1 == krate_root) .expect("must find cargo root in path contained within it"); let path = path_orig.iter().skip(index).collect::<PathBuf>(); FileWithSize { path: normalized(path), size: std::fs::metadata(path_orig).unwrap().len(), } } // TODO: understand this R: Read stuff fn from_archive<R: std::io::Read>(entry: &tar::Entry<'_, R>) -> Self { FileWithSize { path: normalized(entry.path().unwrap().into_owned()), size: entry.size(), } } } /// Size difference of a file in the.gz archive and extracted source #[derive(Debug, Clone)] pub(crate) struct FileSizeDifference { path: PathBuf, size_archive: u64, size_source: u64, } /// The Difference between extracted crate sources and an.crate tar.gz archive #[derive(Debug, Clone)] pub(crate) struct Diff { // the crate we are diffing krate_name: String, files_missing_in_checkout: Vec<PathBuf>, additional_files_in_checkout: Vec<PathBuf>, files_size_difference: Vec<FileSizeDifference>, source_path: Option<PathBuf>, } impl Diff { fn new() -> Self { Self { krate_name: String::new(), files_missing_in_checkout: Vec::new(), 
additional_files_in_checkout: Vec::new(), files_size_difference: Vec::new(), source_path: None, } } /// returns true if there is no diff fn is_ok(&self) -> bool { self.files_missing_in_checkout.is_empty() && self.additional_files_in_checkout.is_empty() && self.files_size_difference.is_empty() } pub(crate) fn details(&self) -> String { let mut s = format!("Crate: {}\n", self.krate_name); if!self.files_missing_in_checkout.is_empty() { write!( s, "Missing from source:\n{}", self.files_missing_in_checkout .iter() .map(|path| path.display().to_string()) .collect::<Vec<String>>() .join(", ") ) .unwrap(); s.push('\n'); } if!self.additional_files_in_checkout.is_empty() { write!( s, "Not found in archive/additional:\n{}", self.additional_files_in_checkout .iter() .map(|path| path.display().to_string()) .collect::<Vec<String>>() .join(", ") ) .unwrap(); s.push('\n'); } if!self.files_size_difference.is_empty() { self.files_size_difference .iter() .map(|fsd| { format!( "File: {}, size in archive: {}b, size in checkout: {}b\n", fsd.path.display(), fsd.size_archive, fsd.size_source ) }) .for_each(|strg| s.push_str(&strg)); } s } } /// take a path to an extracted.crate source and map it to the corresponding.carte archive path fn map_src_path_to_cache_path(src_path: &Path) -> PathBuf { // for each directory, find the path to the corresponding.crate archive //.cargo/registry/src/github.com-1ecc6299db9ec823/bytes-0.4.12 // corresponds to //.cargo/registry/cache/github.com-1ecc6299db9ec823/bytes-0.4.12.crate // reverse, and "pop" the front components let mut dir = src_path.iter().collect::<Vec<&OsStr>>(); let comp1 = dir.pop().unwrap(); // /bytes-0.4.12 let comp2 = dir.pop().unwrap(); // github.com-1ecc6299db9ec823 let _src = dir.pop().unwrap(); // throw this away and add "cache" instead // reconstruct the fixed path in reverse order dir.push(OsStr::new("cache")); dir.push(comp2); // github.com... 
// we need to add the.crate extension (path to the gzip archive) let mut comp1_with_crate_ext = comp1.to_os_string(); comp1_with_crate_ext.push(".crate"); dir.push(&comp1_with_crate_ext); // bytes-0.4.12.crate dir.into_iter().collect::<PathBuf>() } /// look into the.gz archive and get all the contained files+sizes fn sizes_of_archive_files(path: &Path) -> Vec<FileWithSize> { let tar_gz = File::open(path).unwrap(); // extract the tar let tar = GzDecoder::new(tar_gz); let mut archive = Archive::new(tar); let archive_files = archive.entries().unwrap(); // println!("files inside the archive"); // archive_files.for_each(|x| println!("{:?}", x.unwrap().path())); archive_files .into_iter() .map(|entry| FileWithSize::from_archive(&entry.unwrap())) .collect::<Vec<FileWithSize>>() } /// get the files and their sizes of the extracted.crate sources fn sizes_of_src_dir(source: &Path) -> Vec<FileWithSize> { let krate_root = source.iter().last().unwrap(); WalkDir::new(source) .into_iter() .map(Result::unwrap) // need to skip directories since the are only implicitly inside the tar (via file paths) .filter(|de| de.file_type().is_file()) .map(|direntry| { let p = direntry.path(); p.to_owned() }) .map(|p| FileWithSize::from_disk(&p, krate_root)) .collect() } /// compare files of a.crate gz archive and extracted sources and return a Diff object which describes those changes fn diff_crate_and_source(krate: &Path, source: &Path) -> Diff { let files_of_archive: Vec<FileWithSize> = sizes_of_archive_files(krate); let files_of_source: Vec<FileWithSize> = sizes_of_src_dir(source); let mut diff = Diff::new(); diff.source_path = Some(source.to_path_buf()); diff.krate_name = source.iter().last().unwrap().to_str().unwrap().to_string(); let files_of_source_paths: Vec<&PathBuf> = files_of_source.iter().map(|fws| &fws.path).collect(); for archive_file in &files_of_archive { let archive_f_path = &archive_file.path; if!files_of_source_paths.contains(&archive_f_path) { // the file is contaied in the 
archive but not in the extracted source diff.files_missing_in_checkout.push(archive_f_path.clone()); } else if files_of_source_paths.contains(&archive_f_path) { // file is contained in both, but sizes differ match files_of_source .iter() .find(|fws| fws.path == archive_file.path) { Some(fws) =>
None => unreachable!(), // we already checked this }; } } let files_of_archive: Vec<&PathBuf> = files_of_archive.iter().map(|fws| &fws.path).collect(); for source_file in files_of_source_paths .iter() .filter(|path| path.file_name().unwrap()!= ".cargo-ok") .filter(|path|!path.is_dir() /* skip dirs */) { // dbg!(source_file); #[allow(clippy::implicit_clone)] if!files_of_archive.iter().any(|path| path == source_file) { diff.additional_files_in_checkout .push(source_file.to_path_buf()); } } diff } pub(crate) fn verify_crates( registry_sources_caches: &mut registry_sources::RegistrySourceCaches, ) -> Result<(), Vec<Diff>> { // iterate over all the extracted sources that we have let bad_sources: Vec<_> = registry_sources_caches .items() .par_iter() // get the paths to the source and the.crate for all extracted crates .map(|source| (source, map_src_path_to_cache_path(source))) // we need both the.crate and the directory to exist for verification .filter(|(source, krate)| source.exists() && krate.exists()) // look into the.gz archive and get all the contained files+sizes .map(|(source, krate)| diff_crate_and_source(&krate, source)) // save only the "bad" packages .filter(|diff|!diff.is_ok()) .map(|diff| { eprintln!("Possibly corrupted source: {}", diff.krate_name); diff }) .collect::<Vec<_>>(); if bad_sources.is_empty() { Ok(()) } else { Err(bad_sources) } } pub(crate) fn clean_corrupted( registry_sources_caches: &mut registry_sources::RegistrySourceCaches, diff_list: &[Diff], dry_run: bool, ) { // hack because we need a &mut bool in remove_file() let mut bool = false; diff_list .iter() .filter_map(|diff| diff.source_path.as_ref()) .filter(|path| path.is_dir()) .for_each(|path| { remove_file( path, dry_run, &mut bool, Some(format!("removing corrupted source: {}", path.display())), &crate::remove::DryRunMessage::Default, // we don't print a summary or anything (yet..) 
None, ); }); // just in case registry_sources_caches.invalidate(); } #[cfg(test)] mod verification_tests { use super::*; use pretty_assertions::assert_eq; #[test] fn test_map_src_path_to_cache_path() { let old_src_path = PathBuf::from( "/home/matthias/.cargo/registry/src/github.com-1ecc6299db9ec823/bytes-0.4.12", ); let new_archive_path = PathBuf::from( "/home/matthias/.cargo/registry/cache/github.com-1ecc6299db9ec823/bytes-0.4.12.crate", ); let new = map_src_path_to_cache_path(&old_src_path); assert_eq!(new, new_archive_path); } }
{ if fws.size != archive_file.size { diff.files_size_difference.push(FileSizeDifference { path: fws.path.clone(), size_archive: archive_file.size, size_source: fws.size, }); } }
conditional_block
verify.rs
use std::ffi::OsStr; use std::fmt::Write as _; use std::fs::File; use std::path::{Path, PathBuf}; use crate::cache::caches::RegistrySuperCache; use crate::cache::*; use crate::remove::remove_file; use flate2::read::GzDecoder; use rayon::iter::*; use tar::Archive; use walkdir::WalkDir; #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] struct FileWithSize { path: PathBuf, size: u64, } // #113'verify' incorrectly determines paths as missing due to different unicode representations. fn normalized(path: PathBuf) -> PathBuf { use unicode_normalization::{is_nfkc, UnicodeNormalization}; match path.to_str() { Some(path) if!is_nfkc(path) => path.chars().nfc().collect::<String>().into(), _ => path, } } impl FileWithSize { fn from_disk(path_orig: &Path, krate_root: &OsStr) -> Self { // we need to cut off.cargo/registry/src/github.com-1ecc6299db9ec823/ let index = path_orig .iter() .enumerate() .position(|e| e.1 == krate_root) .expect("must find cargo root in path contained within it"); let path = path_orig.iter().skip(index).collect::<PathBuf>(); FileWithSize { path: normalized(path), size: std::fs::metadata(path_orig).unwrap().len(), } } // TODO: understand this R: Read stuff fn from_archive<R: std::io::Read>(entry: &tar::Entry<'_, R>) -> Self { FileWithSize { path: normalized(entry.path().unwrap().into_owned()), size: entry.size(), } } } /// Size difference of a file in the.gz archive and extracted source #[derive(Debug, Clone)] pub(crate) struct FileSizeDifference { path: PathBuf, size_archive: u64, size_source: u64, } /// The Difference between extracted crate sources and an.crate tar.gz archive #[derive(Debug, Clone)] pub(crate) struct Diff { // the crate we are diffing krate_name: String, files_missing_in_checkout: Vec<PathBuf>, additional_files_in_checkout: Vec<PathBuf>, files_size_difference: Vec<FileSizeDifference>, source_path: Option<PathBuf>, } impl Diff { fn new() -> Self { Self { krate_name: String::new(), files_missing_in_checkout: Vec::new(), 
additional_files_in_checkout: Vec::new(), files_size_difference: Vec::new(), source_path: None, } } /// returns true if there is no diff fn is_ok(&self) -> bool { self.files_missing_in_checkout.is_empty() && self.additional_files_in_checkout.is_empty() && self.files_size_difference.is_empty() } pub(crate) fn details(&self) -> String { let mut s = format!("Crate: {}\n", self.krate_name); if!self.files_missing_in_checkout.is_empty() { write!( s, "Missing from source:\n{}", self.files_missing_in_checkout .iter() .map(|path| path.display().to_string()) .collect::<Vec<String>>() .join(", ") ) .unwrap(); s.push('\n'); } if!self.additional_files_in_checkout.is_empty() { write!( s, "Not found in archive/additional:\n{}", self.additional_files_in_checkout .iter() .map(|path| path.display().to_string()) .collect::<Vec<String>>() .join(", ") ) .unwrap(); s.push('\n'); } if!self.files_size_difference.is_empty() { self.files_size_difference .iter() .map(|fsd| { format!( "File: {}, size in archive: {}b, size in checkout: {}b\n", fsd.path.display(), fsd.size_archive, fsd.size_source ) }) .for_each(|strg| s.push_str(&strg)); } s } } /// take a path to an extracted.crate source and map it to the corresponding.carte archive path fn
(src_path: &Path) -> PathBuf { // for each directory, find the path to the corresponding.crate archive //.cargo/registry/src/github.com-1ecc6299db9ec823/bytes-0.4.12 // corresponds to //.cargo/registry/cache/github.com-1ecc6299db9ec823/bytes-0.4.12.crate // reverse, and "pop" the front components let mut dir = src_path.iter().collect::<Vec<&OsStr>>(); let comp1 = dir.pop().unwrap(); // /bytes-0.4.12 let comp2 = dir.pop().unwrap(); // github.com-1ecc6299db9ec823 let _src = dir.pop().unwrap(); // throw this away and add "cache" instead // reconstruct the fixed path in reverse order dir.push(OsStr::new("cache")); dir.push(comp2); // github.com... // we need to add the.crate extension (path to the gzip archive) let mut comp1_with_crate_ext = comp1.to_os_string(); comp1_with_crate_ext.push(".crate"); dir.push(&comp1_with_crate_ext); // bytes-0.4.12.crate dir.into_iter().collect::<PathBuf>() } /// look into the.gz archive and get all the contained files+sizes fn sizes_of_archive_files(path: &Path) -> Vec<FileWithSize> { let tar_gz = File::open(path).unwrap(); // extract the tar let tar = GzDecoder::new(tar_gz); let mut archive = Archive::new(tar); let archive_files = archive.entries().unwrap(); // println!("files inside the archive"); // archive_files.for_each(|x| println!("{:?}", x.unwrap().path())); archive_files .into_iter() .map(|entry| FileWithSize::from_archive(&entry.unwrap())) .collect::<Vec<FileWithSize>>() } /// get the files and their sizes of the extracted.crate sources fn sizes_of_src_dir(source: &Path) -> Vec<FileWithSize> { let krate_root = source.iter().last().unwrap(); WalkDir::new(source) .into_iter() .map(Result::unwrap) // need to skip directories since the are only implicitly inside the tar (via file paths) .filter(|de| de.file_type().is_file()) .map(|direntry| { let p = direntry.path(); p.to_owned() }) .map(|p| FileWithSize::from_disk(&p, krate_root)) .collect() } /// compare files of a.crate gz archive and extracted sources and return a Diff object 
which describes those changes fn diff_crate_and_source(krate: &Path, source: &Path) -> Diff { let files_of_archive: Vec<FileWithSize> = sizes_of_archive_files(krate); let files_of_source: Vec<FileWithSize> = sizes_of_src_dir(source); let mut diff = Diff::new(); diff.source_path = Some(source.to_path_buf()); diff.krate_name = source.iter().last().unwrap().to_str().unwrap().to_string(); let files_of_source_paths: Vec<&PathBuf> = files_of_source.iter().map(|fws| &fws.path).collect(); for archive_file in &files_of_archive { let archive_f_path = &archive_file.path; if!files_of_source_paths.contains(&archive_f_path) { // the file is contaied in the archive but not in the extracted source diff.files_missing_in_checkout.push(archive_f_path.clone()); } else if files_of_source_paths.contains(&archive_f_path) { // file is contained in both, but sizes differ match files_of_source .iter() .find(|fws| fws.path == archive_file.path) { Some(fws) => { if fws.size!= archive_file.size { diff.files_size_difference.push(FileSizeDifference { path: fws.path.clone(), size_archive: archive_file.size, size_source: fws.size, }); } } None => unreachable!(), // we already checked this }; } } let files_of_archive: Vec<&PathBuf> = files_of_archive.iter().map(|fws| &fws.path).collect(); for source_file in files_of_source_paths .iter() .filter(|path| path.file_name().unwrap()!= ".cargo-ok") .filter(|path|!path.is_dir() /* skip dirs */) { // dbg!(source_file); #[allow(clippy::implicit_clone)] if!files_of_archive.iter().any(|path| path == source_file) { diff.additional_files_in_checkout .push(source_file.to_path_buf()); } } diff } pub(crate) fn verify_crates( registry_sources_caches: &mut registry_sources::RegistrySourceCaches, ) -> Result<(), Vec<Diff>> { // iterate over all the extracted sources that we have let bad_sources: Vec<_> = registry_sources_caches .items() .par_iter() // get the paths to the source and the.crate for all extracted crates .map(|source| (source, 
map_src_path_to_cache_path(source))) // we need both the.crate and the directory to exist for verification .filter(|(source, krate)| source.exists() && krate.exists()) // look into the.gz archive and get all the contained files+sizes .map(|(source, krate)| diff_crate_and_source(&krate, source)) // save only the "bad" packages .filter(|diff|!diff.is_ok()) .map(|diff| { eprintln!("Possibly corrupted source: {}", diff.krate_name); diff }) .collect::<Vec<_>>(); if bad_sources.is_empty() { Ok(()) } else { Err(bad_sources) } } pub(crate) fn clean_corrupted( registry_sources_caches: &mut registry_sources::RegistrySourceCaches, diff_list: &[Diff], dry_run: bool, ) { // hack because we need a &mut bool in remove_file() let mut bool = false; diff_list .iter() .filter_map(|diff| diff.source_path.as_ref()) .filter(|path| path.is_dir()) .for_each(|path| { remove_file( path, dry_run, &mut bool, Some(format!("removing corrupted source: {}", path.display())), &crate::remove::DryRunMessage::Default, // we don't print a summary or anything (yet..) None, ); }); // just in case registry_sources_caches.invalidate(); } #[cfg(test)] mod verification_tests { use super::*; use pretty_assertions::assert_eq; #[test] fn test_map_src_path_to_cache_path() { let old_src_path = PathBuf::from( "/home/matthias/.cargo/registry/src/github.com-1ecc6299db9ec823/bytes-0.4.12", ); let new_archive_path = PathBuf::from( "/home/matthias/.cargo/registry/cache/github.com-1ecc6299db9ec823/bytes-0.4.12.crate", ); let new = map_src_path_to_cache_path(&old_src_path); assert_eq!(new, new_archive_path); } }
map_src_path_to_cache_path
identifier_name
load.rs
: Instant, } impl MasterCommitCache { /// Download the master-branch Rust commit list pub async fn download() -> anyhow::Result<Self> { let commits = collector::master_commits().await?; Ok(Self { commits, updated: Instant::now(), }) } } /// Site context object that contains global data pub struct SiteCtxt { /// Site configuration pub config: Config, /// Cached site landing page pub landing_page: ArcSwap<Option<Arc<crate::api::graphs::Response>>>, /// Index of various common queries pub index: ArcSwap<crate::db::Index>, /// Cached master-branch Rust commits pub master_commits: Arc<ArcSwap<MasterCommitCache>>, // outer Arc enables mutation in background task /// Database connection pool pub pool: Pool, } impl SiteCtxt { pub fn summary_scenarios(&self) -> Vec<crate::db::Scenario> { vec![ crate::db::Scenario::Empty, crate::db::Scenario::IncrementalEmpty, crate::db::Scenario::IncrementalFresh, crate::db::Scenario::IncrementalPatch("println".into()), ] } pub fn artifact_id_for_bound(&self, query: Bound, is_left: bool) -> Option<ArtifactId> { crate::selector::artifact_id_for_bound(&self.index.load(), query, is_left) } pub fn data_range(&self, range: RangeInclusive<Bound>) -> Vec<Commit> { crate::selector::range_subset(self.index.load().commits(), range) } /// Initialize `SiteCtxt` from database url pub async fn from_db_url(db_url: &str) -> anyhow::Result<Self> { let pool = Pool::open(db_url); let mut conn = pool.connection().await; let index = db::Index::load(&mut *conn).await; let config = if let Ok(s) = fs::read_to_string("site-config.toml") { toml::from_str(&s)? 
} else { Config { keys: Keys { github_api_token: std::env::var("GITHUB_API_TOKEN").ok(), github_webhook_secret: std::env::var("GITHUB_WEBHOOK_SECRET").ok(), }, } }; let master_commits = MasterCommitCache::download().await?; Ok(Self { config, index: ArcSwap::new(Arc::new(index)), master_commits: Arc::new(ArcSwap::new(Arc::new(master_commits))), pool, landing_page: ArcSwap::new(Arc::new(None)), }) } pub async fn conn(&self) -> Box<dyn database::pool::Connection> { self.pool.connection().await } /// Returns the not yet tested commits pub async fn missing_commits(&self) -> Vec<(Commit, MissingReason)> { let conn = self.conn().await; let (queued_pr_commits, in_progress_artifacts) = futures::join!(conn.queued_commits(), conn.in_progress_artifacts()); let master_commits = &self.get_master_commits().commits; let index = self.index.load(); let all_commits = index .commits() .iter() .map(|commit| commit.sha.clone()) .collect::<HashSet<_>>(); calculate_missing( master_commits.clone(), queued_pr_commits, in_progress_artifacts, all_commits, ) } /// Returns the not yet tested published artifacts, sorted from newest to oldest. pub async fn missing_published_artifacts(&self) -> anyhow::Result<Vec<String>> { let artifact_list: String = reqwest::get("https://static.rust-lang.org/manifests.txt") .await? .text() .await?; lazy_static! { static ref VERSION_REGEX: Regex = Regex::new(r"(\d+\.\d+.\d+)").unwrap(); } let conn = self.conn().await; let index = self.index.load(); let tested_artifacts: HashSet<_> = index.artifacts().collect(); let in_progress_tagged_artifacts: HashSet<_> = conn .in_progress_artifacts() .await .into_iter() .filter_map(|artifact| match artifact { ArtifactId::Commit(_) => None, ArtifactId::Tag(tag) => Some(tag), }) .collect(); // Gather at most last 20 published artifacts that are not yet tested and // are not in progress. 
let artifacts: Vec<_> = artifact_list .lines() .rev() .filter_map(parse_published_artifact_tag) .take(20) .filter(|artifact| { !tested_artifacts.contains(artifact.as_str()) &&!in_progress_tagged_artifacts.contains(artifact.as_str()) }) .collect(); Ok(artifacts) } pub async fn get_benchmark_category_map(&self) -> HashMap<Benchmark, Category> { let benchmarks = self.pool.connection().await.get_compile_benchmarks().await; benchmarks .into_iter() .map(|bench| { ( bench.name.as_str().into(), Category::from_db_representation(&bench.category).unwrap(), ) }) .collect() } /// Get cached master-branch Rust commits. /// Returns cached results immediately, but if the cached value is older than one minute, /// updates in a background task for next time. pub fn get_master_commits(&self) -> Guard<Arc<MasterCommitCache>> { let commits = self.master_commits.load(); if commits.updated.elapsed() > std::time::Duration::from_secs(60) { let master_commits = self.master_commits.clone(); tokio::task::spawn(async move { // if another update happens before this one is done, we will download the data twice, but that's it match MasterCommitCache::download().await { Ok(commits) => master_commits.store(Arc::new(commits)), Err(e) => { // couldn't get the data, keep serving cached results for now error!("error retrieving master commit list: {}", e) } } }); } commits } } /// Parses an artifact tag like `1.63.0` or `beta-2022-08-19` from a line taken from /// `https://static.rust-lang.org/manifests.txt`. fn parse_published_artifact_tag(line: &str) -> Option<String> { lazy_static! 
{ static ref VERSION_REGEX: Regex = Regex::new(r"(\d+\.\d+.\d+)").unwrap(); } let mut parts = line.rsplit('/'); let name = parts.next(); let date = parts.next(); if let Some(date) = date { if let Some(name) = name { // Create beta artifact in the form of beta-YYYY-MM-DD if name == "channel-rust-beta.toml" { return Some(format!("beta-{date}")); } else if let Some(capture) = VERSION_REGEX.captures(name) { if let Some(version) = capture.get(1).map(|c| c.as_str()) { return Some(version.to_string()); } } } } None } /// Calculating the missing commits. fn calculate_missing( master_commits: Vec<collector::MasterCommit>, queued_pr_commits: Vec<database::QueuedCommit>, in_progress_artifacts: Vec<ArtifactId>, all_commits: HashSet<String>, ) -> Vec<(Commit, MissingReason)> { calculate_missing_from( master_commits, queued_pr_commits, in_progress_artifacts, all_commits, Utc::now(), ) } /// Calculate the missing commits filtering out any that are 29 days or older than the supplied time. /// /// This is used by `calculate_missing` is exists as a separate function for testing purposes. 
fn calculate_missing_from( master_commits: Vec<collector::MasterCommit>, queued_pr_commits: Vec<database::QueuedCommit>, in_progress_artifacts: Vec<ArtifactId>, mut all_commits: HashSet<String>, time: chrono::DateTime<chrono::Utc>, ) -> Vec<(Commit, MissingReason)> { let mut queue = master_commits .into_iter() .filter(|c| time.signed_duration_since(c.time) < Duration::days(29)) .map(|c| { ( Commit { sha: c.sha, date: Date(c.time), r#type: CommitType::Master, }, // All recent master commits should have an associated PR MissingReason::Master { pr: c.pr.unwrap_or(0), parent_sha: c.parent_sha, is_try_parent: false, }, ) }) .collect::<Vec<_>>(); let master_commits = queue .iter() .map(|(mc, _)| mc.sha.clone()) .collect::<HashSet<_>>(); for database::QueuedCommit { sha, parent_sha, pr, include, exclude, runs, commit_date, } in queued_pr_commits .into_iter() // filter out any queued PR master commits (leaving only try commits) .filter(|c|!master_commits.contains(&c.sha)) { // Mark the parent commit as a try_parent. if let Some((_, metadata)) = queue.iter_mut().find(|(m, _)| m.sha == parent_sha.as_str()) { if let MissingReason::Master { is_try_parent,.. 
} = metadata { *is_try_parent = true; } else { unreachable!("try commit has non-master parent {:?}", metadata); }; } queue.push(( Commit { sha: sha.to_string(), date: commit_date.unwrap_or(Date::empty()), r#type: CommitType::Try, }, MissingReason::Try { pr, parent_sha, include, exclude, runs, }, )); } for aid in in_progress_artifacts { match aid { ArtifactId::Commit(c) => { let previous = queue .iter() .find(|(i, _)| i.sha == c.sha) .map(|v| Box::new(v.1.clone())); all_commits.remove(&c.sha); queue.insert(0, (c, MissingReason::InProgress(previous))); } ArtifactId::Tag(_) => { // do nothing, for now, though eventually we'll want an artifact queue } } } let mut already_tested = all_commits.clone(); let mut i = 0; while i!= queue.len() { if!already_tested.insert(queue[i].0.sha.clone()) { queue.remove(i); } else { i += 1; } } sort_queue(all_commits.clone(), queue) } fn sort_queue( mut done: HashSet<String>, mut unordered_queue: Vec<(Commit, MissingReason)>, ) -> Vec<(Commit, MissingReason)> {
let mut finished = 0; while finished < unordered_queue.len() { // The next level is those elements in the unordered queue which // are ready to be benchmarked (i.e., those with parent in done or no // parent). let level_len = partition_in_place(unordered_queue[finished..].iter_mut(), |(_, mr)| { mr.parent_sha().map_or(true, |parent| done.contains(parent)) }); assert!( level_len!= 0, "at least one commit is ready done={:#?}, {:?}", done, &unordered_queue[finished..] ); let level = &mut unordered_queue[finished..][..level_len]; level.sort_unstable_by_key(|(c, mr)| { ( // InProgress MR go first (false < true) mr.parent_sha().is_some(), mr.pr().unwrap_or(0), c.sha.clone(), ) }); for (c, _) in level { done.insert(c.sha.clone()); } finished += level_len; } unordered_queue } // Copy of Iterator::partition_in_place, which is currently unstable. fn partition_in_place<'a, I, T: 'a, P>(mut iter: I, mut predicate: P) -> usize where I: Sized + DoubleEndedIterator<Item = &'a mut T>, P: FnMut(&T) -> bool, { // FIXME: should we worry about the count overflowing? The only way to have more than // `usize::MAX` mutable references is with ZSTs, which aren't useful to partition... // These closure "factory" functions exist to avoid genericity in `Self`. #[inline] fn is_false<'a, T>( predicate: &'a mut impl FnMut(&T) -> bool, true_count: &'a mut usize, ) -> impl FnMut(&&mut T) -> bool + 'a { move |x| { let p = predicate(&**x); *true_count += p as usize; !p } } #[inline] fn is_true<T>(predicate: &mut impl FnMut(&T) -> bool) -> impl FnMut(&&mut T) -> bool + '_ { move |x| predicate(&**x) } // Repeatedly find the first `false` and swap it with the last `true`. 
let mut true_count = 0; while let Some(head) = iter.find(is_false(&mut predicate, &mut true_count)) { if let Some(tail) = iter.rfind(is_true(&mut predicate)) { std::mem::swap(head, tail); true_count += 1; } else { break; } } true_count } /// One decimal place rounded percent #[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)] pub struct Percent(#[serde(with = "collector::round_float")] pub f64); #[cfg(test)] mod tests { use std::str::FromStr; use collector::MasterCommit; use database::QueuedCommit; use super::*; // Checks that when we have a setup like the following, where a -> b means b // is the parent of a (i.e., must be tested before we can report comparison // results for a): // // a -> b // -> try-on-a // // the resulting ordering is: // // b // a // try-on-a // // which ensures that as each commit finishes, we have the results for it. // // Note that try-on-a does *not* have a direct dependency on b's results // being available; we could order b after ([a, try-on-a, b]) but this means // that we have to be more careful about posting comparison results, and to // most observers they expect those posted as soon as the PR's build in the // queue finishes: not doing so will look odd to onlookers. 
#[test] fn try_commit_ancestors() { let time = chrono::DateTime::from_str("2021-09-01T00:00:00.000Z").unwrap(); let master_commits = vec![ MasterCommit { sha: "a".into(), parent_sha: "b".into(), pr: Some(2), time, }, MasterCommit { sha: "b".into(), parent_sha: "c".into(), pr: Some(1), time, }, ]; let queued_pr_commits = vec![ QueuedCommit { sha: "try-on-a".into(), parent_sha: "a".into(), pr: 3, include: None, exclude: None, runs: None, commit_date: None, }, QueuedCommit { sha: "b".into(), parent_sha: "c".into(), pr: 1, include: None, exclude: None, runs: None, commit_date: None, }, QueuedCommit { sha: "a".into(), parent_sha: "b".into(), pr: 2, include: None, exclude: None, runs: None, commit_date: None, }, ]; let in_progress_artifacts = vec![]; let mut all_commits = HashSet::new(); all_commits.insert("c".into()); let expected = vec![ ( Commit { sha: "b".into(), date: database::Date(time), r#type: CommitType::Master, }, MissingReason::Master { pr: 1, parent_sha: "c".into(), is_try_parent: false, }, ), ( Commit { sha: "a".into(), date: database::Date(time), r#type: CommitType::Master, }, MissingReason::Master { pr: 2, parent_sha: "b".into(), is_try_parent: true, }, ), ( Commit { sha: "try-on-a".into(), date: database::Date(time), r#type: CommitType::Try, }, MissingReason::Try { pr: 3, parent_sha: "
// A topological sort, where each "level" is additionally altered such that // try commits come first, and then sorted by PR # (as a rough heuristic for // earlier requests).
random_line_split
load.rs
Instant, } impl MasterCommitCache { /// Download the master-branch Rust commit list pub async fn download() -> anyhow::Result<Self> { let commits = collector::master_commits().await?; Ok(Self { commits, updated: Instant::now(), }) } } /// Site context object that contains global data pub struct SiteCtxt { /// Site configuration pub config: Config, /// Cached site landing page pub landing_page: ArcSwap<Option<Arc<crate::api::graphs::Response>>>, /// Index of various common queries pub index: ArcSwap<crate::db::Index>, /// Cached master-branch Rust commits pub master_commits: Arc<ArcSwap<MasterCommitCache>>, // outer Arc enables mutation in background task /// Database connection pool pub pool: Pool, } impl SiteCtxt { pub fn summary_scenarios(&self) -> Vec<crate::db::Scenario> { vec![ crate::db::Scenario::Empty, crate::db::Scenario::IncrementalEmpty, crate::db::Scenario::IncrementalFresh, crate::db::Scenario::IncrementalPatch("println".into()), ] } pub fn artifact_id_for_bound(&self, query: Bound, is_left: bool) -> Option<ArtifactId> { crate::selector::artifact_id_for_bound(&self.index.load(), query, is_left) } pub fn data_range(&self, range: RangeInclusive<Bound>) -> Vec<Commit> { crate::selector::range_subset(self.index.load().commits(), range) } /// Initialize `SiteCtxt` from database url pub async fn from_db_url(db_url: &str) -> anyhow::Result<Self> { let pool = Pool::open(db_url); let mut conn = pool.connection().await; let index = db::Index::load(&mut *conn).await; let config = if let Ok(s) = fs::read_to_string("site-config.toml") { toml::from_str(&s)? 
} else { Config { keys: Keys { github_api_token: std::env::var("GITHUB_API_TOKEN").ok(), github_webhook_secret: std::env::var("GITHUB_WEBHOOK_SECRET").ok(), }, } }; let master_commits = MasterCommitCache::download().await?; Ok(Self { config, index: ArcSwap::new(Arc::new(index)), master_commits: Arc::new(ArcSwap::new(Arc::new(master_commits))), pool, landing_page: ArcSwap::new(Arc::new(None)), }) } pub async fn conn(&self) -> Box<dyn database::pool::Connection> { self.pool.connection().await } /// Returns the not yet tested commits pub async fn missing_commits(&self) -> Vec<(Commit, MissingReason)> { let conn = self.conn().await; let (queued_pr_commits, in_progress_artifacts) = futures::join!(conn.queued_commits(), conn.in_progress_artifacts()); let master_commits = &self.get_master_commits().commits; let index = self.index.load(); let all_commits = index .commits() .iter() .map(|commit| commit.sha.clone()) .collect::<HashSet<_>>(); calculate_missing( master_commits.clone(), queued_pr_commits, in_progress_artifacts, all_commits, ) } /// Returns the not yet tested published artifacts, sorted from newest to oldest. pub async fn missing_published_artifacts(&self) -> anyhow::Result<Vec<String>> { let artifact_list: String = reqwest::get("https://static.rust-lang.org/manifests.txt") .await? .text() .await?; lazy_static! { static ref VERSION_REGEX: Regex = Regex::new(r"(\d+\.\d+.\d+)").unwrap(); } let conn = self.conn().await; let index = self.index.load(); let tested_artifacts: HashSet<_> = index.artifacts().collect(); let in_progress_tagged_artifacts: HashSet<_> = conn .in_progress_artifacts() .await .into_iter() .filter_map(|artifact| match artifact { ArtifactId::Commit(_) => None, ArtifactId::Tag(tag) => Some(tag), }) .collect(); // Gather at most last 20 published artifacts that are not yet tested and // are not in progress. 
let artifacts: Vec<_> = artifact_list .lines() .rev() .filter_map(parse_published_artifact_tag) .take(20) .filter(|artifact| { !tested_artifacts.contains(artifact.as_str()) &&!in_progress_tagged_artifacts.contains(artifact.as_str()) }) .collect(); Ok(artifacts) } pub async fn get_benchmark_category_map(&self) -> HashMap<Benchmark, Category> { let benchmarks = self.pool.connection().await.get_compile_benchmarks().await; benchmarks .into_iter() .map(|bench| { ( bench.name.as_str().into(), Category::from_db_representation(&bench.category).unwrap(), ) }) .collect() } /// Get cached master-branch Rust commits. /// Returns cached results immediately, but if the cached value is older than one minute, /// updates in a background task for next time. pub fn get_master_commits(&self) -> Guard<Arc<MasterCommitCache>> { let commits = self.master_commits.load(); if commits.updated.elapsed() > std::time::Duration::from_secs(60) { let master_commits = self.master_commits.clone(); tokio::task::spawn(async move { // if another update happens before this one is done, we will download the data twice, but that's it match MasterCommitCache::download().await { Ok(commits) => master_commits.store(Arc::new(commits)), Err(e) => { // couldn't get the data, keep serving cached results for now error!("error retrieving master commit list: {}", e) } } }); } commits } } /// Parses an artifact tag like `1.63.0` or `beta-2022-08-19` from a line taken from /// `https://static.rust-lang.org/manifests.txt`. fn parse_published_artifact_tag(line: &str) -> Option<String> { lazy_static! 
{ static ref VERSION_REGEX: Regex = Regex::new(r"(\d+\.\d+.\d+)").unwrap(); } let mut parts = line.rsplit('/'); let name = parts.next(); let date = parts.next(); if let Some(date) = date { if let Some(name) = name { // Create beta artifact in the form of beta-YYYY-MM-DD if name == "channel-rust-beta.toml" { return Some(format!("beta-{date}")); } else if let Some(capture) = VERSION_REGEX.captures(name) { if let Some(version) = capture.get(1).map(|c| c.as_str()) { return Some(version.to_string()); } } } } None } /// Calculating the missing commits. fn calculate_missing( master_commits: Vec<collector::MasterCommit>, queued_pr_commits: Vec<database::QueuedCommit>, in_progress_artifacts: Vec<ArtifactId>, all_commits: HashSet<String>, ) -> Vec<(Commit, MissingReason)> { calculate_missing_from( master_commits, queued_pr_commits, in_progress_artifacts, all_commits, Utc::now(), ) } /// Calculate the missing commits filtering out any that are 29 days or older than the supplied time. /// /// This is used by `calculate_missing` is exists as a separate function for testing purposes. 
fn calculate_missing_from( master_commits: Vec<collector::MasterCommit>, queued_pr_commits: Vec<database::QueuedCommit>, in_progress_artifacts: Vec<ArtifactId>, mut all_commits: HashSet<String>, time: chrono::DateTime<chrono::Utc>, ) -> Vec<(Commit, MissingReason)> { let mut queue = master_commits .into_iter() .filter(|c| time.signed_duration_since(c.time) < Duration::days(29)) .map(|c| { ( Commit { sha: c.sha, date: Date(c.time), r#type: CommitType::Master, }, // All recent master commits should have an associated PR MissingReason::Master { pr: c.pr.unwrap_or(0), parent_sha: c.parent_sha, is_try_parent: false, }, ) }) .collect::<Vec<_>>(); let master_commits = queue .iter() .map(|(mc, _)| mc.sha.clone()) .collect::<HashSet<_>>(); for database::QueuedCommit { sha, parent_sha, pr, include, exclude, runs, commit_date, } in queued_pr_commits .into_iter() // filter out any queued PR master commits (leaving only try commits) .filter(|c|!master_commits.contains(&c.sha)) { // Mark the parent commit as a try_parent. if let Some((_, metadata)) = queue.iter_mut().find(|(m, _)| m.sha == parent_sha.as_str()) { if let MissingReason::Master { is_try_parent,.. } = metadata { *is_try_parent = true; } else { unreachable!("try commit has non-master parent {:?}", metadata); }; } queue.push(( Commit { sha: sha.to_string(), date: commit_date.unwrap_or(Date::empty()), r#type: CommitType::Try, }, MissingReason::Try { pr, parent_sha, include, exclude, runs, }, )); } for aid in in_progress_artifacts { match aid { ArtifactId::Commit(c) => { let previous = queue .iter() .find(|(i, _)| i.sha == c.sha) .map(|v| Box::new(v.1.clone())); all_commits.remove(&c.sha); queue.insert(0, (c, MissingReason::InProgress(previous))); } ArtifactId::Tag(_) =>
} } let mut already_tested = all_commits.clone(); let mut i = 0; while i!= queue.len() { if!already_tested.insert(queue[i].0.sha.clone()) { queue.remove(i); } else { i += 1; } } sort_queue(all_commits.clone(), queue) } fn sort_queue( mut done: HashSet<String>, mut unordered_queue: Vec<(Commit, MissingReason)>, ) -> Vec<(Commit, MissingReason)> { // A topological sort, where each "level" is additionally altered such that // try commits come first, and then sorted by PR # (as a rough heuristic for // earlier requests). let mut finished = 0; while finished < unordered_queue.len() { // The next level is those elements in the unordered queue which // are ready to be benchmarked (i.e., those with parent in done or no // parent). let level_len = partition_in_place(unordered_queue[finished..].iter_mut(), |(_, mr)| { mr.parent_sha().map_or(true, |parent| done.contains(parent)) }); assert!( level_len!= 0, "at least one commit is ready done={:#?}, {:?}", done, &unordered_queue[finished..] ); let level = &mut unordered_queue[finished..][..level_len]; level.sort_unstable_by_key(|(c, mr)| { ( // InProgress MR go first (false < true) mr.parent_sha().is_some(), mr.pr().unwrap_or(0), c.sha.clone(), ) }); for (c, _) in level { done.insert(c.sha.clone()); } finished += level_len; } unordered_queue } // Copy of Iterator::partition_in_place, which is currently unstable. fn partition_in_place<'a, I, T: 'a, P>(mut iter: I, mut predicate: P) -> usize where I: Sized + DoubleEndedIterator<Item = &'a mut T>, P: FnMut(&T) -> bool, { // FIXME: should we worry about the count overflowing? The only way to have more than // `usize::MAX` mutable references is with ZSTs, which aren't useful to partition... // These closure "factory" functions exist to avoid genericity in `Self`. 
#[inline] fn is_false<'a, T>( predicate: &'a mut impl FnMut(&T) -> bool, true_count: &'a mut usize, ) -> impl FnMut(&&mut T) -> bool + 'a { move |x| { let p = predicate(&**x); *true_count += p as usize; !p } } #[inline] fn is_true<T>(predicate: &mut impl FnMut(&T) -> bool) -> impl FnMut(&&mut T) -> bool + '_ { move |x| predicate(&**x) } // Repeatedly find the first `false` and swap it with the last `true`. let mut true_count = 0; while let Some(head) = iter.find(is_false(&mut predicate, &mut true_count)) { if let Some(tail) = iter.rfind(is_true(&mut predicate)) { std::mem::swap(head, tail); true_count += 1; } else { break; } } true_count } /// One decimal place rounded percent #[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)] pub struct Percent(#[serde(with = "collector::round_float")] pub f64); #[cfg(test)] mod tests { use std::str::FromStr; use collector::MasterCommit; use database::QueuedCommit; use super::*; // Checks that when we have a setup like the following, where a -> b means b // is the parent of a (i.e., must be tested before we can report comparison // results for a): // // a -> b // -> try-on-a // // the resulting ordering is: // // b // a // try-on-a // // which ensures that as each commit finishes, we have the results for it. // // Note that try-on-a does *not* have a direct dependency on b's results // being available; we could order b after ([a, try-on-a, b]) but this means // that we have to be more careful about posting comparison results, and to // most observers they expect those posted as soon as the PR's build in the // queue finishes: not doing so will look odd to onlookers. 
#[test] fn try_commit_ancestors() { let time = chrono::DateTime::from_str("2021-09-01T00:00:00.000Z").unwrap(); let master_commits = vec![ MasterCommit { sha: "a".into(), parent_sha: "b".into(), pr: Some(2), time, }, MasterCommit { sha: "b".into(), parent_sha: "c".into(), pr: Some(1), time, }, ]; let queued_pr_commits = vec![ QueuedCommit { sha: "try-on-a".into(), parent_sha: "a".into(), pr: 3, include: None, exclude: None, runs: None, commit_date: None, }, QueuedCommit { sha: "b".into(), parent_sha: "c".into(), pr: 1, include: None, exclude: None, runs: None, commit_date: None, }, QueuedCommit { sha: "a".into(), parent_sha: "b".into(), pr: 2, include: None, exclude: None, runs: None, commit_date: None, }, ]; let in_progress_artifacts = vec![]; let mut all_commits = HashSet::new(); all_commits.insert("c".into()); let expected = vec![ ( Commit { sha: "b".into(), date: database::Date(time), r#type: CommitType::Master, }, MissingReason::Master { pr: 1, parent_sha: "c".into(), is_try_parent: false, }, ), ( Commit { sha: "a".into(), date: database::Date(time), r#type: CommitType::Master, }, MissingReason::Master { pr: 2, parent_sha: "b".into(), is_try_parent: true, }, ), ( Commit { sha: "try-on-a".into(), date: database::Date(time), r#type: CommitType::Try, }, MissingReason::Try { pr: 3, parent_sha
{ // do nothing, for now, though eventually we'll want an artifact queue }
conditional_block
load.rs
one is done, we will download the data twice, but that's it match MasterCommitCache::download().await { Ok(commits) => master_commits.store(Arc::new(commits)), Err(e) => { // couldn't get the data, keep serving cached results for now error!("error retrieving master commit list: {}", e) } } }); } commits } } /// Parses an artifact tag like `1.63.0` or `beta-2022-08-19` from a line taken from /// `https://static.rust-lang.org/manifests.txt`. fn parse_published_artifact_tag(line: &str) -> Option<String> { lazy_static! { static ref VERSION_REGEX: Regex = Regex::new(r"(\d+\.\d+.\d+)").unwrap(); } let mut parts = line.rsplit('/'); let name = parts.next(); let date = parts.next(); if let Some(date) = date { if let Some(name) = name { // Create beta artifact in the form of beta-YYYY-MM-DD if name == "channel-rust-beta.toml" { return Some(format!("beta-{date}")); } else if let Some(capture) = VERSION_REGEX.captures(name) { if let Some(version) = capture.get(1).map(|c| c.as_str()) { return Some(version.to_string()); } } } } None } /// Calculating the missing commits. fn calculate_missing( master_commits: Vec<collector::MasterCommit>, queued_pr_commits: Vec<database::QueuedCommit>, in_progress_artifacts: Vec<ArtifactId>, all_commits: HashSet<String>, ) -> Vec<(Commit, MissingReason)> { calculate_missing_from( master_commits, queued_pr_commits, in_progress_artifacts, all_commits, Utc::now(), ) } /// Calculate the missing commits filtering out any that are 29 days or older than the supplied time. /// /// This is used by `calculate_missing` is exists as a separate function for testing purposes. 
fn calculate_missing_from( master_commits: Vec<collector::MasterCommit>, queued_pr_commits: Vec<database::QueuedCommit>, in_progress_artifacts: Vec<ArtifactId>, mut all_commits: HashSet<String>, time: chrono::DateTime<chrono::Utc>, ) -> Vec<(Commit, MissingReason)> { let mut queue = master_commits .into_iter() .filter(|c| time.signed_duration_since(c.time) < Duration::days(29)) .map(|c| { ( Commit { sha: c.sha, date: Date(c.time), r#type: CommitType::Master, }, // All recent master commits should have an associated PR MissingReason::Master { pr: c.pr.unwrap_or(0), parent_sha: c.parent_sha, is_try_parent: false, }, ) }) .collect::<Vec<_>>(); let master_commits = queue .iter() .map(|(mc, _)| mc.sha.clone()) .collect::<HashSet<_>>(); for database::QueuedCommit { sha, parent_sha, pr, include, exclude, runs, commit_date, } in queued_pr_commits .into_iter() // filter out any queued PR master commits (leaving only try commits) .filter(|c|!master_commits.contains(&c.sha)) { // Mark the parent commit as a try_parent. if let Some((_, metadata)) = queue.iter_mut().find(|(m, _)| m.sha == parent_sha.as_str()) { if let MissingReason::Master { is_try_parent,.. 
} = metadata { *is_try_parent = true; } else { unreachable!("try commit has non-master parent {:?}", metadata); }; } queue.push(( Commit { sha: sha.to_string(), date: commit_date.unwrap_or(Date::empty()), r#type: CommitType::Try, }, MissingReason::Try { pr, parent_sha, include, exclude, runs, }, )); } for aid in in_progress_artifacts { match aid { ArtifactId::Commit(c) => { let previous = queue .iter() .find(|(i, _)| i.sha == c.sha) .map(|v| Box::new(v.1.clone())); all_commits.remove(&c.sha); queue.insert(0, (c, MissingReason::InProgress(previous))); } ArtifactId::Tag(_) => { // do nothing, for now, though eventually we'll want an artifact queue } } } let mut already_tested = all_commits.clone(); let mut i = 0; while i!= queue.len() { if!already_tested.insert(queue[i].0.sha.clone()) { queue.remove(i); } else { i += 1; } } sort_queue(all_commits.clone(), queue) } fn sort_queue( mut done: HashSet<String>, mut unordered_queue: Vec<(Commit, MissingReason)>, ) -> Vec<(Commit, MissingReason)> { // A topological sort, where each "level" is additionally altered such that // try commits come first, and then sorted by PR # (as a rough heuristic for // earlier requests). let mut finished = 0; while finished < unordered_queue.len() { // The next level is those elements in the unordered queue which // are ready to be benchmarked (i.e., those with parent in done or no // parent). let level_len = partition_in_place(unordered_queue[finished..].iter_mut(), |(_, mr)| { mr.parent_sha().map_or(true, |parent| done.contains(parent)) }); assert!( level_len!= 0, "at least one commit is ready done={:#?}, {:?}", done, &unordered_queue[finished..] 
); let level = &mut unordered_queue[finished..][..level_len]; level.sort_unstable_by_key(|(c, mr)| { ( // InProgress MR go first (false < true) mr.parent_sha().is_some(), mr.pr().unwrap_or(0), c.sha.clone(), ) }); for (c, _) in level { done.insert(c.sha.clone()); } finished += level_len; } unordered_queue } // Copy of Iterator::partition_in_place, which is currently unstable. fn partition_in_place<'a, I, T: 'a, P>(mut iter: I, mut predicate: P) -> usize where I: Sized + DoubleEndedIterator<Item = &'a mut T>, P: FnMut(&T) -> bool, { // FIXME: should we worry about the count overflowing? The only way to have more than // `usize::MAX` mutable references is with ZSTs, which aren't useful to partition... // These closure "factory" functions exist to avoid genericity in `Self`. #[inline] fn is_false<'a, T>( predicate: &'a mut impl FnMut(&T) -> bool, true_count: &'a mut usize, ) -> impl FnMut(&&mut T) -> bool + 'a { move |x| { let p = predicate(&**x); *true_count += p as usize; !p } } #[inline] fn is_true<T>(predicate: &mut impl FnMut(&T) -> bool) -> impl FnMut(&&mut T) -> bool + '_ { move |x| predicate(&**x) } // Repeatedly find the first `false` and swap it with the last `true`. 
let mut true_count = 0; while let Some(head) = iter.find(is_false(&mut predicate, &mut true_count)) { if let Some(tail) = iter.rfind(is_true(&mut predicate)) { std::mem::swap(head, tail); true_count += 1; } else { break; } } true_count } /// One decimal place rounded percent #[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)] pub struct Percent(#[serde(with = "collector::round_float")] pub f64); #[cfg(test)] mod tests { use std::str::FromStr; use collector::MasterCommit; use database::QueuedCommit; use super::*; // Checks that when we have a setup like the following, where a -> b means b // is the parent of a (i.e., must be tested before we can report comparison // results for a): // // a -> b // -> try-on-a // // the resulting ordering is: // // b // a // try-on-a // // which ensures that as each commit finishes, we have the results for it. // // Note that try-on-a does *not* have a direct dependency on b's results // being available; we could order b after ([a, try-on-a, b]) but this means // that we have to be more careful about posting comparison results, and to // most observers they expect those posted as soon as the PR's build in the // queue finishes: not doing so will look odd to onlookers. 
#[test] fn try_commit_ancestors() { let time = chrono::DateTime::from_str("2021-09-01T00:00:00.000Z").unwrap(); let master_commits = vec![ MasterCommit { sha: "a".into(), parent_sha: "b".into(), pr: Some(2), time, }, MasterCommit { sha: "b".into(), parent_sha: "c".into(), pr: Some(1), time, }, ]; let queued_pr_commits = vec![ QueuedCommit { sha: "try-on-a".into(), parent_sha: "a".into(), pr: 3, include: None, exclude: None, runs: None, commit_date: None, }, QueuedCommit { sha: "b".into(), parent_sha: "c".into(), pr: 1, include: None, exclude: None, runs: None, commit_date: None, }, QueuedCommit { sha: "a".into(), parent_sha: "b".into(), pr: 2, include: None, exclude: None, runs: None, commit_date: None, }, ]; let in_progress_artifacts = vec![]; let mut all_commits = HashSet::new(); all_commits.insert("c".into()); let expected = vec![ ( Commit { sha: "b".into(), date: database::Date(time), r#type: CommitType::Master, }, MissingReason::Master { pr: 1, parent_sha: "c".into(), is_try_parent: false, }, ), ( Commit { sha: "a".into(), date: database::Date(time), r#type: CommitType::Master, }, MissingReason::Master { pr: 2, parent_sha: "b".into(), is_try_parent: true, }, ), ( Commit { sha: "try-on-a".into(), date: database::Date(time), r#type: CommitType::Try, }, MissingReason::Try { pr: 3, parent_sha: "a".into(), include: None, exclude: None, runs: None, }, ), ]; let found = calculate_missing_from( master_commits, queued_pr_commits, in_progress_artifacts, all_commits, time, ); assert_eq!(expected, found, "{:#?}!= {:#?}", expected, found); } #[test] fn calculates_missing_correct() { let time = chrono::DateTime::from_str("2021-09-01T00:00:00.000Z").unwrap(); let master_commits = vec![ // A not yet tested commit MasterCommit { sha: "123".into(), parent_sha: "345".into(), pr: Some(11), time, }, // An already tested commit MasterCommit { sha: "abc".into(), parent_sha: "def".into(), pr: Some(90), time, }, // A queued PR commit MasterCommit { sha: "foo".into(), parent_sha: 
"bar".into(), pr: Some(77), time, }, ]; let queued_pr_commits = vec![ // A master commit QueuedCommit { sha: "foo".into(), parent_sha: "bar".into(), pr: 77, include: None, exclude: None, runs: None, commit_date: None, }, // A try run QueuedCommit { sha: "baz".into(), parent_sha: "foo".into(), pr: 101, include: None, exclude: None, runs: None, commit_date: None, }, ]; let in_progress_artifacts = vec![]; let mut all_commits = HashSet::new(); all_commits.insert(master_commits[1].sha.clone()); // Parent trailers all_commits.insert(master_commits[0].parent_sha.clone()); all_commits.insert(master_commits[1].parent_sha.clone()); all_commits.insert(master_commits[2].parent_sha.clone()); let expected = vec![ ( Commit { sha: "123".into(), date: database::Date(time), r#type: CommitType::Master, }, MissingReason::Master { pr: 11, parent_sha: "345".into(), is_try_parent: false, }, ), ( Commit { sha: "foo".into(), date: database::Date(time), r#type: CommitType::Master, }, MissingReason::Master { pr: 77, parent_sha: "bar".into(), is_try_parent: true, }, ), ( Commit { sha: "baz".into(), date: database::Date(time), r#type: CommitType::Try, }, MissingReason::Try { pr: 101, parent_sha: "foo".into(), include: None, exclude: None, runs: None, }, ), ]; assert_eq!( expected, calculate_missing_from( master_commits, queued_pr_commits, in_progress_artifacts, all_commits, time ) ); } #[test] fn parse_published_beta_artifact()
{ assert_eq!( parse_published_artifact_tag( "static.rust-lang.org/dist/2022-08-15/channel-rust-beta.toml" ), Some("beta-2022-08-15".to_string()) ); }
identifier_body
load.rs
{ pub sha: String, pub parent_sha: String, } impl TryCommit { pub fn sha(&self) -> &str { self.sha.as_str() } pub fn comparison_url(&self) -> String { format!( "https://perf.rust-lang.org/compare.html?start={}&end={}", self.parent_sha, self.sha ) } } /// Keys for accessing various services /// /// At the moment only used for accessing GitHub #[derive(Debug, Default, Deserialize)] pub struct Keys { /// GitHub API token from the `GITHUB_API_TOKEN` env variable #[serde(rename = "github")] pub github_api_token: Option<String>, /// GitHub webhook secret from the `GITHUB_WEBHOOK_SECRET` env variable #[serde(rename = "secret")] pub github_webhook_secret: Option<String>, } /// Site configuration #[derive(Debug, Deserialize)] pub struct Config { pub keys: Keys, } #[derive(Debug)] pub struct MasterCommitCache { pub commits: Vec<MasterCommit>, pub updated: Instant, } impl MasterCommitCache { /// Download the master-branch Rust commit list pub async fn download() -> anyhow::Result<Self> { let commits = collector::master_commits().await?; Ok(Self { commits, updated: Instant::now(), }) } } /// Site context object that contains global data pub struct SiteCtxt { /// Site configuration pub config: Config, /// Cached site landing page pub landing_page: ArcSwap<Option<Arc<crate::api::graphs::Response>>>, /// Index of various common queries pub index: ArcSwap<crate::db::Index>, /// Cached master-branch Rust commits pub master_commits: Arc<ArcSwap<MasterCommitCache>>, // outer Arc enables mutation in background task /// Database connection pool pub pool: Pool, } impl SiteCtxt { pub fn summary_scenarios(&self) -> Vec<crate::db::Scenario> { vec![ crate::db::Scenario::Empty, crate::db::Scenario::IncrementalEmpty, crate::db::Scenario::IncrementalFresh, crate::db::Scenario::IncrementalPatch("println".into()), ] } pub fn artifact_id_for_bound(&self, query: Bound, is_left: bool) -> Option<ArtifactId> { crate::selector::artifact_id_for_bound(&self.index.load(), query, is_left) } pub fn 
data_range(&self, range: RangeInclusive<Bound>) -> Vec<Commit> { crate::selector::range_subset(self.index.load().commits(), range) } /// Initialize `SiteCtxt` from database url pub async fn from_db_url(db_url: &str) -> anyhow::Result<Self> { let pool = Pool::open(db_url); let mut conn = pool.connection().await; let index = db::Index::load(&mut *conn).await; let config = if let Ok(s) = fs::read_to_string("site-config.toml") { toml::from_str(&s)? } else { Config { keys: Keys { github_api_token: std::env::var("GITHUB_API_TOKEN").ok(), github_webhook_secret: std::env::var("GITHUB_WEBHOOK_SECRET").ok(), }, } }; let master_commits = MasterCommitCache::download().await?; Ok(Self { config, index: ArcSwap::new(Arc::new(index)), master_commits: Arc::new(ArcSwap::new(Arc::new(master_commits))), pool, landing_page: ArcSwap::new(Arc::new(None)), }) } pub async fn conn(&self) -> Box<dyn database::pool::Connection> { self.pool.connection().await } /// Returns the not yet tested commits pub async fn missing_commits(&self) -> Vec<(Commit, MissingReason)> { let conn = self.conn().await; let (queued_pr_commits, in_progress_artifacts) = futures::join!(conn.queued_commits(), conn.in_progress_artifacts()); let master_commits = &self.get_master_commits().commits; let index = self.index.load(); let all_commits = index .commits() .iter() .map(|commit| commit.sha.clone()) .collect::<HashSet<_>>(); calculate_missing( master_commits.clone(), queued_pr_commits, in_progress_artifacts, all_commits, ) } /// Returns the not yet tested published artifacts, sorted from newest to oldest. pub async fn missing_published_artifacts(&self) -> anyhow::Result<Vec<String>> { let artifact_list: String = reqwest::get("https://static.rust-lang.org/manifests.txt") .await? .text() .await?; lazy_static! 
{ static ref VERSION_REGEX: Regex = Regex::new(r"(\d+\.\d+.\d+)").unwrap(); } let conn = self.conn().await; let index = self.index.load(); let tested_artifacts: HashSet<_> = index.artifacts().collect(); let in_progress_tagged_artifacts: HashSet<_> = conn .in_progress_artifacts() .await .into_iter() .filter_map(|artifact| match artifact { ArtifactId::Commit(_) => None, ArtifactId::Tag(tag) => Some(tag), }) .collect(); // Gather at most last 20 published artifacts that are not yet tested and // are not in progress. let artifacts: Vec<_> = artifact_list .lines() .rev() .filter_map(parse_published_artifact_tag) .take(20) .filter(|artifact| { !tested_artifacts.contains(artifact.as_str()) &&!in_progress_tagged_artifacts.contains(artifact.as_str()) }) .collect(); Ok(artifacts) } pub async fn get_benchmark_category_map(&self) -> HashMap<Benchmark, Category> { let benchmarks = self.pool.connection().await.get_compile_benchmarks().await; benchmarks .into_iter() .map(|bench| { ( bench.name.as_str().into(), Category::from_db_representation(&bench.category).unwrap(), ) }) .collect() } /// Get cached master-branch Rust commits. /// Returns cached results immediately, but if the cached value is older than one minute, /// updates in a background task for next time. 
pub fn get_master_commits(&self) -> Guard<Arc<MasterCommitCache>> { let commits = self.master_commits.load(); if commits.updated.elapsed() > std::time::Duration::from_secs(60) { let master_commits = self.master_commits.clone(); tokio::task::spawn(async move { // if another update happens before this one is done, we will download the data twice, but that's it match MasterCommitCache::download().await { Ok(commits) => master_commits.store(Arc::new(commits)), Err(e) => { // couldn't get the data, keep serving cached results for now error!("error retrieving master commit list: {}", e) } } }); } commits } } /// Parses an artifact tag like `1.63.0` or `beta-2022-08-19` from a line taken from /// `https://static.rust-lang.org/manifests.txt`. fn parse_published_artifact_tag(line: &str) -> Option<String> { lazy_static! { static ref VERSION_REGEX: Regex = Regex::new(r"(\d+\.\d+.\d+)").unwrap(); } let mut parts = line.rsplit('/'); let name = parts.next(); let date = parts.next(); if let Some(date) = date { if let Some(name) = name { // Create beta artifact in the form of beta-YYYY-MM-DD if name == "channel-rust-beta.toml" { return Some(format!("beta-{date}")); } else if let Some(capture) = VERSION_REGEX.captures(name) { if let Some(version) = capture.get(1).map(|c| c.as_str()) { return Some(version.to_string()); } } } } None } /// Calculating the missing commits. fn calculate_missing( master_commits: Vec<collector::MasterCommit>, queued_pr_commits: Vec<database::QueuedCommit>, in_progress_artifacts: Vec<ArtifactId>, all_commits: HashSet<String>, ) -> Vec<(Commit, MissingReason)> { calculate_missing_from( master_commits, queued_pr_commits, in_progress_artifacts, all_commits, Utc::now(), ) } /// Calculate the missing commits filtering out any that are 29 days or older than the supplied time. /// /// This is used by `calculate_missing` is exists as a separate function for testing purposes. 
fn calculate_missing_from( master_commits: Vec<collector::MasterCommit>, queued_pr_commits: Vec<database::QueuedCommit>, in_progress_artifacts: Vec<ArtifactId>, mut all_commits: HashSet<String>, time: chrono::DateTime<chrono::Utc>, ) -> Vec<(Commit, MissingReason)> { let mut queue = master_commits .into_iter() .filter(|c| time.signed_duration_since(c.time) < Duration::days(29)) .map(|c| { ( Commit { sha: c.sha, date: Date(c.time), r#type: CommitType::Master, }, // All recent master commits should have an associated PR MissingReason::Master { pr: c.pr.unwrap_or(0), parent_sha: c.parent_sha, is_try_parent: false, }, ) }) .collect::<Vec<_>>(); let master_commits = queue .iter() .map(|(mc, _)| mc.sha.clone()) .collect::<HashSet<_>>(); for database::QueuedCommit { sha, parent_sha, pr, include, exclude, runs, commit_date, } in queued_pr_commits .into_iter() // filter out any queued PR master commits (leaving only try commits) .filter(|c|!master_commits.contains(&c.sha)) { // Mark the parent commit as a try_parent. if let Some((_, metadata)) = queue.iter_mut().find(|(m, _)| m.sha == parent_sha.as_str()) { if let MissingReason::Master { is_try_parent,.. 
} = metadata { *is_try_parent = true; } else { unreachable!("try commit has non-master parent {:?}", metadata); }; } queue.push(( Commit { sha: sha.to_string(), date: commit_date.unwrap_or(Date::empty()), r#type: CommitType::Try, }, MissingReason::Try { pr, parent_sha, include, exclude, runs, }, )); } for aid in in_progress_artifacts { match aid { ArtifactId::Commit(c) => { let previous = queue .iter() .find(|(i, _)| i.sha == c.sha) .map(|v| Box::new(v.1.clone())); all_commits.remove(&c.sha); queue.insert(0, (c, MissingReason::InProgress(previous))); } ArtifactId::Tag(_) => { // do nothing, for now, though eventually we'll want an artifact queue } } } let mut already_tested = all_commits.clone(); let mut i = 0; while i!= queue.len() { if!already_tested.insert(queue[i].0.sha.clone()) { queue.remove(i); } else { i += 1; } } sort_queue(all_commits.clone(), queue) } fn sort_queue( mut done: HashSet<String>, mut unordered_queue: Vec<(Commit, MissingReason)>, ) -> Vec<(Commit, MissingReason)> { // A topological sort, where each "level" is additionally altered such that // try commits come first, and then sorted by PR # (as a rough heuristic for // earlier requests). let mut finished = 0; while finished < unordered_queue.len() { // The next level is those elements in the unordered queue which // are ready to be benchmarked (i.e., those with parent in done or no // parent). let level_len = partition_in_place(unordered_queue[finished..].iter_mut(), |(_, mr)| { mr.parent_sha().map_or(true, |parent| done.contains(parent)) }); assert!( level_len!= 0, "at least one commit is ready done={:#?}, {:?}", done, &unordered_queue[finished..] 
); let level = &mut unordered_queue[finished..][..level_len]; level.sort_unstable_by_key(|(c, mr)| { ( // InProgress MR go first (false < true) mr.parent_sha().is_some(), mr.pr().unwrap_or(0), c.sha.clone(), ) }); for (c, _) in level { done.insert(c.sha.clone()); } finished += level_len; } unordered_queue } // Copy of Iterator::partition_in_place, which is currently unstable. fn partition_in_place<'a, I, T: 'a, P>(mut iter: I, mut predicate: P) -> usize where I: Sized + DoubleEndedIterator<Item = &'a mut T>, P: FnMut(&T) -> bool, { // FIXME: should we worry about the count overflowing? The only way to have more than // `usize::MAX` mutable references is with ZSTs, which aren't useful to partition... // These closure "factory" functions exist to avoid genericity in `Self`. #[inline] fn is_false<'a, T>( predicate: &'a mut impl FnMut(&T) -> bool, true_count: &'a mut usize, ) -> impl FnMut(&&mut T) -> bool + 'a { move |x| { let p = predicate(&**x); *true_count += p as usize; !p } } #[inline] fn is_true<T>(predicate: &mut impl FnMut(&T) -> bool) -> impl FnMut(&&mut T) -> bool + '_ { move |x| predicate(&**x) } // Repeatedly find the first `false` and swap it with the last `true`. 
let mut true_count = 0; while let Some(head) = iter.find(is_false(&mut predicate, &mut true_count)) { if let Some(tail) = iter.rfind(is_true(&mut predicate)) { std::mem::swap(head, tail); true_count += 1; } else { break; } } true_count } /// One decimal place rounded percent #[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)] pub struct Percent(#[serde(with = "collector::round_float")] pub f64); #[cfg(test)] mod tests { use std::str::FromStr; use collector::MasterCommit; use database::QueuedCommit; use super::*; // Checks that when we have a setup like the following, where a -> b means b // is the parent of a (i.e., must be tested before we can report comparison // results for a): // // a -> b // -> try-on-a // // the resulting ordering is: // // b // a // try-on-a // // which ensures that as each commit finishes, we have the results for it. // // Note that try-on-a does *not* have a direct dependency on b's results // being available; we could order b after ([a, try-on-a, b]) but this means // that we have to be more careful about posting comparison results, and to // most observers they expect those posted as soon as the PR's build in the // queue finishes: not doing so will look odd to onlookers. 
#[test] fn try_commit_ancestors() { let time = chrono::DateTime::from_str("2021-09-01T00:00:00.000Z").unwrap(); let master_commits = vec![ MasterCommit { sha: "a".into(), parent_sha: "b".into(), pr: Some(2), time, }, MasterCommit { sha: "b".into(), parent_sha: "c".into(), pr: Some(1), time, }, ]; let queued_pr_commits = vec![ QueuedCommit { sha: "try-on-a".into(), parent_sha: "a".into(), pr: 3, include: None, exclude: None, runs: None, commit_date: None, }, QueuedCommit { sha: "b".into(), parent_sha: "c".into(), pr: 1, include: None, exclude: None, runs: None, commit_date: None, }, QueuedCommit { sha: "a".into(), parent_sha: "b".into(), pr: 2, include: None, exclude: None, runs: None, commit_date: None, }, ]; let in_progress_artifacts = vec![]; let mut all_commits = HashSet::new(); all_commits.insert("c".into()); let expected = vec![ ( Commit { sha: "b".into(), date: database::Date(time), r#type: CommitType::Master, }, MissingReason::Master { pr: 1, parent_sha: "c".into(), is_try_parent: false, }, ), ( Commit { sha: "a".into(), date: database::Date(time), r#type: CommitType::Master,
TryCommit
identifier_name
iso_spec.rs
//! This module contains implementation of specification, its segments and associated operations //! use std::collections::HashMap; use std::fmt::{Display, Formatter}; use std::io::Cursor; use crate::iso8583::{bitmap, IsoError}; use crate::iso8583::field::{Field, ParseError}; use crate::iso8583::yaml_de::YMessageSegment; use crate::iso8583::bitmap::Bitmap; use crate::iso8583::config::Config; use crate::crypto::pin::generate_pin_block; use crate::crypto::mac::generate_mac; // Reads the spec definitions from YAML file lazy_static! { static ref ALL_SPECS: std::collections::HashMap<String,Spec> ={ println!("current-dir: {}",std::env::current_dir().unwrap().to_str().unwrap()); let mut spec_file = String::new(); match std::env::var_os("SPEC_FILE") { Some(v) => { spec_file.push_str(v.to_str().unwrap()); println!("spec-file: {}",spec_file) } None => panic!("SPEC_FILE env variable not defined!") } let mut specs=HashMap::<String,Spec>::new(); match crate::iso8583::yaml_de::read_spec(spec_file.as_str()){ Ok(spec)=> specs.insert(String::from(spec.name()),spec), Err(e)=> panic!(e.msg) }; specs }; } /// This struct is the definition of the specification - layout of fields etc.. 
pub struct Spec { pub(in crate::iso8583) name: String, #[allow(dead_code)] pub(in crate::iso8583) id: u32, pub(in crate::iso8583) messages: Vec<MessageSegment>, pub(in crate::iso8583) header_fields: Vec<Box<dyn Field>>, } /// This struct represents a segment in the Spec (a auth request, a response etc) pub struct MessageSegment { pub(in crate::iso8583) name: String, #[allow(dead_code)] pub(in crate::iso8583) id: u32, pub(in crate::iso8583) selector: Vec<String>, pub(in crate::iso8583) fields: Vec<Box<dyn Field>>, } impl From<&YMessageSegment> for MessageSegment { fn from(yms: &YMessageSegment) -> Self { let mut fields: Vec<Box<dyn Field>> = Vec::<Box<dyn Field>>::new(); yms.fields.iter().for_each(|f| { fields.push(Into::<Box<dyn Field>>::into(f)); }); MessageSegment { name: yms.name.clone(), id: yms.id, selector: yms.selector.iter().map(|s| s.clone()).collect(), fields, } } } /// Operations on MessageSegment impl MessageSegment { /// Returns name of segment pub fn name(&self) -> &str { return self.name.as_str(); } /// Returns a field given it's name if defined in the spec or a IsoError if the field is not found pub fn field_by_name(&self, name: &String) -> Result<&dyn Field, IsoError> { match self.fields.iter().find(|field| -> bool{ if field.name() == name { true } else { false } }) { None => { //try bitmap let bmp = self.field_by_name(&"bitmap".to_string()).unwrap(); Ok(bmp.child_by_name(name)) } Some(f) => { Ok(f.as_ref()) } } } } impl Spec { pub fn name(&self) -> &str { &self.name } /// Returns a message segment given its name or a IsoError if such a segment is not present pub fn get_message(&self, name: &str) -> Result<&MessageSegment, IsoError> { for msg in &self.messages { if msg.name() == name { return Ok(msg); } } return Err(IsoError { msg: format!("{} message not found", name) }); } /// Returns a message that corresponds to the given header value or an IsoError if such a selector /// doesn't exist pub fn get_message_from_header(&self, header_val: &str) -> 
Result<&MessageSegment, IsoError> { for msg in &self.messages { if msg.selector.contains(&header_val.to_string()) { return Ok(msg); } } return Err(IsoError { msg: format!("message not found for header - {}", header_val) }); } /// Returns a segment by first parsing the header field and then matching the header value against /// the selector pub fn get_msg_segment(&'static self, data: &Vec<u8>) -> Result<&MessageSegment, IsoError> { let mut selector = String::new(); let mut f2d_map = HashMap::new(); let mut in_buf = Cursor::new(data); for f in &self.header_fields { match f.parse(&mut in_buf, &mut f2d_map) { Ok(_) => { selector.extend(f.to_string(f2d_map.get(f.name()).unwrap()).chars()); } Err(e) => { return Err(IsoError { msg: e.msg }); } } } debug!("computed header value for incoming message = {}", selector); match self.get_message_from_header(selector.as_str()) { Ok(msg) => { Ok(msg) } Err(e) => Err(e) } } } /// This struct represents a parsed message for a given spec pub struct IsoMsg { // The spec associated with this IsoMsg pub spec: &'static Spec, /// The segment that the IsoMsg represents pub msg: &'static MessageSegment, /// field data map - name to raw value pub fd_map: std::collections::HashMap<String, Vec<u8>>, /// the bitmap on the iso message pub bmp: bitmap::Bitmap, } /// Operations on IsoMsg impl IsoMsg { pub fn spec(&self) -> &'static Spec { self.spec } /// Returns the value of a field by position in the bitmap pub fn bmp_child_value(&self, pos: u32) -> Result<String, IsoError> { let f = self.msg.fields.iter().find(|f| -> bool { if f.name() == "bitmap" { true } else { false } }).unwrap(); let cf = f.child_by_pos(pos); match self.fd_map.get(cf.name()) { None => { Err(IsoError { msg: format!("no value for field at position {}", pos) }) } Some(v) => { Ok(cf.to_string(v)) } } } /// Returns the value of a top level field like message_type pub fn get_field_value(&self, name: &String) -> Result<String, IsoError> { match self.msg.fields.iter().find(|f| -> 
bool { if f.name() == name { true } else { false } }) { Some(f) => { Ok(f.to_string(self.fd_map.get(name).unwrap())) } None => { Err(IsoError { msg: format!("No such field : {}", name) }) } } } /// sets a top-level field like message_type etc pub fn set(&mut self, name: &str, val: &str) -> Result<(), IsoError> { match self.msg.field_by_name(&name.to_string()) { Ok(f) => { self.fd_map.insert(f.name().clone(), f.to_raw(val)); Ok(()) } Err(e) => Err(e) } } /// Sets a field in the bitmap with the given value pub fn set_on(&mut self, pos: u32, val: &str) -> Result<(), IsoError> { match self.msg.field_by_name(&"bitmap".to_string()) { Ok(f) => { let cf = f.child_by_pos(pos); self.fd_map.insert(cf.name().clone(), cf.to_raw(val)); self.bmp.set_on(pos); Ok(()) } Err(e) => Err(e) } } /// Echoes (sets the value with the identical field in req_msg) for given positions in the bitmap pub fn echo_from(&mut self, req_msg: &IsoMsg, positions: &[u32]) -> Result<(), IsoError> { match self.msg.field_by_name(&"bitmap".to_string()) { Ok(f) => { for pos in positions { let cf = f.child_by_pos(*pos); match req_msg.bmp_child_value(*pos) { Ok(res) => { debug!("echoing.. 
{}: {}", pos, res); self.fd_map.insert(cf.name().clone(), cf.to_raw(res.as_str())); self.bmp.set_on(*pos); } Err(e) => { return Err(e); } } } Ok(()) } Err(e) => Err(e) } } /// Assembles the messages into a Vec<u8> or a IsoError on failure pub fn assemble(&self) -> Result<Vec<u8>, IsoError> { let mut out_buf: Vec<u8> = Vec::new(); for f in &self.msg.fields { match f.assemble(&mut out_buf, &self) { Ok(_) => {} Err(e) => { return Err(IsoError { msg: e.msg }); } } } Ok(out_buf) } /// Sets F52 based on provided clear pin, and format, key provided via cfg pub fn set_pin(&mut self, pin: &str, pan: &str, cfg: &Config) -> Result<(), IsoError> { if cfg.get_pin_fmt().is_none() || cfg.get_pin_key().is_none() { return Err(IsoError { msg: format!("missing pin_format or key in call to set_pin") }); } match generate_pin_block(&cfg.get_pin_fmt().as_ref().unwrap(), pin, pan, &hex::decode(cfg.get_pin_key().as_ref().unwrap().as_str()).unwrap()) { Ok(v) => { self.set_on(52, hex::encode(v).as_str()) } Err(e) => { Err(IsoError { msg: e.msg }) } } } /// Sets F64 or F128 based on algo, padding and key provided via cfg pub fn set_mac(&mut self, cfg: &Config) -> Result<(), IsoError> { if cfg.get_mac_algo().is_none() || cfg.get_mac_padding().is_none() || cfg.get_mac_key().is_none() { return Err(IsoError { msg: format!("missing mac_algo or padding or key in call to set_mac") }); } if self.bmp.is_on(1) { self.set_on(128, "0000000000000000") } else { self.set_on(64, "0000000000000000") }.unwrap(); let data: Vec<u8> = match self.assemble() { Ok(v) => { v } Err(e) => { return Err(e); } }; debug!("generating mac on: {}", hex::encode(&data)); match generate_mac(&cfg.get_mac_algo().as_ref().unwrap(), &cfg.get_mac_padding().as_ref().unwrap(), &data[0..data.len() - 8].to_vec(), &hex::decode(cfg.get_mac_key().as_ref().unwrap()).unwrap()) { Ok(v) => { let pos: u32; if self.bmp.is_on(1) { pos = 128; } else { pos = 64; } self.set_on(pos, hex::encode(v).as_str()).unwrap_or_default(); Ok(()) } Err(e) => { 
Err(IsoError { msg: e.msg }) } } } } fn collect_children(f: &dyn Field, ordered_fields: &mut Vec<String>) { ordered_fields.push(f.name().clone()); f.children().iter().for_each(|f| collect_children(*f, ordered_fields)); } impl Display for IsoMsg { fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), std::fmt::Error> { let mut res = "".to_string(); let mut ordered_fields = vec![]; self.msg.fields.iter().for_each(|f| collect_children(f.as_ref(), &mut ordered_fields)); res = res + format!("\n{:20.40} : {:5} : {} ", "-Field-", "-Position-", "-Field Value-").as_str(); for f in ordered_fields { if self.fd_map.contains_key(f.as_str()) { let field = self.msg.field_by_name(&f).unwrap(); let field_value = &self.fd_map.get(f.as_str()).unwrap(); let mut pos_str: String = String::new(); if field.position() > 0 { pos_str = format!("{:03}", field.position()); } //debug!("** formatting {}",field.name()); res = res + format!("\n{:20.40} : {:^10} : {} ", f, pos_str.as_str(), field.to_string(field_value)).as_str(); } } f.write_str(&res).unwrap(); Ok(()) } } /// Returns a spec given its name pub fn spec(_name: &str) -> &'static Spec { //TODO:: handle case of multiple specs, for now just return the first ALL_SPECS.iter().find_map(|(_k, v)| Some(v)).unwrap() } /// Returns a empty IsoMsg that can be used to create a message pub fn new_msg(spec: &'static Spec, seg: &'static MessageSegment) -> IsoMsg
impl Spec { /// Returns a IsoMsg after parsing data or an ParseError on failure pub fn parse(&'static self, data: &mut Vec<u8>) -> Result<IsoMsg, ParseError> { let msg = self.get_msg_segment(data); if msg.is_err() { return Err(ParseError { msg: msg.err().unwrap().msg }); } let mut iso_msg = IsoMsg { spec: &self, msg: &msg.unwrap(), fd_map: HashMap::new(), bmp: Bitmap::new(0, 0, 0), }; let mut cp_data = Cursor::new(data); for f in &iso_msg.msg.fields { debug!("parsing field : {}", f.name()); let res = match f.parse(&mut cp_data, &mut iso_msg.fd_map) { Err(e) => Result::Err(e), Ok(_) => { //if this is "THE" bitmap, then save it on isomsg if f.name() == "bitmap" { let bmp_data = iso_msg.fd_map.get(f.name()).unwrap(); iso_msg.bmp = Bitmap::from_vec(bmp_data); } Ok(()) } }; if res.is_err() { return Result::Err(res.err().unwrap()); } } Ok(iso_msg) } }
{ IsoMsg { spec, msg: seg, fd_map: HashMap::new(), bmp: Bitmap::new(0, 0, 0), } }
identifier_body
iso_spec.rs
//! This module contains implementation of specification, its segments and associated operations //! use std::collections::HashMap; use std::fmt::{Display, Formatter}; use std::io::Cursor; use crate::iso8583::{bitmap, IsoError}; use crate::iso8583::field::{Field, ParseError}; use crate::iso8583::yaml_de::YMessageSegment; use crate::iso8583::bitmap::Bitmap; use crate::iso8583::config::Config; use crate::crypto::pin::generate_pin_block; use crate::crypto::mac::generate_mac; // Reads the spec definitions from YAML file lazy_static! { static ref ALL_SPECS: std::collections::HashMap<String,Spec> ={ println!("current-dir: {}",std::env::current_dir().unwrap().to_str().unwrap()); let mut spec_file = String::new(); match std::env::var_os("SPEC_FILE") { Some(v) => { spec_file.push_str(v.to_str().unwrap()); println!("spec-file: {}",spec_file) } None => panic!("SPEC_FILE env variable not defined!") } let mut specs=HashMap::<String,Spec>::new(); match crate::iso8583::yaml_de::read_spec(spec_file.as_str()){ Ok(spec)=> specs.insert(String::from(spec.name()),spec), Err(e)=> panic!(e.msg) }; specs }; } /// This struct is the definition of the specification - layout of fields etc.. 
pub struct Spec { pub(in crate::iso8583) name: String, #[allow(dead_code)] pub(in crate::iso8583) id: u32, pub(in crate::iso8583) messages: Vec<MessageSegment>, pub(in crate::iso8583) header_fields: Vec<Box<dyn Field>>, } /// This struct represents a segment in the Spec (a auth request, a response etc) pub struct MessageSegment { pub(in crate::iso8583) name: String, #[allow(dead_code)] pub(in crate::iso8583) id: u32, pub(in crate::iso8583) selector: Vec<String>, pub(in crate::iso8583) fields: Vec<Box<dyn Field>>, } impl From<&YMessageSegment> for MessageSegment { fn from(yms: &YMessageSegment) -> Self { let mut fields: Vec<Box<dyn Field>> = Vec::<Box<dyn Field>>::new(); yms.fields.iter().for_each(|f| { fields.push(Into::<Box<dyn Field>>::into(f)); }); MessageSegment { name: yms.name.clone(), id: yms.id, selector: yms.selector.iter().map(|s| s.clone()).collect(), fields, } } } /// Operations on MessageSegment impl MessageSegment { /// Returns name of segment pub fn name(&self) -> &str { return self.name.as_str(); } /// Returns a field given it's name if defined in the spec or a IsoError if the field is not found pub fn field_by_name(&self, name: &String) -> Result<&dyn Field, IsoError> { match self.fields.iter().find(|field| -> bool{ if field.name() == name { true } else { false } }) { None => { //try bitmap let bmp = self.field_by_name(&"bitmap".to_string()).unwrap(); Ok(bmp.child_by_name(name)) } Some(f) => { Ok(f.as_ref()) } } } } impl Spec { pub fn name(&self) -> &str { &self.name } /// Returns a message segment given its name or a IsoError if such a segment is not present pub fn get_message(&self, name: &str) -> Result<&MessageSegment, IsoError> { for msg in &self.messages { if msg.name() == name { return Ok(msg); } } return Err(IsoError { msg: format!("{} message not found", name) }); } /// Returns a message that corresponds to the given header value or an IsoError if such a selector /// doesn't exist pub fn get_message_from_header(&self, header_val: &str) -> 
Result<&MessageSegment, IsoError> { for msg in &self.messages { if msg.selector.contains(&header_val.to_string()) { return Ok(msg); } } return Err(IsoError { msg: format!("message not found for header - {}", header_val) }); } /// Returns a segment by first parsing the header field and then matching the header value against /// the selector pub fn get_msg_segment(&'static self, data: &Vec<u8>) -> Result<&MessageSegment, IsoError> { let mut selector = String::new(); let mut f2d_map = HashMap::new(); let mut in_buf = Cursor::new(data); for f in &self.header_fields { match f.parse(&mut in_buf, &mut f2d_map) { Ok(_) => { selector.extend(f.to_string(f2d_map.get(f.name()).unwrap()).chars()); } Err(e) => { return Err(IsoError { msg: e.msg }); } } } debug!("computed header value for incoming message = {}", selector); match self.get_message_from_header(selector.as_str()) { Ok(msg) => { Ok(msg) } Err(e) => Err(e) } } } /// This struct represents a parsed message for a given spec pub struct IsoMsg { // The spec associated with this IsoMsg pub spec: &'static Spec, /// The segment that the IsoMsg represents pub msg: &'static MessageSegment, /// field data map - name to raw value pub fd_map: std::collections::HashMap<String, Vec<u8>>, /// the bitmap on the iso message pub bmp: bitmap::Bitmap, } /// Operations on IsoMsg impl IsoMsg { pub fn spec(&self) -> &'static Spec { self.spec } /// Returns the value of a field by position in the bitmap pub fn bmp_child_value(&self, pos: u32) -> Result<String, IsoError> { let f = self.msg.fields.iter().find(|f| -> bool { if f.name() == "bitmap" { true } else { false } }).unwrap(); let cf = f.child_by_pos(pos); match self.fd_map.get(cf.name()) { None => { Err(IsoError { msg: format!("no value for field at position {}", pos) }) } Some(v) => { Ok(cf.to_string(v)) } } } /// Returns the value of a top level field like message_type pub fn get_field_value(&self, name: &String) -> Result<String, IsoError> { match self.msg.fields.iter().find(|f| -> 
bool { if f.name() == name { true } else { false } }) { Some(f) => { Ok(f.to_string(self.fd_map.get(name).unwrap())) } None => { Err(IsoError { msg: format!("No such field : {}", name) }) } } } /// sets a top-level field like message_type etc pub fn set(&mut self, name: &str, val: &str) -> Result<(), IsoError> { match self.msg.field_by_name(&name.to_string()) { Ok(f) => { self.fd_map.insert(f.name().clone(), f.to_raw(val)); Ok(()) } Err(e) => Err(e) } } /// Sets a field in the bitmap with the given value pub fn set_on(&mut self, pos: u32, val: &str) -> Result<(), IsoError> { match self.msg.field_by_name(&"bitmap".to_string()) { Ok(f) => { let cf = f.child_by_pos(pos); self.fd_map.insert(cf.name().clone(), cf.to_raw(val)); self.bmp.set_on(pos); Ok(()) } Err(e) => Err(e) } } /// Echoes (sets the value with the identical field in req_msg) for given positions in the bitmap pub fn echo_from(&mut self, req_msg: &IsoMsg, positions: &[u32]) -> Result<(), IsoError> { match self.msg.field_by_name(&"bitmap".to_string()) { Ok(f) => { for pos in positions { let cf = f.child_by_pos(*pos); match req_msg.bmp_child_value(*pos) { Ok(res) => { debug!("echoing.. 
{}: {}", pos, res); self.fd_map.insert(cf.name().clone(), cf.to_raw(res.as_str())); self.bmp.set_on(*pos); } Err(e) => { return Err(e); } } } Ok(()) } Err(e) => Err(e) } } /// Assembles the messages into a Vec<u8> or a IsoError on failure pub fn assemble(&self) -> Result<Vec<u8>, IsoError> { let mut out_buf: Vec<u8> = Vec::new(); for f in &self.msg.fields { match f.assemble(&mut out_buf, &self) { Ok(_) => {} Err(e) => { return Err(IsoError { msg: e.msg }); } } } Ok(out_buf) } /// Sets F52 based on provided clear pin, and format, key provided via cfg pub fn set_pin(&mut self, pin: &str, pan: &str, cfg: &Config) -> Result<(), IsoError> { if cfg.get_pin_fmt().is_none() || cfg.get_pin_key().is_none() { return Err(IsoError { msg: format!("missing pin_format or key in call to set_pin") }); } match generate_pin_block(&cfg.get_pin_fmt().as_ref().unwrap(), pin, pan, &hex::decode(cfg.get_pin_key().as_ref().unwrap().as_str()).unwrap()) { Ok(v) => { self.set_on(52, hex::encode(v).as_str()) } Err(e) => { Err(IsoError { msg: e.msg }) }
} /// Sets F64 or F128 based on algo, padding and key provided via cfg pub fn set_mac(&mut self, cfg: &Config) -> Result<(), IsoError> { if cfg.get_mac_algo().is_none() || cfg.get_mac_padding().is_none() || cfg.get_mac_key().is_none() { return Err(IsoError { msg: format!("missing mac_algo or padding or key in call to set_mac") }); } if self.bmp.is_on(1) { self.set_on(128, "0000000000000000") } else { self.set_on(64, "0000000000000000") }.unwrap(); let data: Vec<u8> = match self.assemble() { Ok(v) => { v } Err(e) => { return Err(e); } }; debug!("generating mac on: {}", hex::encode(&data)); match generate_mac(&cfg.get_mac_algo().as_ref().unwrap(), &cfg.get_mac_padding().as_ref().unwrap(), &data[0..data.len() - 8].to_vec(), &hex::decode(cfg.get_mac_key().as_ref().unwrap()).unwrap()) { Ok(v) => { let pos: u32; if self.bmp.is_on(1) { pos = 128; } else { pos = 64; } self.set_on(pos, hex::encode(v).as_str()).unwrap_or_default(); Ok(()) } Err(e) => { Err(IsoError { msg: e.msg }) } } } } fn collect_children(f: &dyn Field, ordered_fields: &mut Vec<String>) { ordered_fields.push(f.name().clone()); f.children().iter().for_each(|f| collect_children(*f, ordered_fields)); } impl Display for IsoMsg { fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), std::fmt::Error> { let mut res = "".to_string(); let mut ordered_fields = vec![]; self.msg.fields.iter().for_each(|f| collect_children(f.as_ref(), &mut ordered_fields)); res = res + format!("\n{:20.40} : {:5} : {} ", "-Field-", "-Position-", "-Field Value-").as_str(); for f in ordered_fields { if self.fd_map.contains_key(f.as_str()) { let field = self.msg.field_by_name(&f).unwrap(); let field_value = &self.fd_map.get(f.as_str()).unwrap(); let mut pos_str: String = String::new(); if field.position() > 0 { pos_str = format!("{:03}", field.position()); } //debug!("** formatting {}",field.name()); res = res + format!("\n{:20.40} : {:^10} : {} ", f, pos_str.as_str(), field.to_string(field_value)).as_str(); } } f.write_str(&res).unwrap(); 
Ok(()) } } /// Returns a spec given its name pub fn spec(_name: &str) -> &'static Spec { //TODO:: handle case of multiple specs, for now just return the first ALL_SPECS.iter().find_map(|(_k, v)| Some(v)).unwrap() } /// Returns a empty IsoMsg that can be used to create a message pub fn new_msg(spec: &'static Spec, seg: &'static MessageSegment) -> IsoMsg { IsoMsg { spec, msg: seg, fd_map: HashMap::new(), bmp: Bitmap::new(0, 0, 0), } } impl Spec { /// Returns a IsoMsg after parsing data or an ParseError on failure pub fn parse(&'static self, data: &mut Vec<u8>) -> Result<IsoMsg, ParseError> { let msg = self.get_msg_segment(data); if msg.is_err() { return Err(ParseError { msg: msg.err().unwrap().msg }); } let mut iso_msg = IsoMsg { spec: &self, msg: &msg.unwrap(), fd_map: HashMap::new(), bmp: Bitmap::new(0, 0, 0), }; let mut cp_data = Cursor::new(data); for f in &iso_msg.msg.fields { debug!("parsing field : {}", f.name()); let res = match f.parse(&mut cp_data, &mut iso_msg.fd_map) { Err(e) => Result::Err(e), Ok(_) => { //if this is "THE" bitmap, then save it on isomsg if f.name() == "bitmap" { let bmp_data = iso_msg.fd_map.get(f.name()).unwrap(); iso_msg.bmp = Bitmap::from_vec(bmp_data); } Ok(()) } }; if res.is_err() { return Result::Err(res.err().unwrap()); } } Ok(iso_msg) } }
}
random_line_split
iso_spec.rs
//! This module contains implementation of specification, its segments and associated operations //! use std::collections::HashMap; use std::fmt::{Display, Formatter}; use std::io::Cursor; use crate::iso8583::{bitmap, IsoError}; use crate::iso8583::field::{Field, ParseError}; use crate::iso8583::yaml_de::YMessageSegment; use crate::iso8583::bitmap::Bitmap; use crate::iso8583::config::Config; use crate::crypto::pin::generate_pin_block; use crate::crypto::mac::generate_mac; // Reads the spec definitions from YAML file lazy_static! { static ref ALL_SPECS: std::collections::HashMap<String,Spec> ={ println!("current-dir: {}",std::env::current_dir().unwrap().to_str().unwrap()); let mut spec_file = String::new(); match std::env::var_os("SPEC_FILE") { Some(v) => { spec_file.push_str(v.to_str().unwrap()); println!("spec-file: {}",spec_file) } None => panic!("SPEC_FILE env variable not defined!") } let mut specs=HashMap::<String,Spec>::new(); match crate::iso8583::yaml_de::read_spec(spec_file.as_str()){ Ok(spec)=> specs.insert(String::from(spec.name()),spec), Err(e)=> panic!(e.msg) }; specs }; } /// This struct is the definition of the specification - layout of fields etc.. 
pub struct Spec { pub(in crate::iso8583) name: String, #[allow(dead_code)] pub(in crate::iso8583) id: u32, pub(in crate::iso8583) messages: Vec<MessageSegment>, pub(in crate::iso8583) header_fields: Vec<Box<dyn Field>>, } /// This struct represents a segment in the Spec (a auth request, a response etc) pub struct MessageSegment { pub(in crate::iso8583) name: String, #[allow(dead_code)] pub(in crate::iso8583) id: u32, pub(in crate::iso8583) selector: Vec<String>, pub(in crate::iso8583) fields: Vec<Box<dyn Field>>, } impl From<&YMessageSegment> for MessageSegment { fn from(yms: &YMessageSegment) -> Self { let mut fields: Vec<Box<dyn Field>> = Vec::<Box<dyn Field>>::new(); yms.fields.iter().for_each(|f| { fields.push(Into::<Box<dyn Field>>::into(f)); }); MessageSegment { name: yms.name.clone(), id: yms.id, selector: yms.selector.iter().map(|s| s.clone()).collect(), fields, } } } /// Operations on MessageSegment impl MessageSegment { /// Returns name of segment pub fn name(&self) -> &str { return self.name.as_str(); } /// Returns a field given it's name if defined in the spec or a IsoError if the field is not found pub fn field_by_name(&self, name: &String) -> Result<&dyn Field, IsoError> { match self.fields.iter().find(|field| -> bool{ if field.name() == name { true } else { false } }) { None => { //try bitmap let bmp = self.field_by_name(&"bitmap".to_string()).unwrap(); Ok(bmp.child_by_name(name)) } Some(f) => { Ok(f.as_ref()) } } } } impl Spec { pub fn name(&self) -> &str { &self.name } /// Returns a message segment given its name or a IsoError if such a segment is not present pub fn get_message(&self, name: &str) -> Result<&MessageSegment, IsoError> { for msg in &self.messages { if msg.name() == name { return Ok(msg); } } return Err(IsoError { msg: format!("{} message not found", name) }); } /// Returns a message that corresponds to the given header value or an IsoError if such a selector /// doesn't exist pub fn get_message_from_header(&self, header_val: &str) -> 
Result<&MessageSegment, IsoError> { for msg in &self.messages { if msg.selector.contains(&header_val.to_string()) { return Ok(msg); } } return Err(IsoError { msg: format!("message not found for header - {}", header_val) }); } /// Returns a segment by first parsing the header field and then matching the header value against /// the selector pub fn get_msg_segment(&'static self, data: &Vec<u8>) -> Result<&MessageSegment, IsoError> { let mut selector = String::new(); let mut f2d_map = HashMap::new(); let mut in_buf = Cursor::new(data); for f in &self.header_fields { match f.parse(&mut in_buf, &mut f2d_map) { Ok(_) => { selector.extend(f.to_string(f2d_map.get(f.name()).unwrap()).chars()); } Err(e) => { return Err(IsoError { msg: e.msg }); } } } debug!("computed header value for incoming message = {}", selector); match self.get_message_from_header(selector.as_str()) { Ok(msg) => { Ok(msg) } Err(e) => Err(e) } } } /// This struct represents a parsed message for a given spec pub struct IsoMsg { // The spec associated with this IsoMsg pub spec: &'static Spec, /// The segment that the IsoMsg represents pub msg: &'static MessageSegment, /// field data map - name to raw value pub fd_map: std::collections::HashMap<String, Vec<u8>>, /// the bitmap on the iso message pub bmp: bitmap::Bitmap, } /// Operations on IsoMsg impl IsoMsg { pub fn spec(&self) -> &'static Spec { self.spec } /// Returns the value of a field by position in the bitmap pub fn bmp_child_value(&self, pos: u32) -> Result<String, IsoError> { let f = self.msg.fields.iter().find(|f| -> bool { if f.name() == "bitmap" { true } else { false } }).unwrap(); let cf = f.child_by_pos(pos); match self.fd_map.get(cf.name()) { None => { Err(IsoError { msg: format!("no value for field at position {}", pos) }) } Some(v) => { Ok(cf.to_string(v)) } } } /// Returns the value of a top level field like message_type pub fn get_field_value(&self, name: &String) -> Result<String, IsoError> { match self.msg.fields.iter().find(|f| -> 
bool { if f.name() == name { true } else { false } }) { Some(f) => { Ok(f.to_string(self.fd_map.get(name).unwrap())) } None => { Err(IsoError { msg: format!("No such field : {}", name) }) } } } /// sets a top-level field like message_type etc pub fn set(&mut self, name: &str, val: &str) -> Result<(), IsoError> { match self.msg.field_by_name(&name.to_string()) { Ok(f) => { self.fd_map.insert(f.name().clone(), f.to_raw(val)); Ok(()) } Err(e) => Err(e) } } /// Sets a field in the bitmap with the given value pub fn set_on(&mut self, pos: u32, val: &str) -> Result<(), IsoError> { match self.msg.field_by_name(&"bitmap".to_string()) { Ok(f) => { let cf = f.child_by_pos(pos); self.fd_map.insert(cf.name().clone(), cf.to_raw(val)); self.bmp.set_on(pos); Ok(()) } Err(e) => Err(e) } } /// Echoes (sets the value with the identical field in req_msg) for given positions in the bitmap pub fn echo_from(&mut self, req_msg: &IsoMsg, positions: &[u32]) -> Result<(), IsoError> { match self.msg.field_by_name(&"bitmap".to_string()) { Ok(f) => { for pos in positions { let cf = f.child_by_pos(*pos); match req_msg.bmp_child_value(*pos) { Ok(res) => { debug!("echoing.. {}: {}", pos, res); self.fd_map.insert(cf.name().clone(), cf.to_raw(res.as_str())); self.bmp.set_on(*pos); } Err(e) => { return Err(e); } } } Ok(()) } Err(e) => Err(e) } } /// Assembles the messages into a Vec<u8> or a IsoError on failure pub fn assemble(&self) -> Result<Vec<u8>, IsoError> { let mut out_buf: Vec<u8> = Vec::new(); for f in &self.msg.fields { match f.assemble(&mut out_buf, &self) { Ok(_) => {} Err(e) => { return Err(IsoError { msg: e.msg }); } } } Ok(out_buf) } /// Sets F52 based on provided clear pin, and format, key provided via cfg pub fn set_pin(&mut self, pin: &str, pan: &str, cfg: &Config) -> Result<(), IsoError> { if cfg.get_pin_fmt().is_none() || cfg.get_pin_key().is_none()
match generate_pin_block(&cfg.get_pin_fmt().as_ref().unwrap(), pin, pan, &hex::decode(cfg.get_pin_key().as_ref().unwrap().as_str()).unwrap()) { Ok(v) => { self.set_on(52, hex::encode(v).as_str()) } Err(e) => { Err(IsoError { msg: e.msg }) } } } /// Sets F64 or F128 based on algo, padding and key provided via cfg pub fn set_mac(&mut self, cfg: &Config) -> Result<(), IsoError> { if cfg.get_mac_algo().is_none() || cfg.get_mac_padding().is_none() || cfg.get_mac_key().is_none() { return Err(IsoError { msg: format!("missing mac_algo or padding or key in call to set_mac") }); } if self.bmp.is_on(1) { self.set_on(128, "0000000000000000") } else { self.set_on(64, "0000000000000000") }.unwrap(); let data: Vec<u8> = match self.assemble() { Ok(v) => { v } Err(e) => { return Err(e); } }; debug!("generating mac on: {}", hex::encode(&data)); match generate_mac(&cfg.get_mac_algo().as_ref().unwrap(), &cfg.get_mac_padding().as_ref().unwrap(), &data[0..data.len() - 8].to_vec(), &hex::decode(cfg.get_mac_key().as_ref().unwrap()).unwrap()) { Ok(v) => { let pos: u32; if self.bmp.is_on(1) { pos = 128; } else { pos = 64; } self.set_on(pos, hex::encode(v).as_str()).unwrap_or_default(); Ok(()) } Err(e) => { Err(IsoError { msg: e.msg }) } } } } fn collect_children(f: &dyn Field, ordered_fields: &mut Vec<String>) { ordered_fields.push(f.name().clone()); f.children().iter().for_each(|f| collect_children(*f, ordered_fields)); } impl Display for IsoMsg { fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), std::fmt::Error> { let mut res = "".to_string(); let mut ordered_fields = vec![]; self.msg.fields.iter().for_each(|f| collect_children(f.as_ref(), &mut ordered_fields)); res = res + format!("\n{:20.40} : {:5} : {} ", "-Field-", "-Position-", "-Field Value-").as_str(); for f in ordered_fields { if self.fd_map.contains_key(f.as_str()) { let field = self.msg.field_by_name(&f).unwrap(); let field_value = &self.fd_map.get(f.as_str()).unwrap(); let mut pos_str: String = String::new(); if 
field.position() > 0 { pos_str = format!("{:03}", field.position()); } //debug!("** formatting {}",field.name()); res = res + format!("\n{:20.40} : {:^10} : {} ", f, pos_str.as_str(), field.to_string(field_value)).as_str(); } } f.write_str(&res).unwrap(); Ok(()) } } /// Returns a spec given its name pub fn spec(_name: &str) -> &'static Spec { //TODO:: handle case of multiple specs, for now just return the first ALL_SPECS.iter().find_map(|(_k, v)| Some(v)).unwrap() } /// Returns a empty IsoMsg that can be used to create a message pub fn new_msg(spec: &'static Spec, seg: &'static MessageSegment) -> IsoMsg { IsoMsg { spec, msg: seg, fd_map: HashMap::new(), bmp: Bitmap::new(0, 0, 0), } } impl Spec { /// Returns a IsoMsg after parsing data or an ParseError on failure pub fn parse(&'static self, data: &mut Vec<u8>) -> Result<IsoMsg, ParseError> { let msg = self.get_msg_segment(data); if msg.is_err() { return Err(ParseError { msg: msg.err().unwrap().msg }); } let mut iso_msg = IsoMsg { spec: &self, msg: &msg.unwrap(), fd_map: HashMap::new(), bmp: Bitmap::new(0, 0, 0), }; let mut cp_data = Cursor::new(data); for f in &iso_msg.msg.fields { debug!("parsing field : {}", f.name()); let res = match f.parse(&mut cp_data, &mut iso_msg.fd_map) { Err(e) => Result::Err(e), Ok(_) => { //if this is "THE" bitmap, then save it on isomsg if f.name() == "bitmap" { let bmp_data = iso_msg.fd_map.get(f.name()).unwrap(); iso_msg.bmp = Bitmap::from_vec(bmp_data); } Ok(()) } }; if res.is_err() { return Result::Err(res.err().unwrap()); } } Ok(iso_msg) } }
{ return Err(IsoError { msg: format!("missing pin_format or key in call to set_pin") }); }
conditional_block
iso_spec.rs
//! This module contains implementation of specification, its segments and associated operations //! use std::collections::HashMap; use std::fmt::{Display, Formatter}; use std::io::Cursor; use crate::iso8583::{bitmap, IsoError}; use crate::iso8583::field::{Field, ParseError}; use crate::iso8583::yaml_de::YMessageSegment; use crate::iso8583::bitmap::Bitmap; use crate::iso8583::config::Config; use crate::crypto::pin::generate_pin_block; use crate::crypto::mac::generate_mac; // Reads the spec definitions from YAML file lazy_static! { static ref ALL_SPECS: std::collections::HashMap<String,Spec> ={ println!("current-dir: {}",std::env::current_dir().unwrap().to_str().unwrap()); let mut spec_file = String::new(); match std::env::var_os("SPEC_FILE") { Some(v) => { spec_file.push_str(v.to_str().unwrap()); println!("spec-file: {}",spec_file) } None => panic!("SPEC_FILE env variable not defined!") } let mut specs=HashMap::<String,Spec>::new(); match crate::iso8583::yaml_de::read_spec(spec_file.as_str()){ Ok(spec)=> specs.insert(String::from(spec.name()),spec), Err(e)=> panic!(e.msg) }; specs }; } /// This struct is the definition of the specification - layout of fields etc.. 
pub struct Spec { pub(in crate::iso8583) name: String, #[allow(dead_code)] pub(in crate::iso8583) id: u32, pub(in crate::iso8583) messages: Vec<MessageSegment>, pub(in crate::iso8583) header_fields: Vec<Box<dyn Field>>, } /// This struct represents a segment in the Spec (a auth request, a response etc) pub struct MessageSegment { pub(in crate::iso8583) name: String, #[allow(dead_code)] pub(in crate::iso8583) id: u32, pub(in crate::iso8583) selector: Vec<String>, pub(in crate::iso8583) fields: Vec<Box<dyn Field>>, } impl From<&YMessageSegment> for MessageSegment { fn from(yms: &YMessageSegment) -> Self { let mut fields: Vec<Box<dyn Field>> = Vec::<Box<dyn Field>>::new(); yms.fields.iter().for_each(|f| { fields.push(Into::<Box<dyn Field>>::into(f)); }); MessageSegment { name: yms.name.clone(), id: yms.id, selector: yms.selector.iter().map(|s| s.clone()).collect(), fields, } } } /// Operations on MessageSegment impl MessageSegment { /// Returns name of segment pub fn name(&self) -> &str { return self.name.as_str(); } /// Returns a field given it's name if defined in the spec or a IsoError if the field is not found pub fn field_by_name(&self, name: &String) -> Result<&dyn Field, IsoError> { match self.fields.iter().find(|field| -> bool{ if field.name() == name { true } else { false } }) { None => { //try bitmap let bmp = self.field_by_name(&"bitmap".to_string()).unwrap(); Ok(bmp.child_by_name(name)) } Some(f) => { Ok(f.as_ref()) } } } } impl Spec { pub fn name(&self) -> &str { &self.name } /// Returns a message segment given its name or a IsoError if such a segment is not present pub fn get_message(&self, name: &str) -> Result<&MessageSegment, IsoError> { for msg in &self.messages { if msg.name() == name { return Ok(msg); } } return Err(IsoError { msg: format!("{} message not found", name) }); } /// Returns a message that corresponds to the given header value or an IsoError if such a selector /// doesn't exist pub fn get_message_from_header(&self, header_val: &str) -> 
Result<&MessageSegment, IsoError> { for msg in &self.messages { if msg.selector.contains(&header_val.to_string()) { return Ok(msg); } } return Err(IsoError { msg: format!("message not found for header - {}", header_val) }); } /// Returns a segment by first parsing the header field and then matching the header value against /// the selector pub fn get_msg_segment(&'static self, data: &Vec<u8>) -> Result<&MessageSegment, IsoError> { let mut selector = String::new(); let mut f2d_map = HashMap::new(); let mut in_buf = Cursor::new(data); for f in &self.header_fields { match f.parse(&mut in_buf, &mut f2d_map) { Ok(_) => { selector.extend(f.to_string(f2d_map.get(f.name()).unwrap()).chars()); } Err(e) => { return Err(IsoError { msg: e.msg }); } } } debug!("computed header value for incoming message = {}", selector); match self.get_message_from_header(selector.as_str()) { Ok(msg) => { Ok(msg) } Err(e) => Err(e) } } } /// This struct represents a parsed message for a given spec pub struct IsoMsg { // The spec associated with this IsoMsg pub spec: &'static Spec, /// The segment that the IsoMsg represents pub msg: &'static MessageSegment, /// field data map - name to raw value pub fd_map: std::collections::HashMap<String, Vec<u8>>, /// the bitmap on the iso message pub bmp: bitmap::Bitmap, } /// Operations on IsoMsg impl IsoMsg { pub fn spec(&self) -> &'static Spec { self.spec } /// Returns the value of a field by position in the bitmap pub fn bmp_child_value(&self, pos: u32) -> Result<String, IsoError> { let f = self.msg.fields.iter().find(|f| -> bool { if f.name() == "bitmap" { true } else { false } }).unwrap(); let cf = f.child_by_pos(pos); match self.fd_map.get(cf.name()) { None => { Err(IsoError { msg: format!("no value for field at position {}", pos) }) } Some(v) => { Ok(cf.to_string(v)) } } } /// Returns the value of a top level field like message_type pub fn get_field_value(&self, name: &String) -> Result<String, IsoError> { match self.msg.fields.iter().find(|f| -> 
bool { if f.name() == name { true } else { false } }) { Some(f) => { Ok(f.to_string(self.fd_map.get(name).unwrap())) } None => { Err(IsoError { msg: format!("No such field : {}", name) }) } } } /// sets a top-level field like message_type etc pub fn set(&mut self, name: &str, val: &str) -> Result<(), IsoError> { match self.msg.field_by_name(&name.to_string()) { Ok(f) => { self.fd_map.insert(f.name().clone(), f.to_raw(val)); Ok(()) } Err(e) => Err(e) } } /// Sets a field in the bitmap with the given value pub fn set_on(&mut self, pos: u32, val: &str) -> Result<(), IsoError> { match self.msg.field_by_name(&"bitmap".to_string()) { Ok(f) => { let cf = f.child_by_pos(pos); self.fd_map.insert(cf.name().clone(), cf.to_raw(val)); self.bmp.set_on(pos); Ok(()) } Err(e) => Err(e) } } /// Echoes (sets the value with the identical field in req_msg) for given positions in the bitmap pub fn echo_from(&mut self, req_msg: &IsoMsg, positions: &[u32]) -> Result<(), IsoError> { match self.msg.field_by_name(&"bitmap".to_string()) { Ok(f) => { for pos in positions { let cf = f.child_by_pos(*pos); match req_msg.bmp_child_value(*pos) { Ok(res) => { debug!("echoing.. 
{}: {}", pos, res); self.fd_map.insert(cf.name().clone(), cf.to_raw(res.as_str())); self.bmp.set_on(*pos); } Err(e) => { return Err(e); } } } Ok(()) } Err(e) => Err(e) } } /// Assembles the messages into a Vec<u8> or a IsoError on failure pub fn assemble(&self) -> Result<Vec<u8>, IsoError> { let mut out_buf: Vec<u8> = Vec::new(); for f in &self.msg.fields { match f.assemble(&mut out_buf, &self) { Ok(_) => {} Err(e) => { return Err(IsoError { msg: e.msg }); } } } Ok(out_buf) } /// Sets F52 based on provided clear pin, and format, key provided via cfg pub fn set_pin(&mut self, pin: &str, pan: &str, cfg: &Config) -> Result<(), IsoError> { if cfg.get_pin_fmt().is_none() || cfg.get_pin_key().is_none() { return Err(IsoError { msg: format!("missing pin_format or key in call to set_pin") }); } match generate_pin_block(&cfg.get_pin_fmt().as_ref().unwrap(), pin, pan, &hex::decode(cfg.get_pin_key().as_ref().unwrap().as_str()).unwrap()) { Ok(v) => { self.set_on(52, hex::encode(v).as_str()) } Err(e) => { Err(IsoError { msg: e.msg }) } } } /// Sets F64 or F128 based on algo, padding and key provided via cfg pub fn set_mac(&mut self, cfg: &Config) -> Result<(), IsoError> { if cfg.get_mac_algo().is_none() || cfg.get_mac_padding().is_none() || cfg.get_mac_key().is_none() { return Err(IsoError { msg: format!("missing mac_algo or padding or key in call to set_mac") }); } if self.bmp.is_on(1) { self.set_on(128, "0000000000000000") } else { self.set_on(64, "0000000000000000") }.unwrap(); let data: Vec<u8> = match self.assemble() { Ok(v) => { v } Err(e) => { return Err(e); } }; debug!("generating mac on: {}", hex::encode(&data)); match generate_mac(&cfg.get_mac_algo().as_ref().unwrap(), &cfg.get_mac_padding().as_ref().unwrap(), &data[0..data.len() - 8].to_vec(), &hex::decode(cfg.get_mac_key().as_ref().unwrap()).unwrap()) { Ok(v) => { let pos: u32; if self.bmp.is_on(1) { pos = 128; } else { pos = 64; } self.set_on(pos, hex::encode(v).as_str()).unwrap_or_default(); Ok(()) } Err(e) => { 
Err(IsoError { msg: e.msg }) } } } } fn collect_children(f: &dyn Field, ordered_fields: &mut Vec<String>) { ordered_fields.push(f.name().clone()); f.children().iter().for_each(|f| collect_children(*f, ordered_fields)); } impl Display for IsoMsg { fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), std::fmt::Error> { let mut res = "".to_string(); let mut ordered_fields = vec![]; self.msg.fields.iter().for_each(|f| collect_children(f.as_ref(), &mut ordered_fields)); res = res + format!("\n{:20.40} : {:5} : {} ", "-Field-", "-Position-", "-Field Value-").as_str(); for f in ordered_fields { if self.fd_map.contains_key(f.as_str()) { let field = self.msg.field_by_name(&f).unwrap(); let field_value = &self.fd_map.get(f.as_str()).unwrap(); let mut pos_str: String = String::new(); if field.position() > 0 { pos_str = format!("{:03}", field.position()); } //debug!("** formatting {}",field.name()); res = res + format!("\n{:20.40} : {:^10} : {} ", f, pos_str.as_str(), field.to_string(field_value)).as_str(); } } f.write_str(&res).unwrap(); Ok(()) } } /// Returns a spec given its name pub fn
(_name: &str) -> &'static Spec { //TODO:: handle case of multiple specs, for now just return the first ALL_SPECS.iter().find_map(|(_k, v)| Some(v)).unwrap() } /// Returns a empty IsoMsg that can be used to create a message pub fn new_msg(spec: &'static Spec, seg: &'static MessageSegment) -> IsoMsg { IsoMsg { spec, msg: seg, fd_map: HashMap::new(), bmp: Bitmap::new(0, 0, 0), } } impl Spec { /// Returns a IsoMsg after parsing data or an ParseError on failure pub fn parse(&'static self, data: &mut Vec<u8>) -> Result<IsoMsg, ParseError> { let msg = self.get_msg_segment(data); if msg.is_err() { return Err(ParseError { msg: msg.err().unwrap().msg }); } let mut iso_msg = IsoMsg { spec: &self, msg: &msg.unwrap(), fd_map: HashMap::new(), bmp: Bitmap::new(0, 0, 0), }; let mut cp_data = Cursor::new(data); for f in &iso_msg.msg.fields { debug!("parsing field : {}", f.name()); let res = match f.parse(&mut cp_data, &mut iso_msg.fd_map) { Err(e) => Result::Err(e), Ok(_) => { //if this is "THE" bitmap, then save it on isomsg if f.name() == "bitmap" { let bmp_data = iso_msg.fd_map.get(f.name()).unwrap(); iso_msg.bmp = Bitmap::from_vec(bmp_data); } Ok(()) } }; if res.is_err() { return Result::Err(res.err().unwrap()); } } Ok(iso_msg) } }
spec
identifier_name
spatial.rs
use inle_alloc::temp::*; use inle_app::app::Engine_State; use inle_ecs::ecs_world::{Ecs_World, Entity, Evt_Entity_Destroyed}; use inle_events::evt_register::{with_cb_data, wrap_cb_data, Event_Callback_Data}; use inle_math::vector::Vec2f; use inle_physics::collider::C_Collider; use inle_physics::phys_world::{Collider_Handle, Physics_World}; use inle_physics::spatial::Spatial_Accelerator; use std::cmp::Ordering; use std::collections::HashMap; #[cfg(debug_assertions)] use {inle_debug::painter::Debug_Painter, std::collections::HashSet}; // @Speed: tune these numbers const CHUNK_WIDTH: f32 = 200.; const CHUNK_HEIGHT: f32 = 200.; #[derive(Default, Copy, Clone, Debug, PartialEq, Eq, Hash)] pub struct Chunk_Coords { pub x: i32, pub y: i32, } impl PartialOrd for Chunk_Coords { fn partial_cmp(&self, other: &Self) -> Option<Ordering> { Some(self.cmp(other)) } } impl Ord for Chunk_Coords { fn cmp(&self, other: &Self) -> Ordering { match self.y.cmp(&other.y) { Ordering::Greater => Ordering::Greater, Ordering::Less => Ordering::Less, Ordering::Equal => self.x.cmp(&other.x), } } } impl Chunk_Coords { pub fn from_pos(pos: Vec2f) -> Self { Self { x: (pos.x / CHUNK_WIDTH).floor() as i32, y: (pos.y / CHUNK_HEIGHT).floor() as i32, } } pub fn to_world_pos(self) -> Vec2f
} pub struct World_Chunks { chunks: HashMap<Chunk_Coords, World_Chunk>, to_destroy: Event_Callback_Data, } #[derive(Default, Debug)] pub struct World_Chunk { pub colliders: Vec<Collider_Handle>, } impl World_Chunks { pub fn new() -> Self { Self { chunks: HashMap::new(), to_destroy: wrap_cb_data(Vec::<Entity>::new()), } } pub fn init(&mut self, engine_state: &mut Engine_State) { engine_state .systems .evt_register .subscribe::<Evt_Entity_Destroyed>( Box::new(|entity, to_destroy| { with_cb_data(to_destroy.unwrap(), |to_destroy: &mut Vec<Entity>| { to_destroy.push(entity); }); }), Some(self.to_destroy.clone()), ); } pub fn update(&mut self, ecs_world: &Ecs_World, phys_world: &Physics_World) { trace!("world_chunks::update"); let mut to_remove = vec![]; with_cb_data(&mut self.to_destroy, |to_destroy: &mut Vec<Entity>| { for &entity in to_destroy.iter() { if let Some(collider) = ecs_world.get_component::<C_Collider>(entity) { for (cld, handle) in phys_world.get_all_colliders_with_handles(collider.phys_body_handle) { to_remove.push((handle, cld.position, cld.shape.extent())); } } } to_destroy.clear(); }); for (cld, pos, extent) in to_remove { self.remove_collider(cld, pos, extent); } } pub fn n_chunks(&self) -> usize { self.chunks.len() } pub fn add_collider(&mut self, cld_handle: Collider_Handle, pos: Vec2f, extent: Vec2f) { let mut chunks = vec![]; self.get_all_chunks_containing(pos, extent, &mut chunks); for coords in chunks { self.add_collider_coords(cld_handle, coords); } } fn add_collider_coords(&mut self, cld_handle: Collider_Handle, coords: Chunk_Coords) { let chunk = self .chunks .entry(coords) .or_insert_with(World_Chunk::default); debug_assert!( !chunk.colliders.contains(&cld_handle), "Duplicate collider {:?} in chunk {:?}!", cld_handle, coords ); chunk.colliders.push(cld_handle); } pub fn remove_collider(&mut self, cld_handle: Collider_Handle, pos: Vec2f, extent: Vec2f) { let mut chunks = vec![]; self.get_all_chunks_containing(pos, extent, &mut chunks); for 
coords in chunks { self.remove_collider_coords(cld_handle, coords); } } fn remove_collider_coords(&mut self, cld_handle: Collider_Handle, coords: Chunk_Coords) { let chunk = self.chunks.get_mut(&coords).unwrap_or_else(|| { fatal!( "Collider {:?} should be in chunk {:?}, but that chunk does not exist.", cld_handle, coords ) }); let idx = chunk.colliders.iter().position(|&c| c == cld_handle); if let Some(idx) = idx { chunk.colliders.remove(idx); if chunk.colliders.is_empty() { self.chunks.remove(&coords); } } else { lerr!( "Collider {:?} not found in expected chunk {:?}.", cld_handle, coords ); } } pub fn update_collider( &mut self, cld_handle: Collider_Handle, prev_pos: Vec2f, new_pos: Vec2f, extent: Vec2f, frame_alloc: &mut Temp_Allocator, ) { trace!("world_chunks::update_collider"); let mut prev_coords = excl_temp_array(frame_alloc); self.get_all_chunks_containing(prev_pos, extent, &mut prev_coords); let prev_coords = unsafe { prev_coords.into_read_only() }; let mut new_coords = excl_temp_array(frame_alloc); self.get_all_chunks_containing(new_pos, extent, &mut new_coords); let new_coords = unsafe { new_coords.into_read_only() }; let mut all_chunks = excl_temp_array(frame_alloc); // Pre-allocate enough memory to hold all the chunks; then `chunks_to_add` starts at index 0, // while `chunks_to_remove` starts at index `new_coords.len()`. // This works because we can have at most `new_coords.len()` chunks to add and `prev_coords.len()` // chunks to remove. unsafe { all_chunks.alloc_additional_uninit(new_coords.len() + prev_coords.len()); } let mut n_chunks_to_add = 0; let mut n_chunks_to_remove = 0; let chunks_to_add_offset = 0; let chunks_to_remove_offset = new_coords.len(); // Find chunks to add and to remove in O(n). // This algorithm assumes that both prev_coords and new_coords are sorted and deduped. 
let mut p_idx = 0; let mut n_idx = 0; while p_idx < prev_coords.len() && n_idx < new_coords.len() { let pc = prev_coords[p_idx]; let nc = new_coords[n_idx]; match pc.cmp(&nc) { Ordering::Less => { all_chunks[chunks_to_remove_offset + n_chunks_to_remove] = pc; n_chunks_to_remove += 1; p_idx += 1; } Ordering::Greater => { all_chunks[chunks_to_add_offset + n_chunks_to_add] = nc; n_chunks_to_add += 1; n_idx += 1; } Ordering::Equal => { p_idx += 1; n_idx += 1; } } } if p_idx < prev_coords.len() { let diff = prev_coords.len() - p_idx; for i in 0..diff { all_chunks[chunks_to_remove_offset + n_chunks_to_remove + i] = prev_coords[p_idx + i]; } n_chunks_to_remove += diff; } else if n_idx < new_coords.len() { let diff = new_coords.len() - n_idx; for i in 0..diff { all_chunks[chunks_to_add_offset + n_chunks_to_add + i] = new_coords[n_idx + i]; } n_chunks_to_add += diff; } #[cfg(debug_assertions)] { let to_remove = all_chunks .iter() .cloned() .skip(chunks_to_remove_offset) .take(n_chunks_to_remove) .collect::<HashSet<_>>(); let to_add = all_chunks .iter() .cloned() .skip(chunks_to_add_offset) .take(n_chunks_to_add) .collect::<HashSet<_>>(); debug_assert_eq!(to_remove.intersection(&to_add).count(), 0); } for coord in all_chunks .iter() .skip(chunks_to_add_offset) .take(n_chunks_to_add) { self.add_collider_coords(cld_handle, *coord); } for coord in all_chunks .iter() .skip(chunks_to_remove_offset) .take(n_chunks_to_remove) { self.remove_collider_coords(cld_handle, *coord); } } fn get_all_chunks_containing<T>(&self, pos: Vec2f, extent: Vec2f, coords: &mut T) where T: Extend<Chunk_Coords>, { trace!("get_all_chunks_containing"); #[cfg(debug_assertions)] let mut chk_coords = vec![]; // We need to @Cleanup the -extent*0.5 offset we need to apply and make it consistent throughout the game! 
let pos = pos - extent * 0.5; let coords_topleft = Chunk_Coords::from_pos(pos); coords.extend(Some(coords_topleft)); #[cfg(debug_assertions)] chk_coords.push(coords_topleft); let coords_botright = Chunk_Coords::from_pos(pos + extent); // Note: we cycle y-major so the result is automatically sorted (as for Chunk_Coords::cmp) for y in 0..=coords_botright.y - coords_topleft.y { for x in 0..=coords_botright.x - coords_topleft.x { if x == 0 && y == 0 { continue; } coords.extend(Some(Chunk_Coords::from_pos( pos + v2!(x as f32 * CHUNK_WIDTH, y as f32 * CHUNK_HEIGHT), ))); #[cfg(debug_assertions)] chk_coords.push(Chunk_Coords::from_pos( pos + v2!(x as f32 * CHUNK_WIDTH, y as f32 * CHUNK_HEIGHT), )); } } #[cfg(debug_assertions)] { // Result should be sorted and deduped // @WaitForStable //debug_assert!(coords.iter().is_sorted()); for i in 1..chk_coords.len() { debug_assert!(chk_coords[i] > chk_coords[i - 1]); } let mut deduped = chk_coords.clone(); deduped.dedup(); debug_assert!(chk_coords.len() == deduped.len()); } } } impl Spatial_Accelerator<Collider_Handle> for World_Chunks { fn get_neighbours<R>(&self, pos: Vec2f, extent: Vec2f, result: &mut R) where R: Extend<Collider_Handle>, { let mut chunks = vec![]; self.get_all_chunks_containing(pos, extent, &mut chunks); for coords in chunks { if let Some(chunk) = self.chunks.get(&coords) { result.extend(chunk.colliders.iter().copied()); } } } } #[cfg(debug_assertions)] impl World_Chunks { pub fn debug_draw(&self, painter: &mut Debug_Painter) { use inle_common::colors; use inle_common::paint_props::Paint_Properties; use inle_math::transform::Transform2D; if self.chunks.is_empty() { return; } let max_colliders = self .chunks .iter() .map(|(_, chk)| chk.colliders.len()) .max() .unwrap_or(0) as f32; for (coords, chunk) in &self.chunks { let world_pos = v2!(coords.to_world_pos().x, coords.to_world_pos().y); let col = colors::lerp_col( colors::rgba(0, 150, 0, 100), colors::rgba(150, 0, 0, 100), chunk.colliders.len() as f32 / 
max_colliders, ); painter.add_rect( v2!(CHUNK_WIDTH, CHUNK_HEIGHT), &Transform2D::from_pos(world_pos), Paint_Properties { color: col, border_color: colors::darken(col, 0.7), border_thick: (CHUNK_WIDTH / 50.).max(5.), ..Default::default() }, ); painter.add_text( &format!("{},{}: {}", coords.x, coords.y, chunk.colliders.len()), world_pos + v2!(10., 5.), (CHUNK_WIDTH as u16 / 10).max(20), colors::rgba(50, 220, 0, 250), ); } } } #[cfg(tests)] mod tests { use super::*; #[test] fn chunk_coords_ord() { assert!(Chunk_Coords { x: 0, y: 0 } < Chunk_Coords { x: 1, y: 0 }); assert!(Chunk_Coords { x: 1, y: 0 } < Chunk_Coords { x: 0, y: 1 }); assert!(Chunk_Coords { x: 1, y: 1 } < Chunk_Coords { x: 2, y: 1 }); assert!(Chunk_Coords { x: 2, y: 1 } < Chunk_Coords { x: 1, y: 2 }); } }
{ Vec2f { x: self.x as f32 * CHUNK_WIDTH, y: self.y as f32 * CHUNK_HEIGHT, } }
identifier_body
spatial.rs
use inle_alloc::temp::*; use inle_app::app::Engine_State; use inle_ecs::ecs_world::{Ecs_World, Entity, Evt_Entity_Destroyed}; use inle_events::evt_register::{with_cb_data, wrap_cb_data, Event_Callback_Data}; use inle_math::vector::Vec2f; use inle_physics::collider::C_Collider; use inle_physics::phys_world::{Collider_Handle, Physics_World}; use inle_physics::spatial::Spatial_Accelerator; use std::cmp::Ordering; use std::collections::HashMap; #[cfg(debug_assertions)] use {inle_debug::painter::Debug_Painter, std::collections::HashSet}; // @Speed: tune these numbers const CHUNK_WIDTH: f32 = 200.; const CHUNK_HEIGHT: f32 = 200.; #[derive(Default, Copy, Clone, Debug, PartialEq, Eq, Hash)] pub struct Chunk_Coords { pub x: i32, pub y: i32, } impl PartialOrd for Chunk_Coords { fn partial_cmp(&self, other: &Self) -> Option<Ordering> { Some(self.cmp(other)) } } impl Ord for Chunk_Coords { fn cmp(&self, other: &Self) -> Ordering { match self.y.cmp(&other.y) { Ordering::Greater => Ordering::Greater, Ordering::Less => Ordering::Less, Ordering::Equal => self.x.cmp(&other.x), } } } impl Chunk_Coords { pub fn from_pos(pos: Vec2f) -> Self { Self {
pub fn to_world_pos(self) -> Vec2f { Vec2f { x: self.x as f32 * CHUNK_WIDTH, y: self.y as f32 * CHUNK_HEIGHT, } } } pub struct World_Chunks { chunks: HashMap<Chunk_Coords, World_Chunk>, to_destroy: Event_Callback_Data, } #[derive(Default, Debug)] pub struct World_Chunk { pub colliders: Vec<Collider_Handle>, } impl World_Chunks { pub fn new() -> Self { Self { chunks: HashMap::new(), to_destroy: wrap_cb_data(Vec::<Entity>::new()), } } pub fn init(&mut self, engine_state: &mut Engine_State) { engine_state .systems .evt_register .subscribe::<Evt_Entity_Destroyed>( Box::new(|entity, to_destroy| { with_cb_data(to_destroy.unwrap(), |to_destroy: &mut Vec<Entity>| { to_destroy.push(entity); }); }), Some(self.to_destroy.clone()), ); } pub fn update(&mut self, ecs_world: &Ecs_World, phys_world: &Physics_World) { trace!("world_chunks::update"); let mut to_remove = vec![]; with_cb_data(&mut self.to_destroy, |to_destroy: &mut Vec<Entity>| { for &entity in to_destroy.iter() { if let Some(collider) = ecs_world.get_component::<C_Collider>(entity) { for (cld, handle) in phys_world.get_all_colliders_with_handles(collider.phys_body_handle) { to_remove.push((handle, cld.position, cld.shape.extent())); } } } to_destroy.clear(); }); for (cld, pos, extent) in to_remove { self.remove_collider(cld, pos, extent); } } pub fn n_chunks(&self) -> usize { self.chunks.len() } pub fn add_collider(&mut self, cld_handle: Collider_Handle, pos: Vec2f, extent: Vec2f) { let mut chunks = vec![]; self.get_all_chunks_containing(pos, extent, &mut chunks); for coords in chunks { self.add_collider_coords(cld_handle, coords); } } fn add_collider_coords(&mut self, cld_handle: Collider_Handle, coords: Chunk_Coords) { let chunk = self .chunks .entry(coords) .or_insert_with(World_Chunk::default); debug_assert!( !chunk.colliders.contains(&cld_handle), "Duplicate collider {:?} in chunk {:?}!", cld_handle, coords ); chunk.colliders.push(cld_handle); } pub fn remove_collider(&mut self, cld_handle: Collider_Handle, pos: 
Vec2f, extent: Vec2f) { let mut chunks = vec![]; self.get_all_chunks_containing(pos, extent, &mut chunks); for coords in chunks { self.remove_collider_coords(cld_handle, coords); } } fn remove_collider_coords(&mut self, cld_handle: Collider_Handle, coords: Chunk_Coords) { let chunk = self.chunks.get_mut(&coords).unwrap_or_else(|| { fatal!( "Collider {:?} should be in chunk {:?}, but that chunk does not exist.", cld_handle, coords ) }); let idx = chunk.colliders.iter().position(|&c| c == cld_handle); if let Some(idx) = idx { chunk.colliders.remove(idx); if chunk.colliders.is_empty() { self.chunks.remove(&coords); } } else { lerr!( "Collider {:?} not found in expected chunk {:?}.", cld_handle, coords ); } } pub fn update_collider( &mut self, cld_handle: Collider_Handle, prev_pos: Vec2f, new_pos: Vec2f, extent: Vec2f, frame_alloc: &mut Temp_Allocator, ) { trace!("world_chunks::update_collider"); let mut prev_coords = excl_temp_array(frame_alloc); self.get_all_chunks_containing(prev_pos, extent, &mut prev_coords); let prev_coords = unsafe { prev_coords.into_read_only() }; let mut new_coords = excl_temp_array(frame_alloc); self.get_all_chunks_containing(new_pos, extent, &mut new_coords); let new_coords = unsafe { new_coords.into_read_only() }; let mut all_chunks = excl_temp_array(frame_alloc); // Pre-allocate enough memory to hold all the chunks; then `chunks_to_add` starts at index 0, // while `chunks_to_remove` starts at index `new_coords.len()`. // This works because we can have at most `new_coords.len()` chunks to add and `prev_coords.len()` // chunks to remove. unsafe { all_chunks.alloc_additional_uninit(new_coords.len() + prev_coords.len()); } let mut n_chunks_to_add = 0; let mut n_chunks_to_remove = 0; let chunks_to_add_offset = 0; let chunks_to_remove_offset = new_coords.len(); // Find chunks to add and to remove in O(n). // This algorithm assumes that both prev_coords and new_coords are sorted and deduped. 
let mut p_idx = 0; let mut n_idx = 0; while p_idx < prev_coords.len() && n_idx < new_coords.len() { let pc = prev_coords[p_idx]; let nc = new_coords[n_idx]; match pc.cmp(&nc) { Ordering::Less => { all_chunks[chunks_to_remove_offset + n_chunks_to_remove] = pc; n_chunks_to_remove += 1; p_idx += 1; } Ordering::Greater => { all_chunks[chunks_to_add_offset + n_chunks_to_add] = nc; n_chunks_to_add += 1; n_idx += 1; } Ordering::Equal => { p_idx += 1; n_idx += 1; } } } if p_idx < prev_coords.len() { let diff = prev_coords.len() - p_idx; for i in 0..diff { all_chunks[chunks_to_remove_offset + n_chunks_to_remove + i] = prev_coords[p_idx + i]; } n_chunks_to_remove += diff; } else if n_idx < new_coords.len() { let diff = new_coords.len() - n_idx; for i in 0..diff { all_chunks[chunks_to_add_offset + n_chunks_to_add + i] = new_coords[n_idx + i]; } n_chunks_to_add += diff; } #[cfg(debug_assertions)] { let to_remove = all_chunks .iter() .cloned() .skip(chunks_to_remove_offset) .take(n_chunks_to_remove) .collect::<HashSet<_>>(); let to_add = all_chunks .iter() .cloned() .skip(chunks_to_add_offset) .take(n_chunks_to_add) .collect::<HashSet<_>>(); debug_assert_eq!(to_remove.intersection(&to_add).count(), 0); } for coord in all_chunks .iter() .skip(chunks_to_add_offset) .take(n_chunks_to_add) { self.add_collider_coords(cld_handle, *coord); } for coord in all_chunks .iter() .skip(chunks_to_remove_offset) .take(n_chunks_to_remove) { self.remove_collider_coords(cld_handle, *coord); } } fn get_all_chunks_containing<T>(&self, pos: Vec2f, extent: Vec2f, coords: &mut T) where T: Extend<Chunk_Coords>, { trace!("get_all_chunks_containing"); #[cfg(debug_assertions)] let mut chk_coords = vec![]; // We need to @Cleanup the -extent*0.5 offset we need to apply and make it consistent throughout the game! 
let pos = pos - extent * 0.5; let coords_topleft = Chunk_Coords::from_pos(pos); coords.extend(Some(coords_topleft)); #[cfg(debug_assertions)] chk_coords.push(coords_topleft); let coords_botright = Chunk_Coords::from_pos(pos + extent); // Note: we cycle y-major so the result is automatically sorted (as for Chunk_Coords::cmp) for y in 0..=coords_botright.y - coords_topleft.y { for x in 0..=coords_botright.x - coords_topleft.x { if x == 0 && y == 0 { continue; } coords.extend(Some(Chunk_Coords::from_pos( pos + v2!(x as f32 * CHUNK_WIDTH, y as f32 * CHUNK_HEIGHT), ))); #[cfg(debug_assertions)] chk_coords.push(Chunk_Coords::from_pos( pos + v2!(x as f32 * CHUNK_WIDTH, y as f32 * CHUNK_HEIGHT), )); } } #[cfg(debug_assertions)] { // Result should be sorted and deduped // @WaitForStable //debug_assert!(coords.iter().is_sorted()); for i in 1..chk_coords.len() { debug_assert!(chk_coords[i] > chk_coords[i - 1]); } let mut deduped = chk_coords.clone(); deduped.dedup(); debug_assert!(chk_coords.len() == deduped.len()); } } } impl Spatial_Accelerator<Collider_Handle> for World_Chunks { fn get_neighbours<R>(&self, pos: Vec2f, extent: Vec2f, result: &mut R) where R: Extend<Collider_Handle>, { let mut chunks = vec![]; self.get_all_chunks_containing(pos, extent, &mut chunks); for coords in chunks { if let Some(chunk) = self.chunks.get(&coords) { result.extend(chunk.colliders.iter().copied()); } } } } #[cfg(debug_assertions)] impl World_Chunks { pub fn debug_draw(&self, painter: &mut Debug_Painter) { use inle_common::colors; use inle_common::paint_props::Paint_Properties; use inle_math::transform::Transform2D; if self.chunks.is_empty() { return; } let max_colliders = self .chunks .iter() .map(|(_, chk)| chk.colliders.len()) .max() .unwrap_or(0) as f32; for (coords, chunk) in &self.chunks { let world_pos = v2!(coords.to_world_pos().x, coords.to_world_pos().y); let col = colors::lerp_col( colors::rgba(0, 150, 0, 100), colors::rgba(150, 0, 0, 100), chunk.colliders.len() as f32 / 
max_colliders, ); painter.add_rect( v2!(CHUNK_WIDTH, CHUNK_HEIGHT), &Transform2D::from_pos(world_pos), Paint_Properties { color: col, border_color: colors::darken(col, 0.7), border_thick: (CHUNK_WIDTH / 50.).max(5.), ..Default::default() }, ); painter.add_text( &format!("{},{}: {}", coords.x, coords.y, chunk.colliders.len()), world_pos + v2!(10., 5.), (CHUNK_WIDTH as u16 / 10).max(20), colors::rgba(50, 220, 0, 250), ); } } } #[cfg(tests)] mod tests { use super::*; #[test] fn chunk_coords_ord() { assert!(Chunk_Coords { x: 0, y: 0 } < Chunk_Coords { x: 1, y: 0 }); assert!(Chunk_Coords { x: 1, y: 0 } < Chunk_Coords { x: 0, y: 1 }); assert!(Chunk_Coords { x: 1, y: 1 } < Chunk_Coords { x: 2, y: 1 }); assert!(Chunk_Coords { x: 2, y: 1 } < Chunk_Coords { x: 1, y: 2 }); } }
x: (pos.x / CHUNK_WIDTH).floor() as i32, y: (pos.y / CHUNK_HEIGHT).floor() as i32, } }
random_line_split
spatial.rs
use inle_alloc::temp::*; use inle_app::app::Engine_State; use inle_ecs::ecs_world::{Ecs_World, Entity, Evt_Entity_Destroyed}; use inle_events::evt_register::{with_cb_data, wrap_cb_data, Event_Callback_Data}; use inle_math::vector::Vec2f; use inle_physics::collider::C_Collider; use inle_physics::phys_world::{Collider_Handle, Physics_World}; use inle_physics::spatial::Spatial_Accelerator; use std::cmp::Ordering; use std::collections::HashMap; #[cfg(debug_assertions)] use {inle_debug::painter::Debug_Painter, std::collections::HashSet}; // @Speed: tune these numbers const CHUNK_WIDTH: f32 = 200.; const CHUNK_HEIGHT: f32 = 200.; #[derive(Default, Copy, Clone, Debug, PartialEq, Eq, Hash)] pub struct Chunk_Coords { pub x: i32, pub y: i32, } impl PartialOrd for Chunk_Coords { fn partial_cmp(&self, other: &Self) -> Option<Ordering> { Some(self.cmp(other)) } } impl Ord for Chunk_Coords { fn cmp(&self, other: &Self) -> Ordering { match self.y.cmp(&other.y) { Ordering::Greater => Ordering::Greater, Ordering::Less => Ordering::Less, Ordering::Equal => self.x.cmp(&other.x), } } } impl Chunk_Coords { pub fn from_pos(pos: Vec2f) -> Self { Self { x: (pos.x / CHUNK_WIDTH).floor() as i32, y: (pos.y / CHUNK_HEIGHT).floor() as i32, } } pub fn to_world_pos(self) -> Vec2f { Vec2f { x: self.x as f32 * CHUNK_WIDTH, y: self.y as f32 * CHUNK_HEIGHT, } } } pub struct World_Chunks { chunks: HashMap<Chunk_Coords, World_Chunk>, to_destroy: Event_Callback_Data, } #[derive(Default, Debug)] pub struct World_Chunk { pub colliders: Vec<Collider_Handle>, } impl World_Chunks { pub fn new() -> Self { Self { chunks: HashMap::new(), to_destroy: wrap_cb_data(Vec::<Entity>::new()), } } pub fn init(&mut self, engine_state: &mut Engine_State) { engine_state .systems .evt_register .subscribe::<Evt_Entity_Destroyed>( Box::new(|entity, to_destroy| { with_cb_data(to_destroy.unwrap(), |to_destroy: &mut Vec<Entity>| { to_destroy.push(entity); }); }), Some(self.to_destroy.clone()), ); } pub fn update(&mut self, 
ecs_world: &Ecs_World, phys_world: &Physics_World) { trace!("world_chunks::update"); let mut to_remove = vec![]; with_cb_data(&mut self.to_destroy, |to_destroy: &mut Vec<Entity>| { for &entity in to_destroy.iter() { if let Some(collider) = ecs_world.get_component::<C_Collider>(entity) { for (cld, handle) in phys_world.get_all_colliders_with_handles(collider.phys_body_handle) { to_remove.push((handle, cld.position, cld.shape.extent())); } } } to_destroy.clear(); }); for (cld, pos, extent) in to_remove { self.remove_collider(cld, pos, extent); } } pub fn
(&self) -> usize { self.chunks.len() } pub fn add_collider(&mut self, cld_handle: Collider_Handle, pos: Vec2f, extent: Vec2f) { let mut chunks = vec![]; self.get_all_chunks_containing(pos, extent, &mut chunks); for coords in chunks { self.add_collider_coords(cld_handle, coords); } } fn add_collider_coords(&mut self, cld_handle: Collider_Handle, coords: Chunk_Coords) { let chunk = self .chunks .entry(coords) .or_insert_with(World_Chunk::default); debug_assert!( !chunk.colliders.contains(&cld_handle), "Duplicate collider {:?} in chunk {:?}!", cld_handle, coords ); chunk.colliders.push(cld_handle); } pub fn remove_collider(&mut self, cld_handle: Collider_Handle, pos: Vec2f, extent: Vec2f) { let mut chunks = vec![]; self.get_all_chunks_containing(pos, extent, &mut chunks); for coords in chunks { self.remove_collider_coords(cld_handle, coords); } } fn remove_collider_coords(&mut self, cld_handle: Collider_Handle, coords: Chunk_Coords) { let chunk = self.chunks.get_mut(&coords).unwrap_or_else(|| { fatal!( "Collider {:?} should be in chunk {:?}, but that chunk does not exist.", cld_handle, coords ) }); let idx = chunk.colliders.iter().position(|&c| c == cld_handle); if let Some(idx) = idx { chunk.colliders.remove(idx); if chunk.colliders.is_empty() { self.chunks.remove(&coords); } } else { lerr!( "Collider {:?} not found in expected chunk {:?}.", cld_handle, coords ); } } pub fn update_collider( &mut self, cld_handle: Collider_Handle, prev_pos: Vec2f, new_pos: Vec2f, extent: Vec2f, frame_alloc: &mut Temp_Allocator, ) { trace!("world_chunks::update_collider"); let mut prev_coords = excl_temp_array(frame_alloc); self.get_all_chunks_containing(prev_pos, extent, &mut prev_coords); let prev_coords = unsafe { prev_coords.into_read_only() }; let mut new_coords = excl_temp_array(frame_alloc); self.get_all_chunks_containing(new_pos, extent, &mut new_coords); let new_coords = unsafe { new_coords.into_read_only() }; let mut all_chunks = excl_temp_array(frame_alloc); // Pre-allocate 
enough memory to hold all the chunks; then `chunks_to_add` starts at index 0, // while `chunks_to_remove` starts at index `new_coords.len()`. // This works because we can have at most `new_coords.len()` chunks to add and `prev_coords.len()` // chunks to remove. unsafe { all_chunks.alloc_additional_uninit(new_coords.len() + prev_coords.len()); } let mut n_chunks_to_add = 0; let mut n_chunks_to_remove = 0; let chunks_to_add_offset = 0; let chunks_to_remove_offset = new_coords.len(); // Find chunks to add and to remove in O(n). // This algorithm assumes that both prev_coords and new_coords are sorted and deduped. let mut p_idx = 0; let mut n_idx = 0; while p_idx < prev_coords.len() && n_idx < new_coords.len() { let pc = prev_coords[p_idx]; let nc = new_coords[n_idx]; match pc.cmp(&nc) { Ordering::Less => { all_chunks[chunks_to_remove_offset + n_chunks_to_remove] = pc; n_chunks_to_remove += 1; p_idx += 1; } Ordering::Greater => { all_chunks[chunks_to_add_offset + n_chunks_to_add] = nc; n_chunks_to_add += 1; n_idx += 1; } Ordering::Equal => { p_idx += 1; n_idx += 1; } } } if p_idx < prev_coords.len() { let diff = prev_coords.len() - p_idx; for i in 0..diff { all_chunks[chunks_to_remove_offset + n_chunks_to_remove + i] = prev_coords[p_idx + i]; } n_chunks_to_remove += diff; } else if n_idx < new_coords.len() { let diff = new_coords.len() - n_idx; for i in 0..diff { all_chunks[chunks_to_add_offset + n_chunks_to_add + i] = new_coords[n_idx + i]; } n_chunks_to_add += diff; } #[cfg(debug_assertions)] { let to_remove = all_chunks .iter() .cloned() .skip(chunks_to_remove_offset) .take(n_chunks_to_remove) .collect::<HashSet<_>>(); let to_add = all_chunks .iter() .cloned() .skip(chunks_to_add_offset) .take(n_chunks_to_add) .collect::<HashSet<_>>(); debug_assert_eq!(to_remove.intersection(&to_add).count(), 0); } for coord in all_chunks .iter() .skip(chunks_to_add_offset) .take(n_chunks_to_add) { self.add_collider_coords(cld_handle, *coord); } for coord in all_chunks .iter() 
.skip(chunks_to_remove_offset) .take(n_chunks_to_remove) { self.remove_collider_coords(cld_handle, *coord); } } fn get_all_chunks_containing<T>(&self, pos: Vec2f, extent: Vec2f, coords: &mut T) where T: Extend<Chunk_Coords>, { trace!("get_all_chunks_containing"); #[cfg(debug_assertions)] let mut chk_coords = vec![]; // We need to @Cleanup the -extent*0.5 offset we need to apply and make it consistent throughout the game! let pos = pos - extent * 0.5; let coords_topleft = Chunk_Coords::from_pos(pos); coords.extend(Some(coords_topleft)); #[cfg(debug_assertions)] chk_coords.push(coords_topleft); let coords_botright = Chunk_Coords::from_pos(pos + extent); // Note: we cycle y-major so the result is automatically sorted (as for Chunk_Coords::cmp) for y in 0..=coords_botright.y - coords_topleft.y { for x in 0..=coords_botright.x - coords_topleft.x { if x == 0 && y == 0 { continue; } coords.extend(Some(Chunk_Coords::from_pos( pos + v2!(x as f32 * CHUNK_WIDTH, y as f32 * CHUNK_HEIGHT), ))); #[cfg(debug_assertions)] chk_coords.push(Chunk_Coords::from_pos( pos + v2!(x as f32 * CHUNK_WIDTH, y as f32 * CHUNK_HEIGHT), )); } } #[cfg(debug_assertions)] { // Result should be sorted and deduped // @WaitForStable //debug_assert!(coords.iter().is_sorted()); for i in 1..chk_coords.len() { debug_assert!(chk_coords[i] > chk_coords[i - 1]); } let mut deduped = chk_coords.clone(); deduped.dedup(); debug_assert!(chk_coords.len() == deduped.len()); } } } impl Spatial_Accelerator<Collider_Handle> for World_Chunks { fn get_neighbours<R>(&self, pos: Vec2f, extent: Vec2f, result: &mut R) where R: Extend<Collider_Handle>, { let mut chunks = vec![]; self.get_all_chunks_containing(pos, extent, &mut chunks); for coords in chunks { if let Some(chunk) = self.chunks.get(&coords) { result.extend(chunk.colliders.iter().copied()); } } } } #[cfg(debug_assertions)] impl World_Chunks { pub fn debug_draw(&self, painter: &mut Debug_Painter) { use inle_common::colors; use 
inle_common::paint_props::Paint_Properties; use inle_math::transform::Transform2D; if self.chunks.is_empty() { return; } let max_colliders = self .chunks .iter() .map(|(_, chk)| chk.colliders.len()) .max() .unwrap_or(0) as f32; for (coords, chunk) in &self.chunks { let world_pos = v2!(coords.to_world_pos().x, coords.to_world_pos().y); let col = colors::lerp_col( colors::rgba(0, 150, 0, 100), colors::rgba(150, 0, 0, 100), chunk.colliders.len() as f32 / max_colliders, ); painter.add_rect( v2!(CHUNK_WIDTH, CHUNK_HEIGHT), &Transform2D::from_pos(world_pos), Paint_Properties { color: col, border_color: colors::darken(col, 0.7), border_thick: (CHUNK_WIDTH / 50.).max(5.), ..Default::default() }, ); painter.add_text( &format!("{},{}: {}", coords.x, coords.y, chunk.colliders.len()), world_pos + v2!(10., 5.), (CHUNK_WIDTH as u16 / 10).max(20), colors::rgba(50, 220, 0, 250), ); } } } #[cfg(tests)] mod tests { use super::*; #[test] fn chunk_coords_ord() { assert!(Chunk_Coords { x: 0, y: 0 } < Chunk_Coords { x: 1, y: 0 }); assert!(Chunk_Coords { x: 1, y: 0 } < Chunk_Coords { x: 0, y: 1 }); assert!(Chunk_Coords { x: 1, y: 1 } < Chunk_Coords { x: 2, y: 1 }); assert!(Chunk_Coords { x: 2, y: 1 } < Chunk_Coords { x: 1, y: 2 }); } }
n_chunks
identifier_name
main.rs
/* --- Day 4: Passport Processing --- You arrive at the airport only to realize that you grabbed your North Pole Credentials instead of your passport. While these documents are extremely similar, North Pole Credentials aren't issued by a country and therefore aren't actually valid documentation for travel in most of the world. It seems like you're not the only one having problems, though; a very long line has formed for the automatic passport scanners, and the delay could upset your travel itinerary. Due to some questionable network security, you realize you might be able to solve both of these problems at the same time. The automatic passport scanners are slow because they're having trouble detecting which passports have all required fields. The expected fields are as follows: byr (Birth Year) iyr (Issue Year) eyr (Expiration Year) hgt (Height) hcl (Hair Color) ecl (Eye Color) pid (Passport ID) cid (Country ID) Passport data is validated in batch files (your puzzle input). Each passport is represented as a sequence of key:value pairs separated by spaces or newlines. Passports are separated by blank lines. Here is an example batch file containing four passports: ecl:gry pid:860033327 eyr:2020 hcl:#fffffd byr:1937 iyr:2017 cid:147 hgt:183cm iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884 hcl:#cfa07d byr:1929 hcl:#ae17e1 iyr:2013 eyr:2024 ecl:brn pid:760753108 byr:1931 hgt:179cm hcl:#cfa07d eyr:2025 pid:166559648 iyr:2011 ecl:brn hgt:59in The first passport is valid - all eight fields are present. The second passport is invalid - it is missing hgt (the Height field). The third passport is interesting; the only missing field is cid, so it looks like data from North Pole Credentials, not a passport at all! Surely, nobody would mind if you made the system temporarily ignore missing cid fields. Treat this "passport" as valid. The fourth passport is missing two fields, cid and byr. Missing cid is fine, but missing any other field is not, so this passport is invalid. 
According to the above rules, your improved system would report 2 valid passports. Count the number of valid passports - those that have all required fields. Treat cid as optional. In your batch file, how many passports are valid? --- Part Two --- The line is moving more quickly now, but you overhear airport security talking about how passports with invalid data are getting through. Better add some data validation, quick! You can continue to ignore the cid field, but each other field has strict rules about what values are valid for automatic validation: byr (Birth Year) - four digits; at least 1920 and at most 2002. iyr (Issue Year) - four digits; at least 2010 and at most 2020. eyr (Expiration Year) - four digits; at least 2020 and at most 2030. hgt (Height) - a number followed by either cm or in: If cm, the number must be at least 150 and at most 193. If in, the number must be at least 59 and at most 76. hcl (Hair Color) - a # followed by exactly six characters 0-9 or a-f. ecl (Eye Color) - exactly one of: amb blu brn gry grn hzl oth. pid (Passport ID) - a nine-digit number, including leading zeroes. cid (Country ID) - ignored, missing or not. Your job is to count the passports where all required fields are both present and valid according to the above rules. 
Here are some example values: byr valid: 2002 byr invalid: 2003 hgt valid: 60in hgt valid: 190cm hgt invalid: 190in hgt invalid: 190 hcl valid: #123abc hcl invalid: #123abz hcl invalid: 123abc ecl valid: brn ecl invalid: wat pid valid: 000000001 pid invalid: 0123456789 Here are some invalid passports: eyr:1972 cid:100 hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926 iyr:2019 hcl:#602927 eyr:1967 hgt:170cm ecl:grn pid:012533040 byr:1946 hcl:dab227 iyr:2012 ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277 hgt:59cm ecl:zzz eyr:2038 hcl:74454a iyr:2023 pid:3556412378 byr:2007 Here are some valid passports: pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980 hcl:#623a2f eyr:2029 ecl:blu cid:129 byr:1989 iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm hcl:#888785 hgt:164cm byr:2001 iyr:2015 cid:88 pid:545766238 ecl:hzl eyr:2022 iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719 Count the number of valid passports - those that have all required fields and valid values. Continue to treat cid as optional. In your batch file, how many passports are valid? 
*/ use std::fs::File; use std::io::{BufRead, BufReader}; use std::collections::HashMap; fn main() { let passports = read_in_passports("src/passports.txt"); println!("Passports with required fields = {}", check_required_fields(&passports)); let mut valid_passports = 0; for passport in passports { if validate_passport(&passport) { valid_passports += 1; } } println!("Valid passports = {}", valid_passports); } // struct Passport { // byr: Option<u32>, // iyr: Option<u32>, // eyr: Option<u32>, // hgt: Option<Height>, // hcl: Option<String>, // ecl: Option<String>, // pid: Option<String>, // cid: Option<String>, // } // impl Passport { // fn update_field<T>(&self, field: &str, value: T) { // match field { // "byr" => self.byr = Option::from(value), // "iyr" // } // } // fn check_required_fields(&self) -> bool { // self.byr.is_some() || self.iyr.is_some() || self.eyr.is_some() || // self.hgt.is_some() || self.hcl.is_some() || self.ecl.is_some() || // self.pid.is_some() // } // fn validate() { // } // } // struct Height { // h: u32, // unit: String, // } /// Read in the passports from the defined file and return as a vector of /// HashMap /// /// # Arguments /// /// * `filename` - the filename to read the passports from fn read_in_passports(filename: &str) -> Vec< HashMap<String, String> > { let mut passports: Vec< HashMap<String, String> > = Vec::new(); let file = File::open(filename).unwrap(); let reader = BufReader::new(file); let mut lines = reader.lines(); // iterate over lines until end of file loop { let mut line = lines.next(); if line.is_none() { break; } let mut entry = HashMap::new(); loop { let lin = String::from(line.unwrap().unwrap().trim()); // let l = lin.trim(); if lin.is_empty() { break; } for item in lin.split_whitespace() { let mut pair = item.split(':'); let key = String::from(pair.next().unwrap()); let value = String::from(pair.next().unwrap()); entry.insert(key, value); } line = lines.next(); if line.is_none() { break; } } passports.push(entry); } 
passports } fn check_required_fields(passports: &Vec< HashMap<String, String> >) -> u32 { // Check that all passports contain the required fields // byr - iyr - eyr - hgt - hcl - ecl -pid let required_fields = vec!( String::from("byr"), String::from("iyr"), String::from("eyr"), String::from("hgt"), String::from("hcl"), String::from("ecl"), String::from("pid")); let mut valid_passports_total = 0; for passport in passports { let mut valid = true; for field in &required_fields { if passport.contains_key(field) == false { valid = false; break; } } valid_passports_total += if valid { 1 } else
; } valid_passports_total } fn validate_passport(passport: &HashMap<String, String>) -> bool { let mut result = true; // Birth year if let Some(byr) = passport.get("byr") { result = result && validate_byr(byr); } else { result = false; } // Issue Year if let Some(iyr) = passport.get("iyr") { result = result && validate_iyr(iyr); } else { result = false; } // Expiration Year if let Some(eyr) = passport.get("eyr") { result = result && validate_eyr(eyr); } else { result = false; } // Height if let Some(hgt) = passport.get("hgt") { result = result && validate_hgt(hgt); } else { result = false; } // Hair Colour if let Some(hcl) = passport.get("hcl") { result = result && validate_hcl(hcl); } else { result = false; } // Eye Colour if let Some(ecl) = passport.get("ecl") { result = result && validate_ecl(ecl); } else { result = false; } // Passport ID if let Some(pid) = passport.get("pid") { result = result && validate_pid(pid); } else { result = false; } // Country ID - Ignored result } fn validate_byr(field: &String) -> bool { // Requirement are: // - four digits // - at least 1920 and at most 2002. // Convert to number if let Ok(year) = field.parse::< u32 >() { year >= 1920 && year <= 2002 } else { false } } fn validate_iyr(field: &String) -> bool { // Requirement are: // - four digits // - at least 2010 and at most 2020. // Convert to number if let Ok(year) = field.parse::< u32 >() { year >= 2010 && year <= 2020 } else { false } } fn validate_eyr(field: &String) -> bool { // Requirement are: // - four digits // - at least 2010 and at most 2020. 
// Convert to number if let Ok(year) = field.parse::< u32 >() { year >= 2020 && year <= 2030 } else { false } } fn validate_hgt(field: &String) -> bool { // Requirement are: // - a number followed by cm or in // - if cm -> 150-193 inclusive // - if in -> 59-76 inclusive // Remove last two letters let chars = field.chars().count(); if chars > 2 { let measurement: String = field.chars().take(chars - 2).collect(); let measurement_type: String = field.chars().rev().take(2).collect(); // Convert to number if let Ok(value) = measurement.parse::< u32 >() { match measurement_type.as_str() { // Strings are reversed!!! "mc" => value >= 150 && value <= 193, "ni" => value >= 59 && value <= 76, _ => false } } else { false } } else { false } } fn validate_hcl(field: &String) -> bool { // Requirement are: // - starts with # // - followed by 6 0-9 or a-f charachters let valid_chars = vec!('0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'); let validate_chars = | hcl: &String | -> bool { let mut result = true; for c in hcl.chars() { result = result && valid_chars.contains(&c); } result }; let chars = field.chars().count(); if chars == 7 { if field.chars().next().unwrap() == '#' { let hcl: String = field.chars().rev().take(6).collect(); validate_chars(&hcl) } else { false } } else { false } } fn validate_ecl(field: &String) -> bool { // Requirement are: // - exactly one of: amb blu brn gry grn hzl oth let valid_colors = vec!( String::from("amb"), String::from("blu"), String::from("brn"), String::from("gry"), String::from("grn"), String::from("hzl"), String::from("oth") ); valid_colors.contains(field) } fn validate_pid(field: &String) -> bool { // Requirement are: // - nine-digit number, including leading zeroes let valid_chars = vec!('0', '1', '2', '3', '4', '5', '6', '7', '8', '9'); let validate_chars = | hcl: &String | -> bool { let mut result = true; for c in hcl.chars() { result = result && valid_chars.contains(&c); } result }; let chars = 
field.chars().count(); if chars == 9 { validate_chars(field) } else { false } }
{ 0 }
conditional_block
main.rs
/* --- Day 4: Passport Processing --- You arrive at the airport only to realize that you grabbed your North Pole Credentials instead of your passport. While these documents are extremely similar, North Pole Credentials aren't issued by a country and therefore aren't actually valid documentation for travel in most of the world. It seems like you're not the only one having problems, though; a very long line has formed for the automatic passport scanners, and the delay could upset your travel itinerary. Due to some questionable network security, you realize you might be able to solve both of these problems at the same time. The automatic passport scanners are slow because they're having trouble detecting which passports have all required fields. The expected fields are as follows: byr (Birth Year) iyr (Issue Year) eyr (Expiration Year) hgt (Height) hcl (Hair Color) ecl (Eye Color) pid (Passport ID) cid (Country ID) Passport data is validated in batch files (your puzzle input). Each passport is represented as a sequence of key:value pairs separated by spaces or newlines. Passports are separated by blank lines. Here is an example batch file containing four passports: ecl:gry pid:860033327 eyr:2020 hcl:#fffffd byr:1937 iyr:2017 cid:147 hgt:183cm iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884 hcl:#cfa07d byr:1929 hcl:#ae17e1 iyr:2013 eyr:2024 ecl:brn pid:760753108 byr:1931 hgt:179cm hcl:#cfa07d eyr:2025 pid:166559648 iyr:2011 ecl:brn hgt:59in The first passport is valid - all eight fields are present. The second passport is invalid - it is missing hgt (the Height field). The third passport is interesting; the only missing field is cid, so it looks like data from North Pole Credentials, not a passport at all! Surely, nobody would mind if you made the system temporarily ignore missing cid fields. Treat this "passport" as valid. The fourth passport is missing two fields, cid and byr. Missing cid is fine, but missing any other field is not, so this passport is invalid. 
According to the above rules, your improved system would report 2 valid passports. Count the number of valid passports - those that have all required fields. Treat cid as optional. In your batch file, how many passports are valid? --- Part Two --- The line is moving more quickly now, but you overhear airport security talking about how passports with invalid data are getting through. Better add some data validation, quick! You can continue to ignore the cid field, but each other field has strict rules about what values are valid for automatic validation: byr (Birth Year) - four digits; at least 1920 and at most 2002. iyr (Issue Year) - four digits; at least 2010 and at most 2020. eyr (Expiration Year) - four digits; at least 2020 and at most 2030. hgt (Height) - a number followed by either cm or in: If cm, the number must be at least 150 and at most 193. If in, the number must be at least 59 and at most 76. hcl (Hair Color) - a # followed by exactly six characters 0-9 or a-f. ecl (Eye Color) - exactly one of: amb blu brn gry grn hzl oth. pid (Passport ID) - a nine-digit number, including leading zeroes. cid (Country ID) - ignored, missing or not. Your job is to count the passports where all required fields are both present and valid according to the above rules. 
Here are some example values: byr valid: 2002 byr invalid: 2003 hgt valid: 60in hgt valid: 190cm hgt invalid: 190in hgt invalid: 190 hcl valid: #123abc hcl invalid: #123abz hcl invalid: 123abc ecl valid: brn ecl invalid: wat pid valid: 000000001 pid invalid: 0123456789 Here are some invalid passports: eyr:1972 cid:100 hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926 iyr:2019 hcl:#602927 eyr:1967 hgt:170cm ecl:grn pid:012533040 byr:1946 hcl:dab227 iyr:2012 ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277 hgt:59cm ecl:zzz eyr:2038 hcl:74454a iyr:2023 pid:3556412378 byr:2007 Here are some valid passports: pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980 hcl:#623a2f eyr:2029 ecl:blu cid:129 byr:1989 iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm hcl:#888785 hgt:164cm byr:2001 iyr:2015 cid:88 pid:545766238 ecl:hzl eyr:2022 iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719 Count the number of valid passports - those that have all required fields and valid values. Continue to treat cid as optional. In your batch file, how many passports are valid? 
*/ use std::fs::File; use std::io::{BufRead, BufReader}; use std::collections::HashMap; fn main() { let passports = read_in_passports("src/passports.txt"); println!("Passports with required fields = {}", check_required_fields(&passports)); let mut valid_passports = 0; for passport in passports { if validate_passport(&passport) { valid_passports += 1; } } println!("Valid passports = {}", valid_passports); } // struct Passport { // byr: Option<u32>, // iyr: Option<u32>, // eyr: Option<u32>, // hgt: Option<Height>, // hcl: Option<String>, // ecl: Option<String>, // pid: Option<String>, // cid: Option<String>, // } // impl Passport { // fn update_field<T>(&self, field: &str, value: T) { // match field { // "byr" => self.byr = Option::from(value), // "iyr" // } // } // fn check_required_fields(&self) -> bool { // self.byr.is_some() || self.iyr.is_some() || self.eyr.is_some() || // self.hgt.is_some() || self.hcl.is_some() || self.ecl.is_some() || // self.pid.is_some() // } // fn validate() { // } // } // struct Height { // h: u32, // unit: String, // } /// Read in the passports from the defined file and return as a vector of /// HashMap /// /// # Arguments /// /// * `filename` - the filename to read the passports from fn read_in_passports(filename: &str) -> Vec< HashMap<String, String> > { let mut passports: Vec< HashMap<String, String> > = Vec::new(); let file = File::open(filename).unwrap(); let reader = BufReader::new(file); let mut lines = reader.lines(); // iterate over lines until end of file loop { let mut line = lines.next(); if line.is_none() { break; } let mut entry = HashMap::new(); loop { let lin = String::from(line.unwrap().unwrap().trim()); // let l = lin.trim(); if lin.is_empty() { break; } for item in lin.split_whitespace() { let mut pair = item.split(':'); let key = String::from(pair.next().unwrap()); let value = String::from(pair.next().unwrap()); entry.insert(key, value); } line = lines.next(); if line.is_none() { break; } } passports.push(entry); } 
passports } fn check_required_fields(passports: &Vec< HashMap<String, String> >) -> u32 { // Check that all passports contain the required fields // byr - iyr - eyr - hgt - hcl - ecl -pid let required_fields = vec!( String::from("byr"), String::from("iyr"), String::from("eyr"), String::from("hgt"), String::from("hcl"), String::from("ecl"), String::from("pid")); let mut valid_passports_total = 0; for passport in passports { let mut valid = true; for field in &required_fields { if passport.contains_key(field) == false { valid = false; break; } } valid_passports_total += if valid { 1 } else { 0 }; } valid_passports_total } fn
(passport: &HashMap<String, String>) -> bool { let mut result = true; // Birth year if let Some(byr) = passport.get("byr") { result = result && validate_byr(byr); } else { result = false; } // Issue Year if let Some(iyr) = passport.get("iyr") { result = result && validate_iyr(iyr); } else { result = false; } // Expiration Year if let Some(eyr) = passport.get("eyr") { result = result && validate_eyr(eyr); } else { result = false; } // Height if let Some(hgt) = passport.get("hgt") { result = result && validate_hgt(hgt); } else { result = false; } // Hair Colour if let Some(hcl) = passport.get("hcl") { result = result && validate_hcl(hcl); } else { result = false; } // Eye Colour if let Some(ecl) = passport.get("ecl") { result = result && validate_ecl(ecl); } else { result = false; } // Passport ID if let Some(pid) = passport.get("pid") { result = result && validate_pid(pid); } else { result = false; } // Country ID - Ignored result } fn validate_byr(field: &String) -> bool { // Requirement are: // - four digits // - at least 1920 and at most 2002. // Convert to number if let Ok(year) = field.parse::< u32 >() { year >= 1920 && year <= 2002 } else { false } } fn validate_iyr(field: &String) -> bool { // Requirement are: // - four digits // - at least 2010 and at most 2020. // Convert to number if let Ok(year) = field.parse::< u32 >() { year >= 2010 && year <= 2020 } else { false } } fn validate_eyr(field: &String) -> bool { // Requirement are: // - four digits // - at least 2010 and at most 2020. 
// Convert to number if let Ok(year) = field.parse::< u32 >() { year >= 2020 && year <= 2030 } else { false } } fn validate_hgt(field: &String) -> bool { // Requirement are: // - a number followed by cm or in // - if cm -> 150-193 inclusive // - if in -> 59-76 inclusive // Remove last two letters let chars = field.chars().count(); if chars > 2 { let measurement: String = field.chars().take(chars - 2).collect(); let measurement_type: String = field.chars().rev().take(2).collect(); // Convert to number if let Ok(value) = measurement.parse::< u32 >() { match measurement_type.as_str() { // Strings are reversed!!! "mc" => value >= 150 && value <= 193, "ni" => value >= 59 && value <= 76, _ => false } } else { false } } else { false } } fn validate_hcl(field: &String) -> bool { // Requirement are: // - starts with # // - followed by 6 0-9 or a-f charachters let valid_chars = vec!('0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'); let validate_chars = | hcl: &String | -> bool { let mut result = true; for c in hcl.chars() { result = result && valid_chars.contains(&c); } result }; let chars = field.chars().count(); if chars == 7 { if field.chars().next().unwrap() == '#' { let hcl: String = field.chars().rev().take(6).collect(); validate_chars(&hcl) } else { false } } else { false } } fn validate_ecl(field: &String) -> bool { // Requirement are: // - exactly one of: amb blu brn gry grn hzl oth let valid_colors = vec!( String::from("amb"), String::from("blu"), String::from("brn"), String::from("gry"), String::from("grn"), String::from("hzl"), String::from("oth") ); valid_colors.contains(field) } fn validate_pid(field: &String) -> bool { // Requirement are: // - nine-digit number, including leading zeroes let valid_chars = vec!('0', '1', '2', '3', '4', '5', '6', '7', '8', '9'); let validate_chars = | hcl: &String | -> bool { let mut result = true; for c in hcl.chars() { result = result && valid_chars.contains(&c); } result }; let chars = 
field.chars().count(); if chars == 9 { validate_chars(field) } else { false } }
validate_passport
identifier_name
main.rs
/* --- Day 4: Passport Processing --- You arrive at the airport only to realize that you grabbed your North Pole Credentials instead of your passport. While these documents are extremely similar, North Pole Credentials aren't issued by a country and therefore aren't actually valid documentation for travel in most of the world. It seems like you're not the only one having problems, though; a very long line has formed for the automatic passport scanners, and the delay could upset your travel itinerary. Due to some questionable network security, you realize you might be able to solve both of these problems at the same time. The automatic passport scanners are slow because they're having trouble detecting which passports have all required fields. The expected fields are as follows: byr (Birth Year) iyr (Issue Year) eyr (Expiration Year) hgt (Height) hcl (Hair Color) ecl (Eye Color) pid (Passport ID) cid (Country ID) Passport data is validated in batch files (your puzzle input). Each passport is represented as a sequence of key:value pairs separated by spaces or newlines. Passports are separated by blank lines. Here is an example batch file containing four passports: ecl:gry pid:860033327 eyr:2020 hcl:#fffffd byr:1937 iyr:2017 cid:147 hgt:183cm iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884 hcl:#cfa07d byr:1929 hcl:#ae17e1 iyr:2013 eyr:2024 ecl:brn pid:760753108 byr:1931 hgt:179cm hcl:#cfa07d eyr:2025 pid:166559648 iyr:2011 ecl:brn hgt:59in The first passport is valid - all eight fields are present. The second passport is invalid - it is missing hgt (the Height field). The third passport is interesting; the only missing field is cid, so it looks like data from North Pole Credentials, not a passport at all! Surely, nobody would mind if you made the system temporarily ignore missing cid fields. Treat this "passport" as valid. The fourth passport is missing two fields, cid and byr. Missing cid is fine, but missing any other field is not, so this passport is invalid. 
According to the above rules, your improved system would report 2 valid passports. Count the number of valid passports - those that have all required fields. Treat cid as optional. In your batch file, how many passports are valid? --- Part Two --- The line is moving more quickly now, but you overhear airport security talking about how passports with invalid data are getting through. Better add some data validation, quick! You can continue to ignore the cid field, but each other field has strict rules about what values are valid for automatic validation: byr (Birth Year) - four digits; at least 1920 and at most 2002. iyr (Issue Year) - four digits; at least 2010 and at most 2020. eyr (Expiration Year) - four digits; at least 2020 and at most 2030. hgt (Height) - a number followed by either cm or in: If cm, the number must be at least 150 and at most 193. If in, the number must be at least 59 and at most 76. hcl (Hair Color) - a # followed by exactly six characters 0-9 or a-f. ecl (Eye Color) - exactly one of: amb blu brn gry grn hzl oth. pid (Passport ID) - a nine-digit number, including leading zeroes. cid (Country ID) - ignored, missing or not. Your job is to count the passports where all required fields are both present and valid according to the above rules. Here are some example values: byr valid: 2002 byr invalid: 2003 hgt valid: 60in hgt valid: 190cm hgt invalid: 190in hgt invalid: 190
ecl valid: brn ecl invalid: wat pid valid: 000000001 pid invalid: 0123456789 Here are some invalid passports: eyr:1972 cid:100 hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926 iyr:2019 hcl:#602927 eyr:1967 hgt:170cm ecl:grn pid:012533040 byr:1946 hcl:dab227 iyr:2012 ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277 hgt:59cm ecl:zzz eyr:2038 hcl:74454a iyr:2023 pid:3556412378 byr:2007 Here are some valid passports: pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980 hcl:#623a2f eyr:2029 ecl:blu cid:129 byr:1989 iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm hcl:#888785 hgt:164cm byr:2001 iyr:2015 cid:88 pid:545766238 ecl:hzl eyr:2022 iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719 Count the number of valid passports - those that have all required fields and valid values. Continue to treat cid as optional. In your batch file, how many passports are valid? */ use std::fs::File; use std::io::{BufRead, BufReader}; use std::collections::HashMap; fn main() { let passports = read_in_passports("src/passports.txt"); println!("Passports with required fields = {}", check_required_fields(&passports)); let mut valid_passports = 0; for passport in passports { if validate_passport(&passport) { valid_passports += 1; } } println!("Valid passports = {}", valid_passports); } // struct Passport { // byr: Option<u32>, // iyr: Option<u32>, // eyr: Option<u32>, // hgt: Option<Height>, // hcl: Option<String>, // ecl: Option<String>, // pid: Option<String>, // cid: Option<String>, // } // impl Passport { // fn update_field<T>(&self, field: &str, value: T) { // match field { // "byr" => self.byr = Option::from(value), // "iyr" // } // } // fn check_required_fields(&self) -> bool { // self.byr.is_some() || self.iyr.is_some() || self.eyr.is_some() || // self.hgt.is_some() || self.hcl.is_some() || self.ecl.is_some() || // self.pid.is_some() // } // fn validate() { // } // } // struct Height { // h: u32, // unit: String, // } /// Read in the passports 
/// from the defined file and return as a vector of key/value maps, one map
/// per passport.
///
/// Passports in the batch file are separated by blank lines; within a
/// passport, `key:value` pairs are separated by spaces or newlines.
///
/// # Panics
///
/// Panics if the file cannot be opened or read, or if an item contains no
/// `:` separator (acceptable for this puzzle's trusted input).
fn read_in_passports(filename: &str) -> Vec<HashMap<String, String>> {
    let file = File::open(filename).unwrap();
    let reader = BufReader::new(file);

    let mut passports = Vec::new();
    let mut entry: HashMap<String, String> = HashMap::new();
    for line in reader.lines() {
        let line = line.unwrap();
        let line = line.trim();
        if line.is_empty() {
            // A blank line terminates the current passport record. Skipping
            // empty records also fixes the original's behavior of pushing an
            // empty map when the file starts with a blank line.
            if !entry.is_empty() {
                passports.push(std::mem::take(&mut entry));
            }
            continue;
        }
        for item in line.split_whitespace() {
            // `splitn(2, ..)` keeps any further ':' inside the value intact.
            let mut pair = item.splitn(2, ':');
            let key = pair.next().unwrap().to_string();
            let value = pair.next().unwrap().to_string();
            entry.insert(key, value);
        }
    }
    // The final passport need not be followed by a blank line.
    if !entry.is_empty() {
        passports.push(entry);
    }
    passports
}

/// The seven fields every valid passport must contain (`cid` is optional).
const REQUIRED_FIELDS: [&str; 7] = ["byr", "iyr", "eyr", "hgt", "hcl", "ecl", "pid"];

/// Count the passports that contain every required field (part one of the
/// puzzle). Field *values* are not inspected here.
fn check_required_fields(passports: &Vec<HashMap<String, String>>) -> u32 {
    passports
        .iter()
        .filter(|p| REQUIRED_FIELDS.iter().all(|f| p.contains_key(*f)))
        .count() as u32
}

/// Return `true` when every required field is present *and* holds a valid
/// value (part two of the puzzle). `cid` (Country ID) is deliberately
/// ignored.
fn validate_passport(passport: &HashMap<String, String>) -> bool {
    // A field passes when it is present and its validator accepts the value.
    let field_ok = |key: &str, validate: fn(&str) -> bool| -> bool {
        passport.get(key).map_or(false, |value| validate(value))
    };
    field_ok("byr", validate_byr)
        && field_ok("iyr", validate_iyr)
        && field_ok("eyr", validate_eyr)
        && field_ok("hgt", validate_hgt)
        && field_ok("hcl", validate_hcl)
        && field_ok("ecl", validate_ecl)
        && field_ok("pid", validate_pid)
}

/// Shared year check: parse `field` as an unsigned integer and test it
/// against the inclusive range `min..=max`.
fn year_in_range(field: &str, min: u32, max: u32) -> bool {
    field
        .parse::<u32>()
        .map_or(false, |year| (min..=max).contains(&year))
}

/// Birth Year: at least 1920 and at most 2002.
fn validate_byr(field: &str) -> bool {
    year_in_range(field, 1920, 2002)
}

/// Issue Year: at least 2010 and at most 2020.
fn validate_iyr(field: &str) -> bool {
    year_in_range(field, 2010, 2020)
}

/// Expiration Year: at least 2020 and at most 2030.
/// (The original comment claimed 2010-2020, contradicting both the code and
/// the puzzle statement; the code's 2020-2030 range is correct and kept.)
fn validate_eyr(field: &str) -> bool {
    year_in_range(field, 2020, 2030)
}

/// Height: a number followed by `cm` (150-193) or `in` (59-76), inclusive.
/// A bare number with no unit is invalid.
fn validate_hgt(field: &str) -> bool {
    // `strip_suffix` replaces the original's reversed-string hack, which
    // matched on "mc"/"ni"; behavior is identical.
    if let Some(cm) = field.strip_suffix("cm") {
        cm.parse::<u32>().map_or(false, |v| (150..=193).contains(&v))
    } else if let Some(inches) = field.strip_suffix("in") {
        inches.parse::<u32>().map_or(false, |v| (59..=76).contains(&v))
    } else {
        false
    }
}

/// Hair Color: `#` followed by exactly six lowercase hex digits (`0-9a-f`).
fn validate_hcl(field: &str) -> bool {
    match field.strip_prefix('#') {
        // Uppercase hex is rejected, matching the puzzle's `0-9 or a-f`
        // rule and the original implementation.
        Some(hex) => {
            hex.len() == 6
                && hex
                    .chars()
                    .all(|c| c.is_ascii_digit() || ('a'..='f').contains(&c))
        }
        None => false,
    }
}

/// Eye Color: exactly one of `amb blu brn gry grn hzl oth`.
fn validate_ecl(field: &str) -> bool {
    matches!(field, "amb" | "blu" | "brn" | "gry" | "grn" | "hzl" | "oth")
}

/// Passport ID: exactly nine ASCII digits, leading zeroes allowed.
fn validate_pid(field: &str) -> bool {
    field.len() == 9 && field.chars().all(|c| c.is_ascii_digit())
}
hcl valid: #123abc hcl invalid: #123abz hcl invalid: 123abc
random_line_split
main.rs
/* --- Day 4: Passport Processing --- You arrive at the airport only to realize that you grabbed your North Pole Credentials instead of your passport. While these documents are extremely similar, North Pole Credentials aren't issued by a country and therefore aren't actually valid documentation for travel in most of the world. It seems like you're not the only one having problems, though; a very long line has formed for the automatic passport scanners, and the delay could upset your travel itinerary. Due to some questionable network security, you realize you might be able to solve both of these problems at the same time. The automatic passport scanners are slow because they're having trouble detecting which passports have all required fields. The expected fields are as follows: byr (Birth Year) iyr (Issue Year) eyr (Expiration Year) hgt (Height) hcl (Hair Color) ecl (Eye Color) pid (Passport ID) cid (Country ID) Passport data is validated in batch files (your puzzle input). Each passport is represented as a sequence of key:value pairs separated by spaces or newlines. Passports are separated by blank lines. Here is an example batch file containing four passports: ecl:gry pid:860033327 eyr:2020 hcl:#fffffd byr:1937 iyr:2017 cid:147 hgt:183cm iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884 hcl:#cfa07d byr:1929 hcl:#ae17e1 iyr:2013 eyr:2024 ecl:brn pid:760753108 byr:1931 hgt:179cm hcl:#cfa07d eyr:2025 pid:166559648 iyr:2011 ecl:brn hgt:59in The first passport is valid - all eight fields are present. The second passport is invalid - it is missing hgt (the Height field). The third passport is interesting; the only missing field is cid, so it looks like data from North Pole Credentials, not a passport at all! Surely, nobody would mind if you made the system temporarily ignore missing cid fields. Treat this "passport" as valid. The fourth passport is missing two fields, cid and byr. Missing cid is fine, but missing any other field is not, so this passport is invalid. 
According to the above rules, your improved system would report 2 valid passports. Count the number of valid passports - those that have all required fields. Treat cid as optional. In your batch file, how many passports are valid? --- Part Two --- The line is moving more quickly now, but you overhear airport security talking about how passports with invalid data are getting through. Better add some data validation, quick! You can continue to ignore the cid field, but each other field has strict rules about what values are valid for automatic validation: byr (Birth Year) - four digits; at least 1920 and at most 2002. iyr (Issue Year) - four digits; at least 2010 and at most 2020. eyr (Expiration Year) - four digits; at least 2020 and at most 2030. hgt (Height) - a number followed by either cm or in: If cm, the number must be at least 150 and at most 193. If in, the number must be at least 59 and at most 76. hcl (Hair Color) - a # followed by exactly six characters 0-9 or a-f. ecl (Eye Color) - exactly one of: amb blu brn gry grn hzl oth. pid (Passport ID) - a nine-digit number, including leading zeroes. cid (Country ID) - ignored, missing or not. Your job is to count the passports where all required fields are both present and valid according to the above rules. 
Here are some example values: byr valid: 2002 byr invalid: 2003 hgt valid: 60in hgt valid: 190cm hgt invalid: 190in hgt invalid: 190 hcl valid: #123abc hcl invalid: #123abz hcl invalid: 123abc ecl valid: brn ecl invalid: wat pid valid: 000000001 pid invalid: 0123456789 Here are some invalid passports: eyr:1972 cid:100 hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926 iyr:2019 hcl:#602927 eyr:1967 hgt:170cm ecl:grn pid:012533040 byr:1946 hcl:dab227 iyr:2012 ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277 hgt:59cm ecl:zzz eyr:2038 hcl:74454a iyr:2023 pid:3556412378 byr:2007 Here are some valid passports: pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980 hcl:#623a2f eyr:2029 ecl:blu cid:129 byr:1989 iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm hcl:#888785 hgt:164cm byr:2001 iyr:2015 cid:88 pid:545766238 ecl:hzl eyr:2022 iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719 Count the number of valid passports - those that have all required fields and valid values. Continue to treat cid as optional. In your batch file, how many passports are valid? 
*/ use std::fs::File; use std::io::{BufRead, BufReader}; use std::collections::HashMap; fn main() { let passports = read_in_passports("src/passports.txt"); println!("Passports with required fields = {}", check_required_fields(&passports)); let mut valid_passports = 0; for passport in passports { if validate_passport(&passport) { valid_passports += 1; } } println!("Valid passports = {}", valid_passports); } // struct Passport { // byr: Option<u32>, // iyr: Option<u32>, // eyr: Option<u32>, // hgt: Option<Height>, // hcl: Option<String>, // ecl: Option<String>, // pid: Option<String>, // cid: Option<String>, // } // impl Passport { // fn update_field<T>(&self, field: &str, value: T) { // match field { // "byr" => self.byr = Option::from(value), // "iyr" // } // } // fn check_required_fields(&self) -> bool { // self.byr.is_some() || self.iyr.is_some() || self.eyr.is_some() || // self.hgt.is_some() || self.hcl.is_some() || self.ecl.is_some() || // self.pid.is_some() // } // fn validate() { // } // } // struct Height { // h: u32, // unit: String, // } /// Read in the passports from the defined file and return as a vector of /// HashMap /// /// # Arguments /// /// * `filename` - the filename to read the passports from fn read_in_passports(filename: &str) -> Vec< HashMap<String, String> >
for item in lin.split_whitespace() { let mut pair = item.split(':'); let key = String::from(pair.next().unwrap()); let value = String::from(pair.next().unwrap()); entry.insert(key, value); } line = lines.next(); if line.is_none() { break; } } passports.push(entry); } passports } fn check_required_fields(passports: &Vec< HashMap<String, String> >) -> u32 { // Check that all passports contain the required fields // byr - iyr - eyr - hgt - hcl - ecl -pid let required_fields = vec!( String::from("byr"), String::from("iyr"), String::from("eyr"), String::from("hgt"), String::from("hcl"), String::from("ecl"), String::from("pid")); let mut valid_passports_total = 0; for passport in passports { let mut valid = true; for field in &required_fields { if passport.contains_key(field) == false { valid = false; break; } } valid_passports_total += if valid { 1 } else { 0 }; } valid_passports_total } fn validate_passport(passport: &HashMap<String, String>) -> bool { let mut result = true; // Birth year if let Some(byr) = passport.get("byr") { result = result && validate_byr(byr); } else { result = false; } // Issue Year if let Some(iyr) = passport.get("iyr") { result = result && validate_iyr(iyr); } else { result = false; } // Expiration Year if let Some(eyr) = passport.get("eyr") { result = result && validate_eyr(eyr); } else { result = false; } // Height if let Some(hgt) = passport.get("hgt") { result = result && validate_hgt(hgt); } else { result = false; } // Hair Colour if let Some(hcl) = passport.get("hcl") { result = result && validate_hcl(hcl); } else { result = false; } // Eye Colour if let Some(ecl) = passport.get("ecl") { result = result && validate_ecl(ecl); } else { result = false; } // Passport ID if let Some(pid) = passport.get("pid") { result = result && validate_pid(pid); } else { result = false; } // Country ID - Ignored result } fn validate_byr(field: &String) -> bool { // Requirement are: // - four digits // - at least 1920 and at most 2002. 
// Convert to number if let Ok(year) = field.parse::< u32 >() { year >= 1920 && year <= 2002 } else { false } } fn validate_iyr(field: &String) -> bool { // Requirement are: // - four digits // - at least 2010 and at most 2020. // Convert to number if let Ok(year) = field.parse::< u32 >() { year >= 2010 && year <= 2020 } else { false } } fn validate_eyr(field: &String) -> bool { // Requirement are: // - four digits // - at least 2010 and at most 2020. // Convert to number if let Ok(year) = field.parse::< u32 >() { year >= 2020 && year <= 2030 } else { false } } fn validate_hgt(field: &String) -> bool { // Requirement are: // - a number followed by cm or in // - if cm -> 150-193 inclusive // - if in -> 59-76 inclusive // Remove last two letters let chars = field.chars().count(); if chars > 2 { let measurement: String = field.chars().take(chars - 2).collect(); let measurement_type: String = field.chars().rev().take(2).collect(); // Convert to number if let Ok(value) = measurement.parse::< u32 >() { match measurement_type.as_str() { // Strings are reversed!!! 
"mc" => value >= 150 && value <= 193, "ni" => value >= 59 && value <= 76, _ => false } } else { false } } else { false } } fn validate_hcl(field: &String) -> bool { // Requirement are: // - starts with # // - followed by 6 0-9 or a-f charachters let valid_chars = vec!('0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'); let validate_chars = | hcl: &String | -> bool { let mut result = true; for c in hcl.chars() { result = result && valid_chars.contains(&c); } result }; let chars = field.chars().count(); if chars == 7 { if field.chars().next().unwrap() == '#' { let hcl: String = field.chars().rev().take(6).collect(); validate_chars(&hcl) } else { false } } else { false } } fn validate_ecl(field: &String) -> bool { // Requirement are: // - exactly one of: amb blu brn gry grn hzl oth let valid_colors = vec!( String::from("amb"), String::from("blu"), String::from("brn"), String::from("gry"), String::from("grn"), String::from("hzl"), String::from("oth") ); valid_colors.contains(field) } fn validate_pid(field: &String) -> bool { // Requirement are: // - nine-digit number, including leading zeroes let valid_chars = vec!('0', '1', '2', '3', '4', '5', '6', '7', '8', '9'); let validate_chars = | hcl: &String | -> bool { let mut result = true; for c in hcl.chars() { result = result && valid_chars.contains(&c); } result }; let chars = field.chars().count(); if chars == 9 { validate_chars(field) } else { false } }
{ let mut passports: Vec< HashMap<String, String> > = Vec::new(); let file = File::open(filename).unwrap(); let reader = BufReader::new(file); let mut lines = reader.lines(); // iterate over lines until end of file loop { let mut line = lines.next(); if line.is_none() { break; } let mut entry = HashMap::new(); loop { let lin = String::from(line.unwrap().unwrap().trim()); // let l = lin.trim(); if lin.is_empty() { break; }
identifier_body
reader.rs
}, /// The padding header at the end of the packet, if present, specifies the number of padding /// bytes, including itself, and therefore cannot be less than `1`, or greater than the /// available space. PaddingLengthInvalid(u8), } impl<'a> RtpReader<'a> { /// An RTP packet header is no fewer than 12 bytes long pub const MIN_HEADER_LEN: usize = 12; const EXTENSION_HEADER_LEN: usize = 4; /// Tries to construct a new `RtpHeader` instance, or an `RtpReaderError` if the RTP data is /// malformed. /// /// In particular, if there is too little data in the given buffer, such that some later /// attempt to access an RTP header field would need to access bytes that are not available, /// then this method will fail up front, rather than allowing attempts to access any header /// field to fail later on. pub fn
(b: &'a [u8]) -> Result<RtpReader<'_>, RtpReaderError> { if b.len() < Self::MIN_HEADER_LEN { return Err(RtpReaderError::BufferTooShort(b.len())); } let r = RtpReader { buf: b }; if r.version()!= 2 { return Err(RtpReaderError::UnsupportedVersion(r.version())); } if r.extension_flag() { let extension_start = r.csrc_end() + Self::EXTENSION_HEADER_LEN; if extension_start > b.len() { return Err(RtpReaderError::HeadersTruncated { header_len: extension_start, buffer_len: b.len(), }); } let extension_end = extension_start + r.extension_len(); if extension_end > b.len() { return Err(RtpReaderError::HeadersTruncated { header_len: extension_end, buffer_len: b.len(), }); } } if r.payload_offset() > b.len() { return Err(RtpReaderError::HeadersTruncated { header_len: r.payload_offset(), buffer_len: b.len(), }); } if r.padding_flag() { let post_header_bytes = b.len() - r.payload_offset(); // with 'padding' flag set, there must be at least a single byte after the headers to // hold the padding length if post_header_bytes == 0 { return Err(RtpReaderError::HeadersTruncated { header_len: r.payload_offset(), buffer_len: b.len() - 1, }); } let pad_len = r.padding_len()?; if r.payload_offset() + pad_len as usize > b.len() { return Err(RtpReaderError::PaddingLengthInvalid(pad_len)); } } Ok(r) } /// Version field value (currently only version 2 is supported, so other values will not be /// seen from this release of `rtp-rs`. pub fn version(&self) -> u8 { (self.buf[0] & 0b1100_0000) >> 6 } /// Flag indicating if padding is present at the end of the payload data. 
fn padding_flag(&self) -> bool { (self.buf[0] & 0b0010_0000)!= 0 } /// Returns the size of the padding at the end of this packet, or `None` if the padding flag is /// not set in the packet header pub fn padding(&self) -> Option<u8> { if self.padding_flag() { Some(self.padding_len().unwrap()) } else { None } } fn extension_flag(&self) -> bool { (self.buf[0] & 0b0001_0000)!= 0 } /// A count of the number of CSRC fields present in the RTP headers - may be `0`. /// /// See [csrc()](#method.csrc). pub fn csrc_count(&self) -> u8 { self.buf[0] & 0b0000_1111 } /// A'marker', which may have some definition in the specific RTP profile in use pub fn mark(&self) -> bool { (self.buf[1] & 0b1000_0000)!= 0 } /// Indicates the type of content carried in this RTP packet. /// /// A few types-values are defined in the standard, but in many applications of RTP the value /// of this field needs to be agreed between sender and receiver by some mechanism outside of /// RTP itself. pub fn payload_type(&self) -> u8 { self.buf[1] & 0b0111_1111 } /// The sequence number of this particular packet. /// /// Sequence numbers are 16 bits, and will wrap back to `0` after reaching the maximum 16-bit /// value of `65535`. /// /// Receivers can identify packet losses or reordering by inspecting the value of this field /// across a sequence of received packets. The [`Seq`](struct.Seq.html) wrapper type helps /// calling code reason about sequence number problems in the face of any wraparound that might /// have legitimately happened. pub fn sequence_number(&self) -> Seq { Seq((self.buf[2] as u16) << 8 | (self.buf[3] as u16)) } /// The timestamp of this packet, given in a timebase that relates to the particular /// `payload_type` in use. /// /// It is perfectly possible for successive packets in a sequence to have the same value, or /// to have values that differ by arbitrarily large amounts. 
/// /// Timestamps are 32 bits, and will wrap back to `0` after reaching the maximum 32 bit value /// of `4294967295`. pub fn timestamp(&self) -> u32 { (self.buf[4] as u32) << 24 | (self.buf[5] as u32) << 16 | (self.buf[6] as u32) << 8 | (self.buf[7] as u32) } /// The _synchronisation source_ for this packet. Many applications of RTP do not use this /// field. pub fn ssrc(&self) -> u32 { (self.buf[8] as u32) << 24 | (self.buf[9] as u32) << 16 | (self.buf[10] as u32) << 8 | (self.buf[11] as u32) } /// A potentially empty list of _contributing sources_ for this packet. Many applications of /// RTP do not use this field. pub fn csrc(&self) -> impl Iterator<Item = u32> + '_ { self.buf[Self::MIN_HEADER_LEN..] .chunks(4) .take(self.csrc_count() as usize) .map(|b| (b[0] as u32) << 24 | (b[1] as u32) << 16 | (b[2] as u32) << 8 | (b[3] as u32)) } /// Returns the offset of the payload for the packet pub fn payload_offset(&self) -> usize { let offset = self.csrc_end(); if self.extension_flag() { offset + Self::EXTENSION_HEADER_LEN + self.extension_len() } else { offset } } fn csrc_end(&self) -> usize { Self::MIN_HEADER_LEN + (4 * self.csrc_count()) as usize } /// Returns the payload data of this RTP packet, excluding the packet's headers and any /// optional trailing padding. pub fn payload(&self) -> &'a [u8] { let pad = if self.padding_flag() { // in Self::new(), we already checked this was Ok, and will not attempt an invalid // slice below, self.padding_len().unwrap() as usize } else { 0 }; &self.buf[self.payload_offset()..self.buf.len() - pad] } fn extension_len(&self) -> usize { let offset = self.csrc_end(); // The 16 bit extension length header gives a length in 32 bit (4 byte) units; 0 is a // valid length. 
4 * ((self.buf[offset + 2] as usize) << 8 | (self.buf[offset + 3] as usize)) } // must only be used if padding() returns true fn padding_len(&self) -> Result<u8, RtpReaderError> { match self.buf[self.buf.len() - 1] { 0 => Err(RtpReaderError::PaddingLengthInvalid(0)), l => Ok(l), } } /// Returns details of the optional RTP header extension field. If there is an extension, /// the first component of the resulting tuple is the extension id, and the second is a /// byte-slice for the extension data value, to be interpreted by the application. pub fn extension(&self) -> Option<(u16, &'a [u8])> { if self.extension_flag() { let offset = self.csrc_end(); let id = (self.buf[offset] as u16) << 8 | (self.buf[offset + 1] as u16); let start = offset + 4; Some((id, &self.buf[start..start + self.extension_len()])) } else { None } } /// Create a `RtpPacketBuilder` from this packet. **Note** that padding from the original /// packet will not be used by default, and must be defined on the resulting `RtpPacketBuilder` /// if required. /// /// The padding is not copied from the original since, while we do know how many padding bytes /// were present, we don't know if the intent was to round to 2 bytes, 4 bytes, etc. Blindly /// copying the padding could result in an incorrect result _if_ the payload is subsequently /// changed for one with a different length. /// /// If you know your output packets don't need padding, there is nothing more to do, since /// that is the default for the resulting `RtpPacketBulder`. /// /// If you know you output packets need padding to 4 bytes, then you _must_ explicitly specify /// this using `builder.padded(Pad::round_to(4))` even if the source packet was already padded /// to a 4 byte boundary. 
pub fn create_builder(&self) -> RtpPacketBuilder<'a> { let mut builder = RtpPacketBuilder::new() .payload_type(self.payload_type()) .marked(self.mark()) .sequence(self.sequence_number()) .ssrc(self.ssrc()) .timestamp(self.timestamp()) .payload(self.payload()); if let Some(ext) = self.extension() { builder = builder.extension(ext.0, ext.1); } for csrc in self.csrc() { builder = builder.add_csrc(csrc); } builder } } impl<'a> fmt::Debug for RtpReader<'a> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { f.debug_struct("RtpReader") .field("version", &self.version()) .field("padding", &self.padding()) .field("extension", &self.extension().map(|(id, _)| id)) .field("csrc_count", &self.csrc_count()) .field("mark", &self.mark()) .field("payload_type", &self.payload_type()) .field("sequence_number", &self.sequence_number()) .field("timestamp", &self.timestamp()) .field("ssrc", &self.ssrc()) .field("payload_length", &self.payload().len()) .finish() } } #[cfg(test)] mod tests { use super::*; use crate::IntoSeqIterator; const TEST_RTP_PACKET: [u8; 391] = [ 0x80u8, 0xe0u8, 0x27u8, 0x38u8, 0x64u8, 0xe4u8, 0x05u8, 0xa7u8, 0xa2u8, 0x42u8, 0xafu8, 0x01u8, 0x3cu8, 0x41u8, 0xa4u8, 0xa3u8, 0x5du8, 0x13u8, 0xf9u8, 0xcau8, 0x2cu8, 0x7eu8, 0xa9u8, 0x77u8, 0xaau8, 0xdeu8, 0xf7u8, 0xcau8, 0xa4u8, 0x28u8, 0xfeu8, 0xdfu8, 0xc8u8, 0x68u8, 0xf1u8, 0xd9u8, 0x4fu8, 0x69u8, 0x96u8, 0xa0u8, 0x57u8, 0xbau8, 0xfbu8, 0x07u8, 0xc4u8, 0xc4u8, 0xd4u8, 0xfeu8, 0xf8u8, 0xc7u8, 0xb2u8, 0x0du8, 0x01u8, 0x12u8, 0x14u8, 0x36u8, 0x69u8, 0x75u8, 0xf2u8, 0xb4u8, 0xb5u8, 0xf2u8, 0x54u8, 0x2eu8, 0xc2u8, 0x66u8, 0x51u8, 0xebu8, 0x41u8, 0x80u8, 0x96u8, 0xceu8, 0x8eu8, 0x60u8, 0xb2u8, 0x44u8, 0xaeu8, 0xe5u8, 0x43u8, 0xadu8, 0x7bu8, 0x48u8, 0x89u8, 0x44u8, 0xb0u8, 0x48u8, 0x67u8, 0x6au8, 0x84u8, 0x7au8, 0x0au8, 0x8fu8, 0x71u8, 0x50u8, 0x69u8, 0xe6u8, 0xb1u8, 0x05u8, 0x40u8, 0xb9u8, 0x8cu8, 0xafu8, 0x42u8, 0xcbu8, 0x58u8, 0x83u8, 0xcbu8, 0x32u8, 0x64u8, 0xd2u8, 0x2au8, 0x7du8, 0x4eu8, 0xf5u8, 
0xbcu8, 0x33u8, 0xfeu8, 0xb7u8, 0x0cu8, 0xe4u8, 0x8eu8, 0x38u8, 0xbcu8, 0x3au8, 0x1eu8, 0xd2u8, 0x56u8, 0x13u8, 0x23u8, 0x47u8, 0xcfu8, 0x42u8, 0xa9u8, 0xbbu8, 0xcfu8, 0x48u8, 0xf3u8, 0x11u8, 0xc7u8, 0xfdu8, 0x73u8, 0x2du8, 0xe1u8, 0xeau8, 0x47u8, 0x5cu8, 0x5du8, 0x11u8, 0x96u8, 0x1eu8, 0xc4u8, 0x70u8, 0x32u8, 0x77u8, 0xabu8, 0x31u8, 0x7au8, 0xb1u8, 0x22u8, 0x14u8, 0x8du8, 0x2bu8, 0xecu8, 0x3du8, 0x67u8, 0x97u8, 0xa4u8, 0x40u8, 0x21u8, 0x1eu8, 0xceu8, 0xb0u8, 0x63u8, 0x01u8, 0x75u8, 0x77u8, 0x03u8, 0x15u8, 0xcdu8, 0x35u8, 0xa1u8, 0x2fu8, 0x4bu8, 0xa0u8, 0xacu8, 0x8du8, 0xd7u8, 0x78u8, 0x02u8, 0x23u8, 0xcbu8, 0xfdu8, 0x82u8, 0x4eu8, 0x0bu8, 0x79u8, 0x7fu8, 0x39u8, 0x70u8, 0x26u8, 0x66u8, 0x37u8, 0xe9u8, 0x93u8, 0x91u8, 0x7bu8, 0xc4u8, 0x80u8, 0xa9u8, 0x18u8, 0x23u8, 0xb3u8, 0xa1u8, 0x04u8, 0x72u8, 0x53u8, 0xa0u8, 0xb4u8, 0xffu8, 0x79u8, 0x1fu8, 0x07u8, 0xe2u8, 0x5du8, 0x01u8, 0x7du8, 0x63u8, 0xc1u8, 0x16u8, 0x89u8, 0x23u8, 0x4au8, 0x17u8, 0xbbu8, 0x6du8, 0x0du8, 0x81u8, 0x1au8, 0xbbu8, 0x94u8, 0x5bu8, 0xcbu8, 0x2du8, 0xdeu8, 0x98u8, 0x40u8, 0x22u8, 0x62u8, 0x41u8, 0xc2u8, 0x9bu8, 0x95u8, 0x85u8, 0x60u8, 0xf0u8, 0xdeu8, 0x6fu8, 0xeeu8, 0x93u8, 0xccu8, 0x15u8, 0x76u8, 0xfbu8, 0xf8u8, 0x8au8, 0x1du8, 0xe1u8, 0x83u8, 0x12u8, 0xabu8, 0x25u8, 0x6au8, 0x7bu8, 0x89u8, 0xedu8, 0x70u8, 0x4eu8, 0xcdu8, 0x1eu8, 0xa9u8, 0xfcu8, 0xa8u8, 0x22u8, 0x91u8, 0x5fu8, 0x50u8, 0x68u8, 0x6au8, 0x35u8, 0xf7u8, 0xc1u8, 0x1eu8, 0x15u8, 0x37u8, 0xb4u8, 0x30u8, 0x62u8, 0x56u8, 0x1eu8, 0x2eu8, 0xe0u8, 0x2du8, 0xa4u8, 0x1eu8, 0x75u8, 0x5bu8, 0xc7u8, 0xd0u8, 0x5bu8, 0x9du8, 0xd0u8, 0x25u8, 0x76u8, 0xdfu8, 0xa7u8, 0x19u8, 0x12u8, 0x93u8, 0xf4u8, 0xebu8, 0x02u8, 0xf2u8, 0x4au8, 0x13u8, 0xe9u8, 0x1cu8, 0x17u8, 0xccu8, 0x11u8, 0x87u8, 0x9cu8, 0xa6u8, 0x40u8, 0x27u8, 0xb7u8, 0x2bu8, 0x9bu8, 0x6fu8, 0x23u8, 0x06u8, 0x2cu8, 0xc6u8, 0x6eu8, 0xc1u8, 0x9au8, 0xbdu8, 0x59u8, 0x37u8, 0xe9u8, 0x9eu8, 0x76u8, 0xf6u8, 0xc1u8, 0xbcu8, 0x81u8, 0x18u8, 0x60u8, 0xc9u8, 0x64u8, 0x0au8, 0xb3u8, 0x6eu8, 0xf3u8, 0x6bu8, 
0xb9u8, 0xd0u8, 0xf6u8, 0xe0u8, 0x9bu8, 0x91u8, 0xc1u8, 0x0fu8, 0x96u8, 0xefu8, 0xbcu8, 0x5fu8, 0x8eu8, 0x86u8, 0x56u8, 0x5au8, 0xfcu8, 0x7au8, 0x8bu8, 0xddu8, 0x9au8, 0x1cu8, 0xf6u8, 0xb4u8, 0x85u8, 0xf4u8, 0xb0u8, ]; const TEST_RTP_PACKET_WITH_EXTENSION: [u8; 63] = [ 144u8, 111u8, 79u8, 252u8, 224u8, 94u8, 104u8, 203u8, 30u8, 112u8, 208u8, 191u8, 190u8, 222u8, 0u8, 3u8, 34u8, 175u8, 185u8, 88u8, 49u8, 0u8, 171u8, 64u8, 48u8, 16u8, 219u8, 0u8, 104u8, 9u8, 136u8, 90u8, 174u8, 145u8, 68u8, 165u8, 227u8, 178u8, 187u8, 68u8, 166u8, 66u8, 235u8, 40u8, 171u8, 135u8, 30u8, 174u8, 130u8, 239u8, 205u8, 14u8, 211u8, 232u8, 65u8, 67u8, 153u8, 120u8, 63u8, 17u8, 101u8, 55u8, 17u8 ]; #[test] fn version() { let reader = RtpReader::new(&TEST_RTP_PACKET).unwrap(); assert_eq!(2, reader.version()); assert!(reader.padding().is_none()); assert!(reader.extension().is_none()); assert_eq!(0, reader.csrc_count()); assert!(reader.mark()); assert_eq!(96, reader.payload_type()); assert_eq!(Seq(10040), reader.sequence_number()); assert_eq!(1_692_665_255, reader.timestamp()); assert_eq!(0xa242_af01, reader.ssrc()); assert_eq!(379, reader.payload().len()); format!("{:?}", reader); } #[test] fn padding() { let reader = RtpReader::new(&TEST_RTP_PACKET_WITH_EXTENSION).unwrap(); assert_eq!(2, reader.version()); assert!(reader.padding().is_none()); assert!(reader.extension().is_some()); assert_eq!(0, reader.csrc_count()); assert_eq!(111, reader.payload_type()); } #[test] fn padding_too_large() { // 'padding' header-flag is on, and padding length (255) in final byte is larger than the // buffer length. (Test data created by fuzzing.) 
let data = [ 0xa2, 0xa2, 0xa2, 0xa2, 0xa2, 0x90, 0x0, 0x0, 0x1, 0x0, 0xff, 0xa2, 0xa2, 0xa2, 0xa2, 0x90, 0x0, 0x0, 0x0, 0x0, 0xff, ]; assert!(RtpReader::new(&data).is_err()); } #[test] fn builder_juggle() { let reader = RtpReader::new(&TEST_RTP_PACKET).unwrap(); let buffer = reader.create_builder().build().unwrap(); assert_eq!(&buffer.as_slice()[..], &TEST_RTP_PACKET[..]); }
new
identifier_name
reader.rs
}, /// The padding header at the end of the packet, if present, specifies the number of padding /// bytes, including itself, and therefore cannot be less than `1`, or greater than the /// available space. PaddingLengthInvalid(u8), } impl<'a> RtpReader<'a> { /// An RTP packet header is no fewer than 12 bytes long pub const MIN_HEADER_LEN: usize = 12; const EXTENSION_HEADER_LEN: usize = 4; /// Tries to construct a new `RtpHeader` instance, or an `RtpReaderError` if the RTP data is /// malformed. /// /// In particular, if there is too little data in the given buffer, such that some later /// attempt to access an RTP header field would need to access bytes that are not available, /// then this method will fail up front, rather than allowing attempts to access any header /// field to fail later on. pub fn new(b: &'a [u8]) -> Result<RtpReader<'_>, RtpReaderError> { if b.len() < Self::MIN_HEADER_LEN { return Err(RtpReaderError::BufferTooShort(b.len())); } let r = RtpReader { buf: b }; if r.version()!= 2 { return Err(RtpReaderError::UnsupportedVersion(r.version())); } if r.extension_flag() { let extension_start = r.csrc_end() + Self::EXTENSION_HEADER_LEN; if extension_start > b.len() { return Err(RtpReaderError::HeadersTruncated { header_len: extension_start, buffer_len: b.len(), }); } let extension_end = extension_start + r.extension_len(); if extension_end > b.len() { return Err(RtpReaderError::HeadersTruncated { header_len: extension_end, buffer_len: b.len(), }); } } if r.payload_offset() > b.len() { return Err(RtpReaderError::HeadersTruncated { header_len: r.payload_offset(), buffer_len: b.len(), }); } if r.padding_flag() { let post_header_bytes = b.len() - r.payload_offset(); // with 'padding' flag set, there must be at least a single byte after the headers to // hold the padding length if post_header_bytes == 0 { return Err(RtpReaderError::HeadersTruncated { header_len: r.payload_offset(), buffer_len: b.len() - 1, }); } let pad_len = r.padding_len()?; if 
r.payload_offset() + pad_len as usize > b.len() { return Err(RtpReaderError::PaddingLengthInvalid(pad_len)); } } Ok(r) } /// Version field value (currently only version 2 is supported, so other values will not be /// seen from this release of `rtp-rs`. pub fn version(&self) -> u8 { (self.buf[0] & 0b1100_0000) >> 6 } /// Flag indicating if padding is present at the end of the payload data. fn padding_flag(&self) -> bool { (self.buf[0] & 0b0010_0000)!= 0 } /// Returns the size of the padding at the end of this packet, or `None` if the padding flag is /// not set in the packet header pub fn padding(&self) -> Option<u8> { if self.padding_flag() { Some(self.padding_len().unwrap()) } else { None } } fn extension_flag(&self) -> bool { (self.buf[0] & 0b0001_0000)!= 0 } /// A count of the number of CSRC fields present in the RTP headers - may be `0`. /// /// See [csrc()](#method.csrc). pub fn csrc_count(&self) -> u8 { self.buf[0] & 0b0000_1111 } /// A'marker', which may have some definition in the specific RTP profile in use pub fn mark(&self) -> bool { (self.buf[1] & 0b1000_0000)!= 0 } /// Indicates the type of content carried in this RTP packet. /// /// A few types-values are defined in the standard, but in many applications of RTP the value /// of this field needs to be agreed between sender and receiver by some mechanism outside of /// RTP itself. pub fn payload_type(&self) -> u8 { self.buf[1] & 0b0111_1111 } /// The sequence number of this particular packet. /// /// Sequence numbers are 16 bits, and will wrap back to `0` after reaching the maximum 16-bit /// value of `65535`. /// /// Receivers can identify packet losses or reordering by inspecting the value of this field /// across a sequence of received packets. The [`Seq`](struct.Seq.html) wrapper type helps /// calling code reason about sequence number problems in the face of any wraparound that might /// have legitimately happened. 
pub fn sequence_number(&self) -> Seq { Seq((self.buf[2] as u16) << 8 | (self.buf[3] as u16)) } /// The timestamp of this packet, given in a timebase that relates to the particular /// `payload_type` in use. /// /// It is perfectly possible for successive packets in a sequence to have the same value, or /// to have values that differ by arbitrarily large amounts. /// /// Timestamps are 32 bits, and will wrap back to `0` after reaching the maximum 32 bit value /// of `4294967295`. pub fn timestamp(&self) -> u32 { (self.buf[4] as u32) << 24 | (self.buf[5] as u32) << 16 | (self.buf[6] as u32) << 8 | (self.buf[7] as u32) } /// The _synchronisation source_ for this packet. Many applications of RTP do not use this /// field. pub fn ssrc(&self) -> u32 { (self.buf[8] as u32) << 24 | (self.buf[9] as u32) << 16 | (self.buf[10] as u32) << 8 | (self.buf[11] as u32) } /// A potentially empty list of _contributing sources_ for this packet. Many applications of /// RTP do not use this field. pub fn csrc(&self) -> impl Iterator<Item = u32> + '_ { self.buf[Self::MIN_HEADER_LEN..] .chunks(4) .take(self.csrc_count() as usize) .map(|b| (b[0] as u32) << 24 | (b[1] as u32) << 16 | (b[2] as u32) << 8 | (b[3] as u32)) } /// Returns the offset of the payload for the packet pub fn payload_offset(&self) -> usize { let offset = self.csrc_end(); if self.extension_flag() { offset + Self::EXTENSION_HEADER_LEN + self.extension_len() } else { offset } } fn csrc_end(&self) -> usize { Self::MIN_HEADER_LEN + (4 * self.csrc_count()) as usize } /// Returns the payload data of this RTP packet, excluding the packet's headers and any /// optional trailing padding. 
pub fn payload(&self) -> &'a [u8] { let pad = if self.padding_flag() { // in Self::new(), we already checked this was Ok, and will not attempt an invalid // slice below, self.padding_len().unwrap() as usize } else { 0 }; &self.buf[self.payload_offset()..self.buf.len() - pad] } fn extension_len(&self) -> usize { let offset = self.csrc_end(); // The 16 bit extension length header gives a length in 32 bit (4 byte) units; 0 is a // valid length. 4 * ((self.buf[offset + 2] as usize) << 8 | (self.buf[offset + 3] as usize)) } // must only be used if padding() returns true fn padding_len(&self) -> Result<u8, RtpReaderError> { match self.buf[self.buf.len() - 1] { 0 => Err(RtpReaderError::PaddingLengthInvalid(0)), l => Ok(l), } } /// Returns details of the optional RTP header extension field. If there is an extension, /// the first component of the resulting tuple is the extension id, and the second is a /// byte-slice for the extension data value, to be interpreted by the application. pub fn extension(&self) -> Option<(u16, &'a [u8])> { if self.extension_flag() { let offset = self.csrc_end(); let id = (self.buf[offset] as u16) << 8 | (self.buf[offset + 1] as u16); let start = offset + 4; Some((id, &self.buf[start..start + self.extension_len()])) } else { None } } /// Create a `RtpPacketBuilder` from this packet. **Note** that padding from the original /// packet will not be used by default, and must be defined on the resulting `RtpPacketBuilder` /// if required. /// /// The padding is not copied from the original since, while we do know how many padding bytes /// were present, we don't know if the intent was to round to 2 bytes, 4 bytes, etc. Blindly /// copying the padding could result in an incorrect result _if_ the payload is subsequently /// changed for one with a different length. /// /// If you know your output packets don't need padding, there is nothing more to do, since /// that is the default for the resulting `RtpPacketBulder`. 
/// /// If you know you output packets need padding to 4 bytes, then you _must_ explicitly specify /// this using `builder.padded(Pad::round_to(4))` even if the source packet was already padded /// to a 4 byte boundary. pub fn create_builder(&self) -> RtpPacketBuilder<'a> { let mut builder = RtpPacketBuilder::new() .payload_type(self.payload_type()) .marked(self.mark()) .sequence(self.sequence_number()) .ssrc(self.ssrc()) .timestamp(self.timestamp()) .payload(self.payload()); if let Some(ext) = self.extension() { builder = builder.extension(ext.0, ext.1); } for csrc in self.csrc() { builder = builder.add_csrc(csrc); } builder } } impl<'a> fmt::Debug for RtpReader<'a> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { f.debug_struct("RtpReader") .field("version", &self.version()) .field("padding", &self.padding()) .field("extension", &self.extension().map(|(id, _)| id)) .field("csrc_count", &self.csrc_count()) .field("mark", &self.mark()) .field("payload_type", &self.payload_type()) .field("sequence_number", &self.sequence_number()) .field("timestamp", &self.timestamp()) .field("ssrc", &self.ssrc()) .field("payload_length", &self.payload().len()) .finish() } } #[cfg(test)] mod tests { use super::*; use crate::IntoSeqIterator; const TEST_RTP_PACKET: [u8; 391] = [ 0x80u8, 0xe0u8, 0x27u8, 0x38u8, 0x64u8, 0xe4u8, 0x05u8, 0xa7u8, 0xa2u8, 0x42u8, 0xafu8, 0x01u8, 0x3cu8, 0x41u8, 0xa4u8, 0xa3u8, 0x5du8, 0x13u8, 0xf9u8, 0xcau8, 0x2cu8, 0x7eu8, 0xa9u8, 0x77u8, 0xaau8, 0xdeu8, 0xf7u8, 0xcau8, 0xa4u8, 0x28u8, 0xfeu8, 0xdfu8, 0xc8u8, 0x68u8, 0xf1u8, 0xd9u8, 0x4fu8, 0x69u8, 0x96u8, 0xa0u8, 0x57u8, 0xbau8, 0xfbu8, 0x07u8, 0xc4u8, 0xc4u8, 0xd4u8, 0xfeu8, 0xf8u8, 0xc7u8, 0xb2u8, 0x0du8, 0x01u8, 0x12u8, 0x14u8, 0x36u8, 0x69u8, 0x75u8, 0xf2u8, 0xb4u8, 0xb5u8, 0xf2u8, 0x54u8, 0x2eu8, 0xc2u8, 0x66u8, 0x51u8, 0xebu8, 0x41u8, 0x80u8, 0x96u8, 0xceu8, 0x8eu8, 0x60u8, 0xb2u8, 0x44u8, 0xaeu8, 0xe5u8, 0x43u8, 0xadu8, 0x7bu8, 0x48u8, 0x89u8, 0x44u8, 0xb0u8, 0x48u8, 0x67u8, 
0x6au8, 0x84u8, 0x7au8, 0x0au8, 0x8fu8, 0x71u8, 0x50u8, 0x69u8, 0xe6u8, 0xb1u8, 0x05u8, 0x40u8, 0xb9u8, 0x8cu8, 0xafu8, 0x42u8, 0xcbu8, 0x58u8, 0x83u8, 0xcbu8, 0x32u8, 0x64u8, 0xd2u8, 0x2au8, 0x7du8, 0x4eu8, 0xf5u8, 0xbcu8, 0x33u8, 0xfeu8, 0xb7u8, 0x0cu8, 0xe4u8, 0x8eu8, 0x38u8, 0xbcu8, 0x3au8, 0x1eu8, 0xd2u8, 0x56u8, 0x13u8, 0x23u8, 0x47u8, 0xcfu8, 0x42u8, 0xa9u8, 0xbbu8, 0xcfu8, 0x48u8, 0xf3u8, 0x11u8, 0xc7u8, 0xfdu8, 0x73u8, 0x2du8, 0xe1u8, 0xeau8, 0x47u8, 0x5cu8, 0x5du8, 0x11u8, 0x96u8, 0x1eu8, 0xc4u8, 0x70u8, 0x32u8, 0x77u8, 0xabu8, 0x31u8, 0x7au8, 0xb1u8, 0x22u8, 0x14u8, 0x8du8, 0x2bu8, 0xecu8, 0x3du8, 0x67u8, 0x97u8, 0xa4u8, 0x40u8, 0x21u8, 0x1eu8, 0xceu8, 0xb0u8, 0x63u8, 0x01u8, 0x75u8, 0x77u8, 0x03u8, 0x15u8, 0xcdu8, 0x35u8, 0xa1u8, 0x2fu8, 0x4bu8, 0xa0u8, 0xacu8, 0x8du8, 0xd7u8, 0x78u8, 0x02u8, 0x23u8, 0xcbu8, 0xfdu8, 0x82u8, 0x4eu8, 0x0bu8, 0x79u8, 0x7fu8, 0x39u8, 0x70u8, 0x26u8, 0x66u8, 0x37u8, 0xe9u8, 0x93u8, 0x91u8, 0x7bu8, 0xc4u8, 0x80u8, 0xa9u8, 0x18u8, 0x23u8, 0xb3u8, 0xa1u8, 0x04u8, 0x72u8, 0x53u8, 0xa0u8, 0xb4u8, 0xffu8, 0x79u8, 0x1fu8, 0x07u8, 0xe2u8, 0x5du8, 0x01u8, 0x7du8, 0x63u8, 0xc1u8, 0x16u8, 0x89u8, 0x23u8, 0x4au8, 0x17u8, 0xbbu8, 0x6du8, 0x0du8, 0x81u8, 0x1au8, 0xbbu8, 0x94u8, 0x5bu8, 0xcbu8, 0x2du8, 0xdeu8, 0x98u8, 0x40u8, 0x22u8, 0x62u8, 0x41u8, 0xc2u8, 0x9bu8, 0x95u8, 0x85u8, 0x60u8, 0xf0u8, 0xdeu8, 0x6fu8, 0xeeu8, 0x93u8, 0xccu8, 0x15u8, 0x76u8, 0xfbu8, 0xf8u8, 0x8au8, 0x1du8, 0xe1u8, 0x83u8, 0x12u8, 0xabu8, 0x25u8, 0x6au8, 0x7bu8, 0x89u8, 0xedu8, 0x70u8, 0x4eu8, 0xcdu8, 0x1eu8, 0xa9u8, 0xfcu8, 0xa8u8, 0x22u8, 0x91u8, 0x5fu8, 0x50u8, 0x68u8, 0x6au8, 0x35u8, 0xf7u8, 0xc1u8, 0x1eu8, 0x15u8, 0x37u8, 0xb4u8, 0x30u8, 0x62u8, 0x56u8, 0x1eu8, 0x2eu8, 0xe0u8, 0x2du8, 0xa4u8, 0x1eu8, 0x75u8, 0x5bu8, 0xc7u8, 0xd0u8, 0x5bu8, 0x9du8, 0xd0u8, 0x25u8, 0x76u8, 0xdfu8, 0xa7u8, 0x19u8, 0x12u8, 0x93u8, 0xf4u8, 0xebu8, 0x02u8, 0xf2u8, 0x4au8, 0x13u8, 0xe9u8, 0x1cu8, 0x17u8, 0xccu8, 0x11u8, 0x87u8, 0x9cu8, 0xa6u8, 0x40u8, 0x27u8, 0xb7u8, 0x2bu8, 0x9bu8, 
0x6fu8, 0x23u8, 0x06u8, 0x2cu8, 0xc6u8, 0x6eu8, 0xc1u8, 0x9au8, 0xbdu8, 0x59u8, 0x37u8, 0xe9u8, 0x9eu8, 0x76u8, 0xf6u8, 0xc1u8, 0xbcu8, 0x81u8, 0x18u8, 0x60u8, 0xc9u8, 0x64u8, 0x0au8, 0xb3u8, 0x6eu8, 0xf3u8, 0x6bu8, 0xb9u8, 0xd0u8, 0xf6u8, 0xe0u8, 0x9bu8, 0x91u8, 0xc1u8, 0x0fu8, 0x96u8, 0xefu8, 0xbcu8, 0x5fu8, 0x8eu8, 0x86u8, 0x56u8, 0x5au8, 0xfcu8, 0x7au8, 0x8bu8, 0xddu8, 0x9au8, 0x1cu8, 0xf6u8, 0xb4u8, 0x85u8, 0xf4u8, 0xb0u8, ]; const TEST_RTP_PACKET_WITH_EXTENSION: [u8; 63] = [ 144u8, 111u8, 79u8, 252u8, 224u8, 94u8, 104u8, 203u8, 30u8, 112u8, 208u8, 191u8, 190u8, 222u8, 0u8, 3u8, 34u8, 175u8, 185u8, 88u8, 49u8, 0u8, 171u8, 64u8, 48u8, 16u8, 219u8, 0u8, 104u8, 9u8, 136u8, 90u8, 174u8, 145u8, 68u8, 165u8, 227u8, 178u8, 187u8, 68u8, 166u8, 66u8, 235u8, 40u8, 171u8, 135u8, 30u8, 174u8, 130u8, 239u8, 205u8, 14u8, 211u8, 232u8, 65u8, 67u8, 153u8, 120u8, 63u8, 17u8, 101u8, 55u8, 17u8 ]; #[test] fn version() { let reader = RtpReader::new(&TEST_RTP_PACKET).unwrap(); assert_eq!(2, reader.version()); assert!(reader.padding().is_none()); assert!(reader.extension().is_none()); assert_eq!(0, reader.csrc_count()); assert!(reader.mark()); assert_eq!(96, reader.payload_type()); assert_eq!(Seq(10040), reader.sequence_number()); assert_eq!(1_692_665_255, reader.timestamp()); assert_eq!(0xa242_af01, reader.ssrc()); assert_eq!(379, reader.payload().len()); format!("{:?}", reader); } #[test] fn padding()
#[test] fn padding_too_large() { // 'padding' header-flag is on, and padding length (255) in final byte is larger than the // buffer length. (Test data created by fuzzing.) let data = [ 0xa2, 0xa2, 0xa2, 0xa2, 0xa2, 0x90, 0x0, 0x0, 0x1, 0x0, 0xff, 0xa2, 0xa2, 0xa2, 0xa2, 0x90, 0x0, 0x0, 0x0, 0x0, 0xff, ]; assert!(RtpReader::new(&data).is_err()); } #[test] fn builder_juggle() { let reader = RtpReader::new(&TEST_RTP_PACKET).unwrap(); let buffer = reader.create_builder().build().unwrap(); assert_eq!(&buffer.as_slice()[..], &TEST_RTP_PACKET[..]); }
{ let reader = RtpReader::new(&TEST_RTP_PACKET_WITH_EXTENSION).unwrap(); assert_eq!(2, reader.version()); assert!(reader.padding().is_none()); assert!(reader.extension().is_some()); assert_eq!(0, reader.csrc_count()); assert_eq!(111, reader.payload_type()); }
identifier_body
reader.rs
}, /// The padding header at the end of the packet, if present, specifies the number of padding /// bytes, including itself, and therefore cannot be less than `1`, or greater than the /// available space. PaddingLengthInvalid(u8), } impl<'a> RtpReader<'a> { /// An RTP packet header is no fewer than 12 bytes long pub const MIN_HEADER_LEN: usize = 12; const EXTENSION_HEADER_LEN: usize = 4; /// Tries to construct a new `RtpHeader` instance, or an `RtpReaderError` if the RTP data is /// malformed. /// /// In particular, if there is too little data in the given buffer, such that some later /// attempt to access an RTP header field would need to access bytes that are not available, /// then this method will fail up front, rather than allowing attempts to access any header /// field to fail later on. pub fn new(b: &'a [u8]) -> Result<RtpReader<'_>, RtpReaderError> { if b.len() < Self::MIN_HEADER_LEN { return Err(RtpReaderError::BufferTooShort(b.len())); } let r = RtpReader { buf: b }; if r.version()!= 2 { return Err(RtpReaderError::UnsupportedVersion(r.version())); } if r.extension_flag() { let extension_start = r.csrc_end() + Self::EXTENSION_HEADER_LEN; if extension_start > b.len() { return Err(RtpReaderError::HeadersTruncated { header_len: extension_start, buffer_len: b.len(), }); } let extension_end = extension_start + r.extension_len(); if extension_end > b.len() { return Err(RtpReaderError::HeadersTruncated { header_len: extension_end, buffer_len: b.len(), }); } } if r.payload_offset() > b.len() { return Err(RtpReaderError::HeadersTruncated { header_len: r.payload_offset(), buffer_len: b.len(), }); } if r.padding_flag() { let post_header_bytes = b.len() - r.payload_offset(); // with 'padding' flag set, there must be at least a single byte after the headers to // hold the padding length if post_header_bytes == 0 { return Err(RtpReaderError::HeadersTruncated { header_len: r.payload_offset(), buffer_len: b.len() - 1, }); } let pad_len = r.padding_len()?; if 
r.payload_offset() + pad_len as usize > b.len() { return Err(RtpReaderError::PaddingLengthInvalid(pad_len)); } } Ok(r) } /// Version field value (currently only version 2 is supported, so other values will not be /// seen from this release of `rtp-rs`. pub fn version(&self) -> u8 { (self.buf[0] & 0b1100_0000) >> 6 } /// Flag indicating if padding is present at the end of the payload data. fn padding_flag(&self) -> bool { (self.buf[0] & 0b0010_0000)!= 0 } /// Returns the size of the padding at the end of this packet, or `None` if the padding flag is /// not set in the packet header pub fn padding(&self) -> Option<u8> { if self.padding_flag() { Some(self.padding_len().unwrap()) } else { None } } fn extension_flag(&self) -> bool { (self.buf[0] & 0b0001_0000)!= 0 } /// A count of the number of CSRC fields present in the RTP headers - may be `0`. /// /// See [csrc()](#method.csrc). pub fn csrc_count(&self) -> u8 { self.buf[0] & 0b0000_1111 } /// A'marker', which may have some definition in the specific RTP profile in use pub fn mark(&self) -> bool { (self.buf[1] & 0b1000_0000)!= 0 } /// Indicates the type of content carried in this RTP packet. /// /// A few types-values are defined in the standard, but in many applications of RTP the value /// of this field needs to be agreed between sender and receiver by some mechanism outside of /// RTP itself. pub fn payload_type(&self) -> u8 { self.buf[1] & 0b0111_1111 } /// The sequence number of this particular packet. /// /// Sequence numbers are 16 bits, and will wrap back to `0` after reaching the maximum 16-bit /// value of `65535`. /// /// Receivers can identify packet losses or reordering by inspecting the value of this field /// across a sequence of received packets. The [`Seq`](struct.Seq.html) wrapper type helps /// calling code reason about sequence number problems in the face of any wraparound that might /// have legitimately happened. 
pub fn sequence_number(&self) -> Seq { Seq((self.buf[2] as u16) << 8 | (self.buf[3] as u16)) } /// The timestamp of this packet, given in a timebase that relates to the particular /// `payload_type` in use. /// /// It is perfectly possible for successive packets in a sequence to have the same value, or /// to have values that differ by arbitrarily large amounts. /// /// Timestamps are 32 bits, and will wrap back to `0` after reaching the maximum 32 bit value /// of `4294967295`. pub fn timestamp(&self) -> u32 { (self.buf[4] as u32) << 24 | (self.buf[5] as u32) << 16 | (self.buf[6] as u32) << 8 | (self.buf[7] as u32) } /// The _synchronisation source_ for this packet. Many applications of RTP do not use this /// field. pub fn ssrc(&self) -> u32 { (self.buf[8] as u32) << 24 | (self.buf[9] as u32) << 16 | (self.buf[10] as u32) << 8 | (self.buf[11] as u32) } /// A potentially empty list of _contributing sources_ for this packet. Many applications of /// RTP do not use this field. pub fn csrc(&self) -> impl Iterator<Item = u32> + '_ { self.buf[Self::MIN_HEADER_LEN..] .chunks(4) .take(self.csrc_count() as usize) .map(|b| (b[0] as u32) << 24 | (b[1] as u32) << 16 | (b[2] as u32) << 8 | (b[3] as u32)) } /// Returns the offset of the payload for the packet pub fn payload_offset(&self) -> usize { let offset = self.csrc_end(); if self.extension_flag() { offset + Self::EXTENSION_HEADER_LEN + self.extension_len() } else { offset } } fn csrc_end(&self) -> usize { Self::MIN_HEADER_LEN + (4 * self.csrc_count()) as usize } /// Returns the payload data of this RTP packet, excluding the packet's headers and any /// optional trailing padding. 
pub fn payload(&self) -> &'a [u8] { let pad = if self.padding_flag() { // in Self::new(), we already checked this was Ok, and will not attempt an invalid // slice below, self.padding_len().unwrap() as usize } else { 0 }; &self.buf[self.payload_offset()..self.buf.len() - pad] } fn extension_len(&self) -> usize { let offset = self.csrc_end(); // The 16 bit extension length header gives a length in 32 bit (4 byte) units; 0 is a // valid length. 4 * ((self.buf[offset + 2] as usize) << 8 | (self.buf[offset + 3] as usize)) } // must only be used if padding() returns true fn padding_len(&self) -> Result<u8, RtpReaderError> { match self.buf[self.buf.len() - 1] { 0 => Err(RtpReaderError::PaddingLengthInvalid(0)), l => Ok(l), } } /// Returns details of the optional RTP header extension field. If there is an extension, /// the first component of the resulting tuple is the extension id, and the second is a /// byte-slice for the extension data value, to be interpreted by the application. pub fn extension(&self) -> Option<(u16, &'a [u8])> { if self.extension_flag()
else { None } } /// Create a `RtpPacketBuilder` from this packet. **Note** that padding from the original /// packet will not be used by default, and must be defined on the resulting `RtpPacketBuilder` /// if required. /// /// The padding is not copied from the original since, while we do know how many padding bytes /// were present, we don't know if the intent was to round to 2 bytes, 4 bytes, etc. Blindly /// copying the padding could result in an incorrect result _if_ the payload is subsequently /// changed for one with a different length. /// /// If you know your output packets don't need padding, there is nothing more to do, since /// that is the default for the resulting `RtpPacketBulder`. /// /// If you know you output packets need padding to 4 bytes, then you _must_ explicitly specify /// this using `builder.padded(Pad::round_to(4))` even if the source packet was already padded /// to a 4 byte boundary. pub fn create_builder(&self) -> RtpPacketBuilder<'a> { let mut builder = RtpPacketBuilder::new() .payload_type(self.payload_type()) .marked(self.mark()) .sequence(self.sequence_number()) .ssrc(self.ssrc()) .timestamp(self.timestamp()) .payload(self.payload()); if let Some(ext) = self.extension() { builder = builder.extension(ext.0, ext.1); } for csrc in self.csrc() { builder = builder.add_csrc(csrc); } builder } } impl<'a> fmt::Debug for RtpReader<'a> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { f.debug_struct("RtpReader") .field("version", &self.version()) .field("padding", &self.padding()) .field("extension", &self.extension().map(|(id, _)| id)) .field("csrc_count", &self.csrc_count()) .field("mark", &self.mark()) .field("payload_type", &self.payload_type()) .field("sequence_number", &self.sequence_number()) .field("timestamp", &self.timestamp()) .field("ssrc", &self.ssrc()) .field("payload_length", &self.payload().len()) .finish() } } #[cfg(test)] mod tests { use super::*; use crate::IntoSeqIterator; const TEST_RTP_PACKET: [u8; 
391] = [ 0x80u8, 0xe0u8, 0x27u8, 0x38u8, 0x64u8, 0xe4u8, 0x05u8, 0xa7u8, 0xa2u8, 0x42u8, 0xafu8, 0x01u8, 0x3cu8, 0x41u8, 0xa4u8, 0xa3u8, 0x5du8, 0x13u8, 0xf9u8, 0xcau8, 0x2cu8, 0x7eu8, 0xa9u8, 0x77u8, 0xaau8, 0xdeu8, 0xf7u8, 0xcau8, 0xa4u8, 0x28u8, 0xfeu8, 0xdfu8, 0xc8u8, 0x68u8, 0xf1u8, 0xd9u8, 0x4fu8, 0x69u8, 0x96u8, 0xa0u8, 0x57u8, 0xbau8, 0xfbu8, 0x07u8, 0xc4u8, 0xc4u8, 0xd4u8, 0xfeu8, 0xf8u8, 0xc7u8, 0xb2u8, 0x0du8, 0x01u8, 0x12u8, 0x14u8, 0x36u8, 0x69u8, 0x75u8, 0xf2u8, 0xb4u8, 0xb5u8, 0xf2u8, 0x54u8, 0x2eu8, 0xc2u8, 0x66u8, 0x51u8, 0xebu8, 0x41u8, 0x80u8, 0x96u8, 0xceu8, 0x8eu8, 0x60u8, 0xb2u8, 0x44u8, 0xaeu8, 0xe5u8, 0x43u8, 0xadu8, 0x7bu8, 0x48u8, 0x89u8, 0x44u8, 0xb0u8, 0x48u8, 0x67u8, 0x6au8, 0x84u8, 0x7au8, 0x0au8, 0x8fu8, 0x71u8, 0x50u8, 0x69u8, 0xe6u8, 0xb1u8, 0x05u8, 0x40u8, 0xb9u8, 0x8cu8, 0xafu8, 0x42u8, 0xcbu8, 0x58u8, 0x83u8, 0xcbu8, 0x32u8, 0x64u8, 0xd2u8, 0x2au8, 0x7du8, 0x4eu8, 0xf5u8, 0xbcu8, 0x33u8, 0xfeu8, 0xb7u8, 0x0cu8, 0xe4u8, 0x8eu8, 0x38u8, 0xbcu8, 0x3au8, 0x1eu8, 0xd2u8, 0x56u8, 0x13u8, 0x23u8, 0x47u8, 0xcfu8, 0x42u8, 0xa9u8, 0xbbu8, 0xcfu8, 0x48u8, 0xf3u8, 0x11u8, 0xc7u8, 0xfdu8, 0x73u8, 0x2du8, 0xe1u8, 0xeau8, 0x47u8, 0x5cu8, 0x5du8, 0x11u8, 0x96u8, 0x1eu8, 0xc4u8, 0x70u8, 0x32u8, 0x77u8, 0xabu8, 0x31u8, 0x7au8, 0xb1u8, 0x22u8, 0x14u8, 0x8du8, 0x2bu8, 0xecu8, 0x3du8, 0x67u8, 0x97u8, 0xa4u8, 0x40u8, 0x21u8, 0x1eu8, 0xceu8, 0xb0u8, 0x63u8, 0x01u8, 0x75u8, 0x77u8, 0x03u8, 0x15u8, 0xcdu8, 0x35u8, 0xa1u8, 0x2fu8, 0x4bu8, 0xa0u8, 0xacu8, 0x8du8, 0xd7u8, 0x78u8, 0x02u8, 0x23u8, 0xcbu8, 0xfdu8, 0x82u8, 0x4eu8, 0x0bu8, 0x79u8, 0x7fu8, 0x39u8, 0x70u8, 0x26u8, 0x66u8, 0x37u8, 0xe9u8, 0x93u8, 0x91u8, 0x7bu8, 0xc4u8, 0x80u8, 0xa9u8, 0x18u8, 0x23u8, 0xb3u8, 0xa1u8, 0x04u8, 0x72u8, 0x53u8, 0xa0u8, 0xb4u8, 0xffu8, 0x79u8, 0x1fu8, 0x07u8, 0xe2u8, 0x5du8, 0x01u8, 0x7du8, 0x63u8, 0xc1u8, 0x16u8, 0x89u8, 0x23u8, 0x4au8, 0x17u8, 0xbbu8, 0x6du8, 0x0du8, 0x81u8, 0x1au8, 0xbbu8, 0x94u8, 0x5bu8, 0xcbu8, 0x2du8, 0xdeu8, 0x98u8, 0x40u8, 0x22u8, 0x62u8, 
0x41u8, 0xc2u8, 0x9bu8, 0x95u8, 0x85u8, 0x60u8, 0xf0u8, 0xdeu8, 0x6fu8, 0xeeu8, 0x93u8, 0xccu8, 0x15u8, 0x76u8, 0xfbu8, 0xf8u8, 0x8au8, 0x1du8, 0xe1u8, 0x83u8, 0x12u8, 0xabu8, 0x25u8, 0x6au8, 0x7bu8, 0x89u8, 0xedu8, 0x70u8, 0x4eu8, 0xcdu8, 0x1eu8, 0xa9u8, 0xfcu8, 0xa8u8, 0x22u8, 0x91u8, 0x5fu8, 0x50u8, 0x68u8, 0x6au8, 0x35u8, 0xf7u8, 0xc1u8, 0x1eu8, 0x15u8, 0x37u8, 0xb4u8, 0x30u8, 0x62u8, 0x56u8, 0x1eu8, 0x2eu8, 0xe0u8, 0x2du8, 0xa4u8, 0x1eu8, 0x75u8, 0x5bu8, 0xc7u8, 0xd0u8, 0x5bu8, 0x9du8, 0xd0u8, 0x25u8, 0x76u8, 0xdfu8, 0xa7u8, 0x19u8, 0x12u8, 0x93u8, 0xf4u8, 0xebu8, 0x02u8, 0xf2u8, 0x4au8, 0x13u8, 0xe9u8, 0x1cu8, 0x17u8, 0xccu8, 0x11u8, 0x87u8, 0x9cu8, 0xa6u8, 0x40u8, 0x27u8, 0xb7u8, 0x2bu8, 0x9bu8, 0x6fu8, 0x23u8, 0x06u8, 0x2cu8, 0xc6u8, 0x6eu8, 0xc1u8, 0x9au8, 0xbdu8, 0x59u8, 0x37u8, 0xe9u8, 0x9eu8, 0x76u8, 0xf6u8, 0xc1u8, 0xbcu8, 0x81u8, 0x18u8, 0x60u8, 0xc9u8, 0x64u8, 0x0au8, 0xb3u8, 0x6eu8, 0xf3u8, 0x6bu8, 0xb9u8, 0xd0u8, 0xf6u8, 0xe0u8, 0x9bu8, 0x91u8, 0xc1u8, 0x0fu8, 0x96u8, 0xefu8, 0xbcu8, 0x5fu8, 0x8eu8, 0x86u8, 0x56u8, 0x5au8, 0xfcu8, 0x7au8, 0x8bu8, 0xddu8, 0x9au8, 0x1cu8, 0xf6u8, 0xb4u8, 0x85u8, 0xf4u8, 0xb0u8, ]; const TEST_RTP_PACKET_WITH_EXTENSION: [u8; 63] = [ 144u8, 111u8, 79u8, 252u8, 224u8, 94u8, 104u8, 203u8, 30u8, 112u8, 208u8, 191u8, 190u8, 222u8, 0u8, 3u8, 34u8, 175u8, 185u8, 88u8, 49u8, 0u8, 171u8, 64u8, 48u8, 16u8, 219u8, 0u8, 104u8, 9u8, 136u8, 90u8, 174u8, 145u8, 68u8, 165u8, 227u8, 178u8, 187u8, 68u8, 166u8, 66u8, 235u8, 40u8, 171u8, 135u8, 30u8, 174u8, 130u8, 239u8, 205u8, 14u8, 211u8, 232u8, 65u8, 67u8, 153u8, 120u8, 63u8, 17u8, 101u8, 55u8, 17u8 ]; #[test] fn version() { let reader = RtpReader::new(&TEST_RTP_PACKET).unwrap(); assert_eq!(2, reader.version()); assert!(reader.padding().is_none()); assert!(reader.extension().is_none()); assert_eq!(0, reader.csrc_count()); assert!(reader.mark()); assert_eq!(96, reader.payload_type()); assert_eq!(Seq(10040), reader.sequence_number()); assert_eq!(1_692_665_255, reader.timestamp()); 
assert_eq!(0xa242_af01, reader.ssrc()); assert_eq!(379, reader.payload().len()); format!("{:?}", reader); } #[test] fn padding() { let reader = RtpReader::new(&TEST_RTP_PACKET_WITH_EXTENSION).unwrap(); assert_eq!(2, reader.version()); assert!(reader.padding().is_none()); assert!(reader.extension().is_some()); assert_eq!(0, reader.csrc_count()); assert_eq!(111, reader.payload_type()); } #[test] fn padding_too_large() { // 'padding' header-flag is on, and padding length (255) in final byte is larger than the // buffer length. (Test data created by fuzzing.) let data = [ 0xa2, 0xa2, 0xa2, 0xa2, 0xa2, 0x90, 0x0, 0x0, 0x1, 0x0, 0xff, 0xa2, 0xa2, 0xa2, 0xa2, 0x90, 0x0, 0x0, 0x0, 0x0, 0xff, ]; assert!(RtpReader::new(&data).is_err()); } #[test] fn builder_juggle() { let reader = RtpReader::new(&TEST_RTP_PACKET).unwrap(); let buffer = reader.create_builder().build().unwrap(); assert_eq!(&buffer.as_slice()[..], &TEST_RTP_PACKET[..]); }
{ let offset = self.csrc_end(); let id = (self.buf[offset] as u16) << 8 | (self.buf[offset + 1] as u16); let start = offset + 4; Some((id, &self.buf[start..start + self.extension_len()])) }
conditional_block
reader.rs
}, /// The padding header at the end of the packet, if present, specifies the number of padding /// bytes, including itself, and therefore cannot be less than `1`, or greater than the /// available space. PaddingLengthInvalid(u8), } impl<'a> RtpReader<'a> { /// An RTP packet header is no fewer than 12 bytes long pub const MIN_HEADER_LEN: usize = 12; const EXTENSION_HEADER_LEN: usize = 4; /// Tries to construct a new `RtpHeader` instance, or an `RtpReaderError` if the RTP data is /// malformed. /// /// In particular, if there is too little data in the given buffer, such that some later /// attempt to access an RTP header field would need to access bytes that are not available, /// then this method will fail up front, rather than allowing attempts to access any header /// field to fail later on. pub fn new(b: &'a [u8]) -> Result<RtpReader<'_>, RtpReaderError> { if b.len() < Self::MIN_HEADER_LEN { return Err(RtpReaderError::BufferTooShort(b.len())); } let r = RtpReader { buf: b }; if r.version()!= 2 { return Err(RtpReaderError::UnsupportedVersion(r.version())); } if r.extension_flag() { let extension_start = r.csrc_end() + Self::EXTENSION_HEADER_LEN; if extension_start > b.len() { return Err(RtpReaderError::HeadersTruncated { header_len: extension_start, buffer_len: b.len(), }); } let extension_end = extension_start + r.extension_len(); if extension_end > b.len() { return Err(RtpReaderError::HeadersTruncated { header_len: extension_end, buffer_len: b.len(), }); } } if r.payload_offset() > b.len() { return Err(RtpReaderError::HeadersTruncated { header_len: r.payload_offset(), buffer_len: b.len(), }); } if r.padding_flag() { let post_header_bytes = b.len() - r.payload_offset(); // with 'padding' flag set, there must be at least a single byte after the headers to // hold the padding length if post_header_bytes == 0 { return Err(RtpReaderError::HeadersTruncated { header_len: r.payload_offset(), buffer_len: b.len() - 1, }); } let pad_len = r.padding_len()?; if 
r.payload_offset() + pad_len as usize > b.len() { return Err(RtpReaderError::PaddingLengthInvalid(pad_len)); } } Ok(r) } /// Version field value (currently only version 2 is supported, so other values will not be /// seen from this release of `rtp-rs`. pub fn version(&self) -> u8 { (self.buf[0] & 0b1100_0000) >> 6 } /// Flag indicating if padding is present at the end of the payload data. fn padding_flag(&self) -> bool { (self.buf[0] & 0b0010_0000)!= 0 } /// Returns the size of the padding at the end of this packet, or `None` if the padding flag is /// not set in the packet header pub fn padding(&self) -> Option<u8> { if self.padding_flag() { Some(self.padding_len().unwrap()) } else { None } } fn extension_flag(&self) -> bool { (self.buf[0] & 0b0001_0000)!= 0 } /// A count of the number of CSRC fields present in the RTP headers - may be `0`. /// /// See [csrc()](#method.csrc). pub fn csrc_count(&self) -> u8 { self.buf[0] & 0b0000_1111 } /// A'marker', which may have some definition in the specific RTP profile in use pub fn mark(&self) -> bool { (self.buf[1] & 0b1000_0000)!= 0 } /// Indicates the type of content carried in this RTP packet. /// /// A few types-values are defined in the standard, but in many applications of RTP the value /// of this field needs to be agreed between sender and receiver by some mechanism outside of /// RTP itself. pub fn payload_type(&self) -> u8 { self.buf[1] & 0b0111_1111 } /// The sequence number of this particular packet. /// /// Sequence numbers are 16 bits, and will wrap back to `0` after reaching the maximum 16-bit /// value of `65535`. /// /// Receivers can identify packet losses or reordering by inspecting the value of this field /// across a sequence of received packets. The [`Seq`](struct.Seq.html) wrapper type helps /// calling code reason about sequence number problems in the face of any wraparound that might /// have legitimately happened. 
pub fn sequence_number(&self) -> Seq { Seq((self.buf[2] as u16) << 8 | (self.buf[3] as u16)) } /// The timestamp of this packet, given in a timebase that relates to the particular /// `payload_type` in use. /// /// It is perfectly possible for successive packets in a sequence to have the same value, or /// to have values that differ by arbitrarily large amounts. /// /// Timestamps are 32 bits, and will wrap back to `0` after reaching the maximum 32 bit value /// of `4294967295`. pub fn timestamp(&self) -> u32 { (self.buf[4] as u32) << 24 | (self.buf[5] as u32) << 16 | (self.buf[6] as u32) << 8 | (self.buf[7] as u32) } /// The _synchronisation source_ for this packet. Many applications of RTP do not use this /// field. pub fn ssrc(&self) -> u32 { (self.buf[8] as u32) << 24 | (self.buf[9] as u32) << 16 | (self.buf[10] as u32) << 8 | (self.buf[11] as u32) } /// A potentially empty list of _contributing sources_ for this packet. Many applications of /// RTP do not use this field. pub fn csrc(&self) -> impl Iterator<Item = u32> + '_ { self.buf[Self::MIN_HEADER_LEN..] .chunks(4) .take(self.csrc_count() as usize) .map(|b| (b[0] as u32) << 24 | (b[1] as u32) << 16 | (b[2] as u32) << 8 | (b[3] as u32)) } /// Returns the offset of the payload for the packet pub fn payload_offset(&self) -> usize { let offset = self.csrc_end(); if self.extension_flag() { offset + Self::EXTENSION_HEADER_LEN + self.extension_len() } else { offset } } fn csrc_end(&self) -> usize { Self::MIN_HEADER_LEN + (4 * self.csrc_count()) as usize } /// Returns the payload data of this RTP packet, excluding the packet's headers and any /// optional trailing padding. 
pub fn payload(&self) -> &'a [u8] { let pad = if self.padding_flag() { // in Self::new(), we already checked this was Ok, and will not attempt an invalid // slice below, self.padding_len().unwrap() as usize } else { 0 }; &self.buf[self.payload_offset()..self.buf.len() - pad] } fn extension_len(&self) -> usize { let offset = self.csrc_end(); // The 16 bit extension length header gives a length in 32 bit (4 byte) units; 0 is a // valid length. 4 * ((self.buf[offset + 2] as usize) << 8 | (self.buf[offset + 3] as usize)) } // must only be used if padding() returns true fn padding_len(&self) -> Result<u8, RtpReaderError> { match self.buf[self.buf.len() - 1] { 0 => Err(RtpReaderError::PaddingLengthInvalid(0)), l => Ok(l), } } /// Returns details of the optional RTP header extension field. If there is an extension, /// the first component of the resulting tuple is the extension id, and the second is a /// byte-slice for the extension data value, to be interpreted by the application. pub fn extension(&self) -> Option<(u16, &'a [u8])> { if self.extension_flag() { let offset = self.csrc_end(); let id = (self.buf[offset] as u16) << 8 | (self.buf[offset + 1] as u16); let start = offset + 4; Some((id, &self.buf[start..start + self.extension_len()])) } else { None } } /// Create a `RtpPacketBuilder` from this packet. **Note** that padding from the original /// packet will not be used by default, and must be defined on the resulting `RtpPacketBuilder` /// if required. /// /// The padding is not copied from the original since, while we do know how many padding bytes /// were present, we don't know if the intent was to round to 2 bytes, 4 bytes, etc. Blindly /// copying the padding could result in an incorrect result _if_ the payload is subsequently /// changed for one with a different length. /// /// If you know your output packets don't need padding, there is nothing more to do, since /// that is the default for the resulting `RtpPacketBulder`. 
/// /// If you know you output packets need padding to 4 bytes, then you _must_ explicitly specify /// this using `builder.padded(Pad::round_to(4))` even if the source packet was already padded /// to a 4 byte boundary. pub fn create_builder(&self) -> RtpPacketBuilder<'a> { let mut builder = RtpPacketBuilder::new() .payload_type(self.payload_type()) .marked(self.mark()) .sequence(self.sequence_number()) .ssrc(self.ssrc()) .timestamp(self.timestamp()) .payload(self.payload()); if let Some(ext) = self.extension() { builder = builder.extension(ext.0, ext.1); } for csrc in self.csrc() { builder = builder.add_csrc(csrc); } builder } } impl<'a> fmt::Debug for RtpReader<'a> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { f.debug_struct("RtpReader") .field("version", &self.version()) .field("padding", &self.padding()) .field("extension", &self.extension().map(|(id, _)| id)) .field("csrc_count", &self.csrc_count()) .field("mark", &self.mark()) .field("payload_type", &self.payload_type()) .field("sequence_number", &self.sequence_number()) .field("timestamp", &self.timestamp()) .field("ssrc", &self.ssrc()) .field("payload_length", &self.payload().len()) .finish() } } #[cfg(test)] mod tests { use super::*; use crate::IntoSeqIterator; const TEST_RTP_PACKET: [u8; 391] = [ 0x80u8, 0xe0u8, 0x27u8, 0x38u8, 0x64u8, 0xe4u8, 0x05u8, 0xa7u8, 0xa2u8, 0x42u8, 0xafu8, 0x01u8, 0x3cu8, 0x41u8, 0xa4u8, 0xa3u8, 0x5du8, 0x13u8, 0xf9u8, 0xcau8, 0x2cu8, 0x7eu8, 0xa9u8, 0x77u8, 0xaau8, 0xdeu8, 0xf7u8, 0xcau8, 0xa4u8, 0x28u8, 0xfeu8, 0xdfu8, 0xc8u8, 0x68u8, 0xf1u8, 0xd9u8, 0x4fu8, 0x69u8, 0x96u8, 0xa0u8, 0x57u8, 0xbau8, 0xfbu8, 0x07u8, 0xc4u8, 0xc4u8, 0xd4u8, 0xfeu8, 0xf8u8, 0xc7u8, 0xb2u8, 0x0du8, 0x01u8, 0x12u8, 0x14u8, 0x36u8, 0x69u8, 0x75u8, 0xf2u8, 0xb4u8, 0xb5u8, 0xf2u8, 0x54u8, 0x2eu8, 0xc2u8, 0x66u8, 0x51u8, 0xebu8, 0x41u8, 0x80u8, 0x96u8, 0xceu8, 0x8eu8, 0x60u8, 0xb2u8, 0x44u8, 0xaeu8, 0xe5u8, 0x43u8, 0xadu8, 0x7bu8, 0x48u8, 0x89u8, 0x44u8, 0xb0u8, 0x48u8, 0x67u8, 
0x6au8, 0x84u8, 0x7au8, 0x0au8, 0x8fu8, 0x71u8, 0x50u8, 0x69u8, 0xe6u8, 0xb1u8, 0x05u8, 0x40u8, 0xb9u8, 0x8cu8, 0xafu8, 0x42u8, 0xcbu8, 0x58u8, 0x83u8, 0xcbu8, 0x32u8, 0x64u8, 0xd2u8, 0x2au8, 0x7du8, 0x4eu8, 0xf5u8, 0xbcu8, 0x33u8, 0xfeu8, 0xb7u8, 0x0cu8, 0xe4u8, 0x8eu8, 0x38u8, 0xbcu8, 0x3au8, 0x1eu8, 0xd2u8, 0x56u8, 0x13u8, 0x23u8, 0x47u8, 0xcfu8, 0x42u8, 0xa9u8, 0xbbu8, 0xcfu8, 0x48u8, 0xf3u8, 0x11u8, 0xc7u8, 0xfdu8, 0x73u8, 0x2du8, 0xe1u8, 0xeau8, 0x47u8, 0x5cu8, 0x5du8, 0x11u8, 0x96u8, 0x1eu8, 0xc4u8, 0x70u8, 0x32u8, 0x77u8, 0xabu8, 0x31u8, 0x7au8, 0xb1u8, 0x22u8, 0x14u8, 0x8du8, 0x2bu8, 0xecu8, 0x3du8, 0x67u8, 0x97u8, 0xa4u8, 0x40u8, 0x21u8, 0x1eu8, 0xceu8, 0xb0u8, 0x63u8, 0x01u8, 0x75u8, 0x77u8, 0x03u8, 0x15u8, 0xcdu8, 0x35u8, 0xa1u8, 0x2fu8, 0x4bu8, 0xa0u8, 0xacu8, 0x8du8, 0xd7u8, 0x78u8, 0x02u8, 0x23u8, 0xcbu8, 0xfdu8, 0x82u8, 0x4eu8, 0x0bu8, 0x79u8, 0x7fu8, 0x39u8, 0x70u8, 0x26u8, 0x66u8, 0x37u8, 0xe9u8, 0x93u8, 0x91u8, 0x7bu8, 0xc4u8, 0x80u8, 0xa9u8, 0x18u8, 0x23u8, 0xb3u8, 0xa1u8, 0x04u8, 0x72u8, 0x53u8, 0xa0u8, 0xb4u8, 0xffu8, 0x79u8, 0x1fu8, 0x07u8, 0xe2u8, 0x5du8, 0x01u8, 0x7du8, 0x63u8, 0xc1u8, 0x16u8, 0x89u8, 0x23u8, 0x4au8, 0x17u8, 0xbbu8, 0x6du8, 0x0du8, 0x81u8, 0x1au8, 0xbbu8, 0x94u8, 0x5bu8, 0xcbu8, 0x2du8, 0xdeu8, 0x98u8, 0x40u8, 0x22u8, 0x62u8, 0x41u8, 0xc2u8, 0x9bu8, 0x95u8, 0x85u8, 0x60u8, 0xf0u8, 0xdeu8, 0x6fu8, 0xeeu8, 0x93u8, 0xccu8, 0x15u8, 0x76u8, 0xfbu8, 0xf8u8, 0x8au8, 0x1du8, 0xe1u8, 0x83u8, 0x12u8, 0xabu8, 0x25u8, 0x6au8, 0x7bu8, 0x89u8, 0xedu8, 0x70u8, 0x4eu8, 0xcdu8, 0x1eu8, 0xa9u8, 0xfcu8, 0xa8u8, 0x22u8, 0x91u8, 0x5fu8, 0x50u8, 0x68u8, 0x6au8, 0x35u8, 0xf7u8, 0xc1u8, 0x1eu8, 0x15u8, 0x37u8, 0xb4u8, 0x30u8, 0x62u8, 0x56u8, 0x1eu8, 0x2eu8, 0xe0u8, 0x2du8, 0xa4u8, 0x1eu8, 0x75u8, 0x5bu8, 0xc7u8, 0xd0u8, 0x5bu8, 0x9du8, 0xd0u8, 0x25u8, 0x76u8, 0xdfu8, 0xa7u8, 0x19u8, 0x12u8, 0x93u8, 0xf4u8, 0xebu8, 0x02u8, 0xf2u8, 0x4au8, 0x13u8, 0xe9u8, 0x1cu8, 0x17u8, 0xccu8, 0x11u8, 0x87u8, 0x9cu8, 0xa6u8, 0x40u8, 0x27u8, 0xb7u8, 0x2bu8, 0x9bu8, 
0x6fu8, 0x23u8, 0x06u8, 0x2cu8, 0xc6u8, 0x6eu8, 0xc1u8, 0x9au8, 0xbdu8, 0x59u8, 0x37u8, 0xe9u8, 0x9eu8, 0x76u8, 0xf6u8, 0xc1u8, 0xbcu8, 0x81u8, 0x18u8, 0x60u8, 0xc9u8, 0x64u8, 0x0au8, 0xb3u8, 0x6eu8, 0xf3u8, 0x6bu8, 0xb9u8, 0xd0u8, 0xf6u8, 0xe0u8, 0x9bu8, 0x91u8, 0xc1u8, 0x0fu8, 0x96u8, 0xefu8, 0xbcu8, 0x5fu8, 0x8eu8, 0x86u8, 0x56u8, 0x5au8, 0xfcu8, 0x7au8, 0x8bu8, 0xddu8, 0x9au8, 0x1cu8, 0xf6u8, 0xb4u8, 0x85u8, 0xf4u8, 0xb0u8, ]; const TEST_RTP_PACKET_WITH_EXTENSION: [u8; 63] = [ 144u8, 111u8, 79u8, 252u8, 224u8, 94u8, 104u8, 203u8, 30u8, 112u8, 208u8, 191u8, 190u8, 222u8, 0u8, 3u8, 34u8, 175u8, 185u8, 88u8, 49u8, 0u8, 171u8, 64u8, 48u8, 16u8, 219u8, 0u8, 104u8, 9u8, 136u8, 90u8, 174u8, 145u8, 68u8, 165u8, 227u8, 178u8, 187u8, 68u8, 166u8, 66u8, 235u8, 40u8, 171u8, 135u8, 30u8, 174u8, 130u8, 239u8, 205u8, 14u8, 211u8, 232u8, 65u8, 67u8, 153u8, 120u8, 63u8, 17u8, 101u8, 55u8, 17u8 ]; #[test] fn version() { let reader = RtpReader::new(&TEST_RTP_PACKET).unwrap(); assert_eq!(2, reader.version()); assert!(reader.padding().is_none()); assert!(reader.extension().is_none()); assert_eq!(0, reader.csrc_count()); assert!(reader.mark()); assert_eq!(96, reader.payload_type()); assert_eq!(Seq(10040), reader.sequence_number()); assert_eq!(1_692_665_255, reader.timestamp()); assert_eq!(0xa242_af01, reader.ssrc()); assert_eq!(379, reader.payload().len()); format!("{:?}", reader); } #[test] fn padding() { let reader = RtpReader::new(&TEST_RTP_PACKET_WITH_EXTENSION).unwrap(); assert_eq!(2, reader.version());
} #[test] fn padding_too_large() { // 'padding' header-flag is on, and padding length (255) in final byte is larger than the // buffer length. (Test data created by fuzzing.) let data = [ 0xa2, 0xa2, 0xa2, 0xa2, 0xa2, 0x90, 0x0, 0x0, 0x1, 0x0, 0xff, 0xa2, 0xa2, 0xa2, 0xa2, 0x90, 0x0, 0x0, 0x0, 0x0, 0xff, ]; assert!(RtpReader::new(&data).is_err()); } #[test] fn builder_juggle() { let reader = RtpReader::new(&TEST_RTP_PACKET).unwrap(); let buffer = reader.create_builder().build().unwrap(); assert_eq!(&buffer.as_slice()[..], &TEST_RTP_PACKET[..]); } #[t
assert!(reader.padding().is_none()); assert!(reader.extension().is_some()); assert_eq!(0, reader.csrc_count()); assert_eq!(111, reader.payload_type());
random_line_split
lib.rs
//! Linear regression //! //! `linreg` calculates linear regressions for two dimensional measurements, also known as //! [simple linear regression](https://en.wikipedia.org/wiki/Simple_linear_regression). //! //! Base for all calculations of linear regression is the simple model found in //! https://en.wikipedia.org/wiki/Ordinary_least_squares#Simple_linear_regression_model. //! //! ## Example use //! //! ```rust //! use linreg::{linear_regression, linear_regression_of}; //! //! // Example 1: x and y values stored in two different vectors //! let xs: Vec<f64> = vec![1.0, 2.0, 3.0, 4.0, 5.0]; //! let ys: Vec<f64> = vec![2.0, 4.0, 5.0, 4.0, 5.0]; //! //! assert_eq!(Ok((0.6, 2.2)), linear_regression(&xs, &ys)); //! //! //! // Example 2: x and y values stored as tuples //! let tuples: Vec<(f32, f32)> = vec![(1.0, 2.0), //! (2.0, 4.0), //! (3.0, 5.0), //! (4.0, 4.0), //! (5.0, 5.0)]; //! //! assert_eq!(Ok((0.6, 2.2)), linear_regression_of(&tuples)); //! //! //! // Example 3: directly operating on integer (converted to float as required) //! let xs: Vec<u8> = vec![1, 2, 3, 4, 5]; //! let ys: Vec<u8> = vec![2, 4, 5, 4, 5]; //! //! assert_eq!(Ok((0.6, 2.2)), linear_regression(&xs, &ys)); //! ``` #![no_std] extern crate num_traits; use num_traits::float::FloatCore; #[cfg(test)] #[macro_use] extern crate std; use core::iter::Iterator; use core::iter::Sum; use displaydoc::Display; /// The kinds of errors that can occur when calculating a linear regression. #[derive(Copy, Clone, Display, Debug, PartialEq)] pub enum Error { /// The slope is too steep to represent, approaching infinity. TooSteep, /// Failed to calculate mean. /// /// This means the input was empty or had too many elements. Mean, /// Lengths of the inputs are different. InputLenDif, /// Can't compute linear regression of zero elements NoElements, } /// Single-pass simple linear regression. 
/// /// Similar to `lin_reg`, but does not require a mean value to be computed in advance and thus /// does not require a second pass over the input data. /// /// Returns `Ok((slope, intercept))` of the regression line. /// /// # Errors /// /// Errors if the number of elements is too large to be represented as `F` or /// the slope is too steep to represent, approaching infinity. pub fn lin_reg_imprecise<I, F>(xys: I) -> Result<(F, F), Error> where F: FloatCore, I: Iterator<Item = (F, F)>, { details::lin_reg_imprecise_components(xys)?.finish() } /// A module containing the building parts of the main API. /// You can use these if you want to have more control over the linear regression mod details { use super::Error; use num_traits::float::FloatCore; /// Low level linear regression primitive for pushing values instead of fetching them /// from an iterator #[derive(Debug)] pub struct Accumulator<F: FloatCore> { x_mean: F, y_mean: F, x_mul_y_mean: F, x_squared_mean: F, n: usize, } impl<F: FloatCore> Default for Accumulator<F> { fn default() -> Self { Self::new() } } impl<F: FloatCore> Accumulator<F> { pub fn new() -> Self { Self { x_mean: F::zero(), y_mean: F::zero(), x_mul_y_mean: F::zero(), x_squared_mean: F::zero(), n: 0, } } pub fn push(&mut self, x: F, y: F) { self.x_mean = self.x_mean + x; self.y_mean = self.y_mean + y; self.x_mul_y_mean = self.x_mul_y_mean + x * y; self.x_squared_mean = self.x_squared_mean + x * x; self.n += 1; } pub fn normalize(&mut self) -> Result<(), Error> { match self.n { 1 => return Ok(()), 0 => return Err(Error::NoElements), _ => {} } let n = F::from(self.n).ok_or(Error::Mean)?; self.n = 1; self.x_mean = self.x_mean / n; self.y_mean = self.y_mean / n; self.x_mul_y_mean = self.x_mul_y_mean / n; self.x_squared_mean = self.x_squared_mean / n; Ok(()) } pub fn parts(mut self) -> Result<(F, F, F, F), Error> { self.normalize()?; let Self { x_mean, y_mean, x_mul_y_mean, x_squared_mean, .. 
} = self; Ok((x_mean, y_mean, x_mul_y_mean, x_squared_mean)) } pub fn finish(self) -> Result<(F, F), Error> { let (x_mean, y_mean, x_mul_y_mean, x_squared_mean) = self.parts()?; let slope = (x_mul_y_mean - x_mean * y_mean) / (x_squared_mean - x_mean * x_mean); let intercept = y_mean - slope * x_mean; if slope.is_nan() { return Err(Error::TooSteep); } Ok((slope, intercept)) } } pub fn lin_reg_imprecise_components<I, F>(xys: I) -> Result<Accumulator<F>, Error> where F: FloatCore, I: Iterator<Item = (F, F)>, { let mut acc = Accumulator::new(); for (x, y) in xys { acc.push(x, y); } acc.normalize()?; Ok(acc) } } /// Calculates a linear regression with a known mean. /// /// Lower-level linear regression function. Assumes that `x_mean` and `y_mean` /// have already been calculated. Returns `Error::DivByZero` if /// /// * the slope is too steep to represent, approaching infinity. /// /// Since there is a mean, this function assumes that `xs` and `ys` are both non-empty. /// /// Returns `Ok((slope, intercept))` of the regression line. pub fn lin_reg<I, F>(xys: I, x_mean: F, y_mean: F) -> Result<(F, F), Error> where I: Iterator<Item = (F, F)>, F: FloatCore, { // SUM (x-mean(x))^2 let mut xxm2 = F::zero(); // SUM (x-mean(x)) (y-mean(y)) let mut xmym2 = F::zero(); for (x, y) in xys { xxm2 = xxm2 + (x - x_mean) * (x - x_mean); xmym2 = xmym2 + (x - x_mean) * (y - y_mean); } let slope = xmym2 / xxm2; // we check for divide-by-zero after the fact if slope.is_nan() { return Err(Error::TooSteep); } let intercept = y_mean - slope * x_mean; Ok((slope, intercept)) } /// Two-pass simple linear regression from slices. /// /// Calculates the linear regression from two slices, one for x- and one for y-values, by /// calculating the mean and then calling `lin_reg`. /// /// Returns `Ok(slope, intercept)` of the regression line. 
/// /// # Errors /// /// Returns an error if /// /// * `xs` and `ys` differ in length /// * `xs` or `ys` are empty /// * the slope is too steep to represent, approaching infinity /// * the number of elements cannot be represented as an `F` /// pub fn linear_regression<X, Y, F>(xs: &[X], ys: &[Y]) -> Result<(F, F), Error> where X: Clone + Into<F>, Y: Clone + Into<F>, F: FloatCore + Sum, { if xs.len()!= ys.len() { return Err(Error::InputLenDif); } if xs.is_empty()
let x_sum: F = xs.iter().cloned().map(Into::into).sum(); let n = F::from(xs.len()).ok_or(Error::Mean)?; let x_mean = x_sum / n; let y_sum: F = ys.iter().cloned().map(Into::into).sum(); let y_mean = y_sum / n; lin_reg( xs.iter() .map(|i| i.clone().into()) .zip(ys.iter().map(|i| i.clone().into())), x_mean, y_mean, ) } /// Two-pass linear regression from tuples. /// /// Calculates the linear regression from a slice of tuple values by first calculating the mean /// before calling `lin_reg`. /// /// Returns `Ok(slope, intercept)` of the regression line. /// /// # Errors /// /// Returns an error if /// /// * `xys` is empty /// * the slope is too steep to represent, approaching infinity /// * the number of elements cannot be represented as an `F` pub fn linear_regression_of<X, Y, F>(xys: &[(X, Y)]) -> Result<(F, F), Error> where X: Clone + Into<F>, Y: Clone + Into<F>, F: FloatCore, { if xys.is_empty() { return Err(Error::Mean); } // We're handrolling the mean computation here, because our generic implementation can't handle tuples. 
// If we ran the generic impl on each tuple field, that would be very cache inefficient let n = F::from(xys.len()).ok_or(Error::Mean)?; let (x_sum, y_sum) = xys .iter() .cloned() .fold((F::zero(), F::zero()), |(sx, sy), (x, y)| { (sx + x.into(), sy + y.into()) }); let x_mean = x_sum / n; let y_mean = y_sum / n; lin_reg( xys.iter() .map(|(x, y)| (x.clone().into(), y.clone().into())), x_mean, y_mean, ) } #[cfg(test)] mod tests { use std::vec::Vec; use super::*; #[test] fn float_slices_regression() { let xs: Vec<f64> = vec![1.0, 2.0, 3.0, 4.0, 5.0]; let ys: Vec<f64> = vec![2.0, 4.0, 5.0, 4.0, 5.0]; assert_eq!(Ok((0.6, 2.2)), linear_regression(&xs, &ys)); } #[test] fn lin_reg_imprecises_vs_linreg() { let xs: Vec<f64> = vec![1.0, 2.0, 3.0, 4.0, 5.0]; let ys: Vec<f64> = vec![2.0, 4.0, 5.0, 4.0, 5.0]; let (x1, y1) = lin_reg_imprecise(xs.iter().cloned().zip(ys.iter().cloned())).unwrap(); let (x2, y2): (f64, f64) = linear_regression(&xs, &ys).unwrap(); assert!(f64::abs(x1 - x2) < 0.00001); assert!(f64::abs(y1 - y2) < 0.00001); } #[test] fn int_slices_regression() { let xs: Vec<u8> = vec![1, 2, 3, 4, 5]; let ys: Vec<u8> = vec![2, 4, 5, 4, 5]; assert_eq!(Ok((0.6, 2.2)), linear_regression(&xs, &ys)); } #[test] fn float_tuples_regression() { let tuples: Vec<(f32, f32)> = vec![(1.0, 2.0), (2.0, 4.0), (3.0, 5.0), (4.0, 4.0), (5.0, 5.0)]; assert_eq!(Ok((0.6, 2.2)), linear_regression_of(&tuples)); } #[test] fn int_tuples_regression() { let tuples: Vec<(u32, u32)> = vec![(1, 2), (2, 4), (3, 5), (4, 4), (5, 5)]; assert_eq!(Ok((0.6, 2.2)), linear_regression_of(&tuples)); } }
{ return Err(Error::Mean); }
conditional_block
lib.rs
//! Linear regression //! //! `linreg` calculates linear regressions for two dimensional measurements, also known as //! [simple linear regression](https://en.wikipedia.org/wiki/Simple_linear_regression). //! //! Base for all calculations of linear regression is the simple model found in //! https://en.wikipedia.org/wiki/Ordinary_least_squares#Simple_linear_regression_model. //! //! ## Example use //! //! ```rust //! use linreg::{linear_regression, linear_regression_of}; //! //! // Example 1: x and y values stored in two different vectors //! let xs: Vec<f64> = vec![1.0, 2.0, 3.0, 4.0, 5.0]; //! let ys: Vec<f64> = vec![2.0, 4.0, 5.0, 4.0, 5.0]; //! //! assert_eq!(Ok((0.6, 2.2)), linear_regression(&xs, &ys)); //! //! //! // Example 2: x and y values stored as tuples //! let tuples: Vec<(f32, f32)> = vec![(1.0, 2.0), //! (2.0, 4.0), //! (3.0, 5.0), //! (4.0, 4.0), //! (5.0, 5.0)]; //! //! assert_eq!(Ok((0.6, 2.2)), linear_regression_of(&tuples)); //! //! //! // Example 3: directly operating on integer (converted to float as required) //! let xs: Vec<u8> = vec![1, 2, 3, 4, 5]; //! let ys: Vec<u8> = vec![2, 4, 5, 4, 5]; //! //! assert_eq!(Ok((0.6, 2.2)), linear_regression(&xs, &ys)); //! ``` #![no_std] extern crate num_traits; use num_traits::float::FloatCore; #[cfg(test)] #[macro_use] extern crate std; use core::iter::Iterator; use core::iter::Sum; use displaydoc::Display; /// The kinds of errors that can occur when calculating a linear regression. #[derive(Copy, Clone, Display, Debug, PartialEq)] pub enum Error { /// The slope is too steep to represent, approaching infinity. TooSteep, /// Failed to calculate mean. /// /// This means the input was empty or had too many elements. Mean, /// Lengths of the inputs are different. InputLenDif, /// Can't compute linear regression of zero elements NoElements, } /// Single-pass simple linear regression. 
/// /// Similar to `lin_reg`, but does not require a mean value to be computed in advance and thus /// does not require a second pass over the input data. /// /// Returns `Ok((slope, intercept))` of the regression line. /// /// # Errors /// /// Errors if the number of elements is too large to be represented as `F` or /// the slope is too steep to represent, approaching infinity. pub fn lin_reg_imprecise<I, F>(xys: I) -> Result<(F, F), Error> where F: FloatCore, I: Iterator<Item = (F, F)>, { details::lin_reg_imprecise_components(xys)?.finish() } /// A module containing the building parts of the main API. /// You can use these if you want to have more control over the linear regression mod details { use super::Error; use num_traits::float::FloatCore; /// Low level linear regression primitive for pushing values instead of fetching them /// from an iterator #[derive(Debug)] pub struct Accumulator<F: FloatCore> { x_mean: F, y_mean: F, x_mul_y_mean: F, x_squared_mean: F, n: usize, } impl<F: FloatCore> Default for Accumulator<F> { fn default() -> Self { Self::new() } } impl<F: FloatCore> Accumulator<F> { pub fn new() -> Self { Self { x_mean: F::zero(), y_mean: F::zero(), x_mul_y_mean: F::zero(), x_squared_mean: F::zero(), n: 0, } } pub fn push(&mut self, x: F, y: F) { self.x_mean = self.x_mean + x; self.y_mean = self.y_mean + y; self.x_mul_y_mean = self.x_mul_y_mean + x * y; self.x_squared_mean = self.x_squared_mean + x * x; self.n += 1; } pub fn normalize(&mut self) -> Result<(), Error>
pub fn parts(mut self) -> Result<(F, F, F, F), Error> { self.normalize()?; let Self { x_mean, y_mean, x_mul_y_mean, x_squared_mean, .. } = self; Ok((x_mean, y_mean, x_mul_y_mean, x_squared_mean)) } pub fn finish(self) -> Result<(F, F), Error> { let (x_mean, y_mean, x_mul_y_mean, x_squared_mean) = self.parts()?; let slope = (x_mul_y_mean - x_mean * y_mean) / (x_squared_mean - x_mean * x_mean); let intercept = y_mean - slope * x_mean; if slope.is_nan() { return Err(Error::TooSteep); } Ok((slope, intercept)) } } pub fn lin_reg_imprecise_components<I, F>(xys: I) -> Result<Accumulator<F>, Error> where F: FloatCore, I: Iterator<Item = (F, F)>, { let mut acc = Accumulator::new(); for (x, y) in xys { acc.push(x, y); } acc.normalize()?; Ok(acc) } } /// Calculates a linear regression with a known mean. /// /// Lower-level linear regression function. Assumes that `x_mean` and `y_mean` /// have already been calculated. Returns `Error::DivByZero` if /// /// * the slope is too steep to represent, approaching infinity. /// /// Since there is a mean, this function assumes that `xs` and `ys` are both non-empty. /// /// Returns `Ok((slope, intercept))` of the regression line. pub fn lin_reg<I, F>(xys: I, x_mean: F, y_mean: F) -> Result<(F, F), Error> where I: Iterator<Item = (F, F)>, F: FloatCore, { // SUM (x-mean(x))^2 let mut xxm2 = F::zero(); // SUM (x-mean(x)) (y-mean(y)) let mut xmym2 = F::zero(); for (x, y) in xys { xxm2 = xxm2 + (x - x_mean) * (x - x_mean); xmym2 = xmym2 + (x - x_mean) * (y - y_mean); } let slope = xmym2 / xxm2; // we check for divide-by-zero after the fact if slope.is_nan() { return Err(Error::TooSteep); } let intercept = y_mean - slope * x_mean; Ok((slope, intercept)) } /// Two-pass simple linear regression from slices. /// /// Calculates the linear regression from two slices, one for x- and one for y-values, by /// calculating the mean and then calling `lin_reg`. /// /// Returns `Ok(slope, intercept)` of the regression line. 
/// /// # Errors /// /// Returns an error if /// /// * `xs` and `ys` differ in length /// * `xs` or `ys` are empty /// * the slope is too steep to represent, approaching infinity /// * the number of elements cannot be represented as an `F` /// pub fn linear_regression<X, Y, F>(xs: &[X], ys: &[Y]) -> Result<(F, F), Error> where X: Clone + Into<F>, Y: Clone + Into<F>, F: FloatCore + Sum, { if xs.len()!= ys.len() { return Err(Error::InputLenDif); } if xs.is_empty() { return Err(Error::Mean); } let x_sum: F = xs.iter().cloned().map(Into::into).sum(); let n = F::from(xs.len()).ok_or(Error::Mean)?; let x_mean = x_sum / n; let y_sum: F = ys.iter().cloned().map(Into::into).sum(); let y_mean = y_sum / n; lin_reg( xs.iter() .map(|i| i.clone().into()) .zip(ys.iter().map(|i| i.clone().into())), x_mean, y_mean, ) } /// Two-pass linear regression from tuples. /// /// Calculates the linear regression from a slice of tuple values by first calculating the mean /// before calling `lin_reg`. /// /// Returns `Ok(slope, intercept)` of the regression line. /// /// # Errors /// /// Returns an error if /// /// * `xys` is empty /// * the slope is too steep to represent, approaching infinity /// * the number of elements cannot be represented as an `F` pub fn linear_regression_of<X, Y, F>(xys: &[(X, Y)]) -> Result<(F, F), Error> where X: Clone + Into<F>, Y: Clone + Into<F>, F: FloatCore, { if xys.is_empty() { return Err(Error::Mean); } // We're handrolling the mean computation here, because our generic implementation can't handle tuples. 
// If we ran the generic impl on each tuple field, that would be very cache inefficient let n = F::from(xys.len()).ok_or(Error::Mean)?; let (x_sum, y_sum) = xys .iter() .cloned() .fold((F::zero(), F::zero()), |(sx, sy), (x, y)| { (sx + x.into(), sy + y.into()) }); let x_mean = x_sum / n; let y_mean = y_sum / n; lin_reg( xys.iter() .map(|(x, y)| (x.clone().into(), y.clone().into())), x_mean, y_mean, ) } #[cfg(test)] mod tests { use std::vec::Vec; use super::*; #[test] fn float_slices_regression() { let xs: Vec<f64> = vec![1.0, 2.0, 3.0, 4.0, 5.0]; let ys: Vec<f64> = vec![2.0, 4.0, 5.0, 4.0, 5.0]; assert_eq!(Ok((0.6, 2.2)), linear_regression(&xs, &ys)); } #[test] fn lin_reg_imprecises_vs_linreg() { let xs: Vec<f64> = vec![1.0, 2.0, 3.0, 4.0, 5.0]; let ys: Vec<f64> = vec![2.0, 4.0, 5.0, 4.0, 5.0]; let (x1, y1) = lin_reg_imprecise(xs.iter().cloned().zip(ys.iter().cloned())).unwrap(); let (x2, y2): (f64, f64) = linear_regression(&xs, &ys).unwrap(); assert!(f64::abs(x1 - x2) < 0.00001); assert!(f64::abs(y1 - y2) < 0.00001); } #[test] fn int_slices_regression() { let xs: Vec<u8> = vec![1, 2, 3, 4, 5]; let ys: Vec<u8> = vec![2, 4, 5, 4, 5]; assert_eq!(Ok((0.6, 2.2)), linear_regression(&xs, &ys)); } #[test] fn float_tuples_regression() { let tuples: Vec<(f32, f32)> = vec![(1.0, 2.0), (2.0, 4.0), (3.0, 5.0), (4.0, 4.0), (5.0, 5.0)]; assert_eq!(Ok((0.6, 2.2)), linear_regression_of(&tuples)); } #[test] fn int_tuples_regression() { let tuples: Vec<(u32, u32)> = vec![(1, 2), (2, 4), (3, 5), (4, 4), (5, 5)]; assert_eq!(Ok((0.6, 2.2)), linear_regression_of(&tuples)); } }
{ match self.n { 1 => return Ok(()), 0 => return Err(Error::NoElements), _ => {} } let n = F::from(self.n).ok_or(Error::Mean)?; self.n = 1; self.x_mean = self.x_mean / n; self.y_mean = self.y_mean / n; self.x_mul_y_mean = self.x_mul_y_mean / n; self.x_squared_mean = self.x_squared_mean / n; Ok(()) }
identifier_body
lib.rs
//! Linear regression //! //! `linreg` calculates linear regressions for two dimensional measurements, also known as //! [simple linear regression](https://en.wikipedia.org/wiki/Simple_linear_regression). //! //! Base for all calculations of linear regression is the simple model found in //! https://en.wikipedia.org/wiki/Ordinary_least_squares#Simple_linear_regression_model. //! //! ## Example use //! //! ```rust //! use linreg::{linear_regression, linear_regression_of}; //! //! // Example 1: x and y values stored in two different vectors //! let xs: Vec<f64> = vec![1.0, 2.0, 3.0, 4.0, 5.0]; //! let ys: Vec<f64> = vec![2.0, 4.0, 5.0, 4.0, 5.0]; //! //! assert_eq!(Ok((0.6, 2.2)), linear_regression(&xs, &ys)); //! //! //! // Example 2: x and y values stored as tuples //! let tuples: Vec<(f32, f32)> = vec![(1.0, 2.0), //! (2.0, 4.0), //! (3.0, 5.0), //! (4.0, 4.0), //! (5.0, 5.0)]; //! //! assert_eq!(Ok((0.6, 2.2)), linear_regression_of(&tuples)); //! //! //! // Example 3: directly operating on integer (converted to float as required) //! let xs: Vec<u8> = vec![1, 2, 3, 4, 5]; //! let ys: Vec<u8> = vec![2, 4, 5, 4, 5]; //! //! assert_eq!(Ok((0.6, 2.2)), linear_regression(&xs, &ys)); //! ``` #![no_std] extern crate num_traits; use num_traits::float::FloatCore; #[cfg(test)] #[macro_use] extern crate std; use core::iter::Iterator; use core::iter::Sum; use displaydoc::Display; /// The kinds of errors that can occur when calculating a linear regression. #[derive(Copy, Clone, Display, Debug, PartialEq)] pub enum Error { /// The slope is too steep to represent, approaching infinity. TooSteep, /// Failed to calculate mean. /// /// This means the input was empty or had too many elements. Mean, /// Lengths of the inputs are different. InputLenDif, /// Can't compute linear regression of zero elements NoElements, } /// Single-pass simple linear regression. 
/// /// Similar to `lin_reg`, but does not require a mean value to be computed in advance and thus /// does not require a second pass over the input data. /// /// Returns `Ok((slope, intercept))` of the regression line. /// /// # Errors /// /// Errors if the number of elements is too large to be represented as `F` or /// the slope is too steep to represent, approaching infinity. pub fn lin_reg_imprecise<I, F>(xys: I) -> Result<(F, F), Error> where F: FloatCore, I: Iterator<Item = (F, F)>, { details::lin_reg_imprecise_components(xys)?.finish() } /// A module containing the building parts of the main API. /// You can use these if you want to have more control over the linear regression mod details { use super::Error; use num_traits::float::FloatCore; /// Low level linear regression primitive for pushing values instead of fetching them /// from an iterator #[derive(Debug)] pub struct
<F: FloatCore> { x_mean: F, y_mean: F, x_mul_y_mean: F, x_squared_mean: F, n: usize, } impl<F: FloatCore> Default for Accumulator<F> { fn default() -> Self { Self::new() } } impl<F: FloatCore> Accumulator<F> { pub fn new() -> Self { Self { x_mean: F::zero(), y_mean: F::zero(), x_mul_y_mean: F::zero(), x_squared_mean: F::zero(), n: 0, } } pub fn push(&mut self, x: F, y: F) { self.x_mean = self.x_mean + x; self.y_mean = self.y_mean + y; self.x_mul_y_mean = self.x_mul_y_mean + x * y; self.x_squared_mean = self.x_squared_mean + x * x; self.n += 1; } pub fn normalize(&mut self) -> Result<(), Error> { match self.n { 1 => return Ok(()), 0 => return Err(Error::NoElements), _ => {} } let n = F::from(self.n).ok_or(Error::Mean)?; self.n = 1; self.x_mean = self.x_mean / n; self.y_mean = self.y_mean / n; self.x_mul_y_mean = self.x_mul_y_mean / n; self.x_squared_mean = self.x_squared_mean / n; Ok(()) } pub fn parts(mut self) -> Result<(F, F, F, F), Error> { self.normalize()?; let Self { x_mean, y_mean, x_mul_y_mean, x_squared_mean, .. } = self; Ok((x_mean, y_mean, x_mul_y_mean, x_squared_mean)) } pub fn finish(self) -> Result<(F, F), Error> { let (x_mean, y_mean, x_mul_y_mean, x_squared_mean) = self.parts()?; let slope = (x_mul_y_mean - x_mean * y_mean) / (x_squared_mean - x_mean * x_mean); let intercept = y_mean - slope * x_mean; if slope.is_nan() { return Err(Error::TooSteep); } Ok((slope, intercept)) } } pub fn lin_reg_imprecise_components<I, F>(xys: I) -> Result<Accumulator<F>, Error> where F: FloatCore, I: Iterator<Item = (F, F)>, { let mut acc = Accumulator::new(); for (x, y) in xys { acc.push(x, y); } acc.normalize()?; Ok(acc) } } /// Calculates a linear regression with a known mean. /// /// Lower-level linear regression function. Assumes that `x_mean` and `y_mean` /// have already been calculated. Returns `Error::DivByZero` if /// /// * the slope is too steep to represent, approaching infinity. 
/// /// Since there is a mean, this function assumes that `xs` and `ys` are both non-empty. /// /// Returns `Ok((slope, intercept))` of the regression line. pub fn lin_reg<I, F>(xys: I, x_mean: F, y_mean: F) -> Result<(F, F), Error> where I: Iterator<Item = (F, F)>, F: FloatCore, { // SUM (x-mean(x))^2 let mut xxm2 = F::zero(); // SUM (x-mean(x)) (y-mean(y)) let mut xmym2 = F::zero(); for (x, y) in xys { xxm2 = xxm2 + (x - x_mean) * (x - x_mean); xmym2 = xmym2 + (x - x_mean) * (y - y_mean); } let slope = xmym2 / xxm2; // we check for divide-by-zero after the fact if slope.is_nan() { return Err(Error::TooSteep); } let intercept = y_mean - slope * x_mean; Ok((slope, intercept)) } /// Two-pass simple linear regression from slices. /// /// Calculates the linear regression from two slices, one for x- and one for y-values, by /// calculating the mean and then calling `lin_reg`. /// /// Returns `Ok(slope, intercept)` of the regression line. /// /// # Errors /// /// Returns an error if /// /// * `xs` and `ys` differ in length /// * `xs` or `ys` are empty /// * the slope is too steep to represent, approaching infinity /// * the number of elements cannot be represented as an `F` /// pub fn linear_regression<X, Y, F>(xs: &[X], ys: &[Y]) -> Result<(F, F), Error> where X: Clone + Into<F>, Y: Clone + Into<F>, F: FloatCore + Sum, { if xs.len()!= ys.len() { return Err(Error::InputLenDif); } if xs.is_empty() { return Err(Error::Mean); } let x_sum: F = xs.iter().cloned().map(Into::into).sum(); let n = F::from(xs.len()).ok_or(Error::Mean)?; let x_mean = x_sum / n; let y_sum: F = ys.iter().cloned().map(Into::into).sum(); let y_mean = y_sum / n; lin_reg( xs.iter() .map(|i| i.clone().into()) .zip(ys.iter().map(|i| i.clone().into())), x_mean, y_mean, ) } /// Two-pass linear regression from tuples. /// /// Calculates the linear regression from a slice of tuple values by first calculating the mean /// before calling `lin_reg`. /// /// Returns `Ok(slope, intercept)` of the regression line. 
/// /// # Errors /// /// Returns an error if /// /// * `xys` is empty /// * the slope is too steep to represent, approaching infinity /// * the number of elements cannot be represented as an `F` pub fn linear_regression_of<X, Y, F>(xys: &[(X, Y)]) -> Result<(F, F), Error> where X: Clone + Into<F>, Y: Clone + Into<F>, F: FloatCore, { if xys.is_empty() { return Err(Error::Mean); } // We're handrolling the mean computation here, because our generic implementation can't handle tuples. // If we ran the generic impl on each tuple field, that would be very cache inefficient let n = F::from(xys.len()).ok_or(Error::Mean)?; let (x_sum, y_sum) = xys .iter() .cloned() .fold((F::zero(), F::zero()), |(sx, sy), (x, y)| { (sx + x.into(), sy + y.into()) }); let x_mean = x_sum / n; let y_mean = y_sum / n; lin_reg( xys.iter() .map(|(x, y)| (x.clone().into(), y.clone().into())), x_mean, y_mean, ) } #[cfg(test)] mod tests { use std::vec::Vec; use super::*; #[test] fn float_slices_regression() { let xs: Vec<f64> = vec![1.0, 2.0, 3.0, 4.0, 5.0]; let ys: Vec<f64> = vec![2.0, 4.0, 5.0, 4.0, 5.0]; assert_eq!(Ok((0.6, 2.2)), linear_regression(&xs, &ys)); } #[test] fn lin_reg_imprecises_vs_linreg() { let xs: Vec<f64> = vec![1.0, 2.0, 3.0, 4.0, 5.0]; let ys: Vec<f64> = vec![2.0, 4.0, 5.0, 4.0, 5.0]; let (x1, y1) = lin_reg_imprecise(xs.iter().cloned().zip(ys.iter().cloned())).unwrap(); let (x2, y2): (f64, f64) = linear_regression(&xs, &ys).unwrap(); assert!(f64::abs(x1 - x2) < 0.00001); assert!(f64::abs(y1 - y2) < 0.00001); } #[test] fn int_slices_regression() { let xs: Vec<u8> = vec![1, 2, 3, 4, 5]; let ys: Vec<u8> = vec![2, 4, 5, 4, 5]; assert_eq!(Ok((0.6, 2.2)), linear_regression(&xs, &ys)); } #[test] fn float_tuples_regression() { let tuples: Vec<(f32, f32)> = vec![(1.0, 2.0), (2.0, 4.0), (3.0, 5.0), (4.0, 4.0), (5.0, 5.0)]; assert_eq!(Ok((0.6, 2.2)), linear_regression_of(&tuples)); } #[test] fn int_tuples_regression() { let tuples: Vec<(u32, u32)> = vec![(1, 2), (2, 4), (3, 5), (4, 4), (5, 
5)]; assert_eq!(Ok((0.6, 2.2)), linear_regression_of(&tuples)); } }
Accumulator
identifier_name
lib.rs
//! Linear regression //! //! `linreg` calculates linear regressions for two dimensional measurements, also known as //! [simple linear regression](https://en.wikipedia.org/wiki/Simple_linear_regression). //! //! Base for all calculations of linear regression is the simple model found in //! https://en.wikipedia.org/wiki/Ordinary_least_squares#Simple_linear_regression_model. //! //! ## Example use //! //! ```rust //! use linreg::{linear_regression, linear_regression_of}; //! //! // Example 1: x and y values stored in two different vectors //! let xs: Vec<f64> = vec![1.0, 2.0, 3.0, 4.0, 5.0]; //! let ys: Vec<f64> = vec![2.0, 4.0, 5.0, 4.0, 5.0]; //! //! assert_eq!(Ok((0.6, 2.2)), linear_regression(&xs, &ys)); //! //! //! // Example 2: x and y values stored as tuples //! let tuples: Vec<(f32, f32)> = vec![(1.0, 2.0), //! (2.0, 4.0), //! (3.0, 5.0), //! (4.0, 4.0), //! (5.0, 5.0)]; //! //! assert_eq!(Ok((0.6, 2.2)), linear_regression_of(&tuples)); //! //! //! // Example 3: directly operating on integer (converted to float as required) //! let xs: Vec<u8> = vec![1, 2, 3, 4, 5]; //! let ys: Vec<u8> = vec![2, 4, 5, 4, 5]; //! //! assert_eq!(Ok((0.6, 2.2)), linear_regression(&xs, &ys)); //! ``` #![no_std] extern crate num_traits; use num_traits::float::FloatCore; #[cfg(test)] #[macro_use] extern crate std; use core::iter::Iterator; use core::iter::Sum; use displaydoc::Display; /// The kinds of errors that can occur when calculating a linear regression. #[derive(Copy, Clone, Display, Debug, PartialEq)] pub enum Error { /// The slope is too steep to represent, approaching infinity. TooSteep, /// Failed to calculate mean. /// /// This means the input was empty or had too many elements. Mean, /// Lengths of the inputs are different. InputLenDif, /// Can't compute linear regression of zero elements NoElements, } /// Single-pass simple linear regression. 
/// /// Similar to `lin_reg`, but does not require a mean value to be computed in advance and thus /// does not require a second pass over the input data. /// /// Returns `Ok((slope, intercept))` of the regression line. /// /// # Errors /// /// Errors if the number of elements is too large to be represented as `F` or /// the slope is too steep to represent, approaching infinity. pub fn lin_reg_imprecise<I, F>(xys: I) -> Result<(F, F), Error> where F: FloatCore, I: Iterator<Item = (F, F)>, { details::lin_reg_imprecise_components(xys)?.finish() } /// A module containing the building parts of the main API. /// You can use these if you want to have more control over the linear regression mod details { use super::Error; use num_traits::float::FloatCore; /// Low level linear regression primitive for pushing values instead of fetching them /// from an iterator #[derive(Debug)] pub struct Accumulator<F: FloatCore> { x_mean: F, y_mean: F, x_mul_y_mean: F, x_squared_mean: F, n: usize, } impl<F: FloatCore> Default for Accumulator<F> { fn default() -> Self { Self::new() } } impl<F: FloatCore> Accumulator<F> { pub fn new() -> Self { Self { x_mean: F::zero(), y_mean: F::zero(), x_mul_y_mean: F::zero(), x_squared_mean: F::zero(), n: 0, } } pub fn push(&mut self, x: F, y: F) { self.x_mean = self.x_mean + x; self.y_mean = self.y_mean + y; self.x_mul_y_mean = self.x_mul_y_mean + x * y; self.x_squared_mean = self.x_squared_mean + x * x; self.n += 1; } pub fn normalize(&mut self) -> Result<(), Error> { match self.n { 1 => return Ok(()), 0 => return Err(Error::NoElements), _ => {} } let n = F::from(self.n).ok_or(Error::Mean)?; self.n = 1; self.x_mean = self.x_mean / n; self.y_mean = self.y_mean / n; self.x_mul_y_mean = self.x_mul_y_mean / n; self.x_squared_mean = self.x_squared_mean / n; Ok(()) } pub fn parts(mut self) -> Result<(F, F, F, F), Error> { self.normalize()?; let Self { x_mean, y_mean, x_mul_y_mean, x_squared_mean, .. 
} = self; Ok((x_mean, y_mean, x_mul_y_mean, x_squared_mean)) } pub fn finish(self) -> Result<(F, F), Error> { let (x_mean, y_mean, x_mul_y_mean, x_squared_mean) = self.parts()?; let slope = (x_mul_y_mean - x_mean * y_mean) / (x_squared_mean - x_mean * x_mean); let intercept = y_mean - slope * x_mean; if slope.is_nan() { return Err(Error::TooSteep); } Ok((slope, intercept)) } } pub fn lin_reg_imprecise_components<I, F>(xys: I) -> Result<Accumulator<F>, Error> where F: FloatCore, I: Iterator<Item = (F, F)>, { let mut acc = Accumulator::new(); for (x, y) in xys { acc.push(x, y); } acc.normalize()?; Ok(acc) } } /// Calculates a linear regression with a known mean. /// /// Lower-level linear regression function. Assumes that `x_mean` and `y_mean` /// have already been calculated. Returns `Error::DivByZero` if /// /// * the slope is too steep to represent, approaching infinity. /// /// Since there is a mean, this function assumes that `xs` and `ys` are both non-empty. /// /// Returns `Ok((slope, intercept))` of the regression line. pub fn lin_reg<I, F>(xys: I, x_mean: F, y_mean: F) -> Result<(F, F), Error> where I: Iterator<Item = (F, F)>, F: FloatCore, { // SUM (x-mean(x))^2 let mut xxm2 = F::zero(); // SUM (x-mean(x)) (y-mean(y)) let mut xmym2 = F::zero(); for (x, y) in xys { xxm2 = xxm2 + (x - x_mean) * (x - x_mean); xmym2 = xmym2 + (x - x_mean) * (y - y_mean); } let slope = xmym2 / xxm2; // we check for divide-by-zero after the fact if slope.is_nan() { return Err(Error::TooSteep); } let intercept = y_mean - slope * x_mean; Ok((slope, intercept)) } /// Two-pass simple linear regression from slices. /// /// Calculates the linear regression from two slices, one for x- and one for y-values, by /// calculating the mean and then calling `lin_reg`. /// /// Returns `Ok(slope, intercept)` of the regression line. 
/// /// # Errors /// /// Returns an error if /// /// * `xs` and `ys` differ in length /// * `xs` or `ys` are empty /// * the slope is too steep to represent, approaching infinity /// * the number of elements cannot be represented as an `F` /// pub fn linear_regression<X, Y, F>(xs: &[X], ys: &[Y]) -> Result<(F, F), Error> where X: Clone + Into<F>, Y: Clone + Into<F>, F: FloatCore + Sum, { if xs.len()!= ys.len() { return Err(Error::InputLenDif); } if xs.is_empty() { return Err(Error::Mean); } let x_sum: F = xs.iter().cloned().map(Into::into).sum(); let n = F::from(xs.len()).ok_or(Error::Mean)?; let x_mean = x_sum / n; let y_sum: F = ys.iter().cloned().map(Into::into).sum(); let y_mean = y_sum / n; lin_reg( xs.iter() .map(|i| i.clone().into()) .zip(ys.iter().map(|i| i.clone().into())), x_mean, y_mean, ) } /// Two-pass linear regression from tuples. /// /// Calculates the linear regression from a slice of tuple values by first calculating the mean /// before calling `lin_reg`. /// /// Returns `Ok(slope, intercept)` of the regression line. /// /// # Errors /// /// Returns an error if /// /// * `xys` is empty /// * the slope is too steep to represent, approaching infinity /// * the number of elements cannot be represented as an `F` pub fn linear_regression_of<X, Y, F>(xys: &[(X, Y)]) -> Result<(F, F), Error> where X: Clone + Into<F>, Y: Clone + Into<F>, F: FloatCore, { if xys.is_empty() { return Err(Error::Mean); } // We're handrolling the mean computation here, because our generic implementation can't handle tuples.
.fold((F::zero(), F::zero()), |(sx, sy), (x, y)| { (sx + x.into(), sy + y.into()) }); let x_mean = x_sum / n; let y_mean = y_sum / n; lin_reg( xys.iter() .map(|(x, y)| (x.clone().into(), y.clone().into())), x_mean, y_mean, ) } #[cfg(test)] mod tests { use std::vec::Vec; use super::*; #[test] fn float_slices_regression() { let xs: Vec<f64> = vec![1.0, 2.0, 3.0, 4.0, 5.0]; let ys: Vec<f64> = vec![2.0, 4.0, 5.0, 4.0, 5.0]; assert_eq!(Ok((0.6, 2.2)), linear_regression(&xs, &ys)); } #[test] fn lin_reg_imprecises_vs_linreg() { let xs: Vec<f64> = vec![1.0, 2.0, 3.0, 4.0, 5.0]; let ys: Vec<f64> = vec![2.0, 4.0, 5.0, 4.0, 5.0]; let (x1, y1) = lin_reg_imprecise(xs.iter().cloned().zip(ys.iter().cloned())).unwrap(); let (x2, y2): (f64, f64) = linear_regression(&xs, &ys).unwrap(); assert!(f64::abs(x1 - x2) < 0.00001); assert!(f64::abs(y1 - y2) < 0.00001); } #[test] fn int_slices_regression() { let xs: Vec<u8> = vec![1, 2, 3, 4, 5]; let ys: Vec<u8> = vec![2, 4, 5, 4, 5]; assert_eq!(Ok((0.6, 2.2)), linear_regression(&xs, &ys)); } #[test] fn float_tuples_regression() { let tuples: Vec<(f32, f32)> = vec![(1.0, 2.0), (2.0, 4.0), (3.0, 5.0), (4.0, 4.0), (5.0, 5.0)]; assert_eq!(Ok((0.6, 2.2)), linear_regression_of(&tuples)); } #[test] fn int_tuples_regression() { let tuples: Vec<(u32, u32)> = vec![(1, 2), (2, 4), (3, 5), (4, 4), (5, 5)]; assert_eq!(Ok((0.6, 2.2)), linear_regression_of(&tuples)); } }
// If we ran the generic impl on each tuple field, that would be very cache inefficient let n = F::from(xys.len()).ok_or(Error::Mean)?; let (x_sum, y_sum) = xys .iter() .cloned()
random_line_split
mod.rs
/* * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ use core::mem::transmute; use core::mem::MaybeUninit; use core::ptr; use core::sync::atomic::fence; use core::sync::atomic::Ordering::*; use atomic::Atomic; use lazy_static::lazy_static; use crate::callbacks; use crate::signal; use crate::slot_map::SlotKey; use crate::slot_map::SlotMap; use crate::tool::Tool; use crate::tool::ToolGlobal; pub mod guard; pub type HandlerInput = libc::siginfo_t; /// We need to keep track of the sigaction that the user specified or what was /// originally provided as a default separately from what we execute directly as /// a signal handler. #[derive(Debug, Clone, Copy)] struct SigActionPair { /// Prisinte sigaction provided by the user or os guest_facing_action: libc::sigaction, /// The actual sigaction we are using internal_action: libc::sigaction, } impl SigActionPair { /// Create a new SigActionPair from the original sig action and an override /// for the default handler. 
The created pair will contain the original /// action, and a synthetic action with the handler replaced if an override /// is provided or if the the sa_sigaction is one of the non-function- /// pointer values (`SI_DFL`, `SI_ERR`, `SI_IGN`) fn new(original: libc::sigaction, override_handler: Option<libc::sighandler_t>) -> Self { let mut internal_action = original.clone(); // This is safe because it is only reading from a mut static that is // guaranteed to have been completely set before this function // is called internal_action.sa_sigaction = unsafe { match (original.sa_sigaction, override_handler) { (_, Some(override_handler)) => override_handler, (libc::SIG_DFL, _) => DEFAULT_EXIT_HANDLER .expect("Default handlers should be set before registering actions"), (libc::SIG_IGN, _) => DEFAULT_IGNORE_HANDLER .expect("Default handlers should be set before registering actions"), (libc::SIG_ERR, _) => DEFAULT_ERROR_HANDLER .expect("Default handlers should be set before registering actions"), (default_action, None) => default_action, } }; SigActionPair { guest_facing_action: original, internal_action, } } } lazy_static! { /// This is where we are storing the registered actions for each signal. /// We have to store them as Options for now because our slot map requires /// its stored type to implement default static ref HANDLER_SLOT_MAP: SlotMap<Option<SigActionPair>> = SlotMap::new(); } // The sighandler_t type has some values that aren't pointers that are still // valid. They aren't executable, so we need an executable version that we // control for each. 
Those are below /// Storage of our default handler for the libc::SIG_DFL static mut DEFAULT_EXIT_HANDLER: Option<libc::sighandler_t> = None; /// Storage of our default handler for the libc::SIG_IGN static mut DEFAULT_IGNORE_HANDLER: Option<libc::sighandler_t> = None; /// Storage of our default handler for the libc::SIG_ERR static mut DEFAULT_ERROR_HANDLER: Option<libc::sighandler_t> = None; /// This function invokes the function specified by the given sigaction directly /// with the given signal value or siginfo as arguments depending on whether /// the sigaction's flags indicate it is expecting a sigaction or siginfo. /// Note. In the case that the action is requesting sigaction, the 3rd argument /// to the handler will always be null. The specifications for sigaction say the /// third argument is a pointer to the context for the signal being raised, but /// we cannot guarantee that context will be valid with the handler function is /// executed. It also seems like that argument's use is rare, so we are omitting /// it for the time being. When T122210155, we should be able to provide the ctx /// argument without introducing unsafety. unsafe fn invoke_signal_handler( signal_val: libc::c_int, action: &libc::sigaction, sig_info: libc::siginfo_t, )
/// Register the given sigaction as the default. Optionally an override function /// can be passed in that will us to change the default handler for an action fn insert_action( sigaction: libc::sigaction, override_default_handler: Option<libc::sighandler_t>, ) -> SlotKey { HANDLER_SLOT_MAP.insert(Some(SigActionPair::new( sigaction, override_default_handler, ))) } /// Register a signal handler for the guest and return the sigaction currently /// registered for the specified signal #[allow(dead_code)] pub fn register_guest_handler(signal_value: i32, new_action: libc::sigaction) -> libc::sigaction { register_guest_handler_impl(signal_value, new_action, false) .expect("All signals should have pre-registered guest handlers before now") } /// This is our replacement for default handlers where /// `libc::sighandler_t = libc::SIG_DFL` which is the default handler /// value for almost all signals. This function will stop all threads in order /// to raise thread-exit events for each pub extern "C" fn default_exit_handler<T: ToolGlobal>( _signal_value: libc::c_int, _siginfo: *const libc::siginfo_t, _ctx: *const libc::c_void, ) { callbacks::exit_group::<T>(0); } /// This is our replacement for default handlers where /// `libc::sighandler_t = libc::SIG_IGN` which is the default handler /// value for lots of signals. This function does nothing, but allows uniform /// treatment of function pointers in signal handlers (instead of checking for) ///specific values of sighandler_t before calling pub extern "C" fn default_ignore_handler<T: ToolGlobal>( _signal_value: libc::c_int, _siginfo: *const libc::siginfo_t, _ctx: *const libc::c_void, ) { } /// This is our replacement for default handlers where /// `libc::sighandler_t = libc::SIG_ERR` which is the default handler /// value for signals representing unrecoverable errors (SIGILL, SIGSEGV, etc). 
/// This function will stop all threads in order to raise thread-exit events /// for each, but the error code will be non-zero pub extern "C" fn default_error_handler<T: ToolGlobal>( _signal_value: libc::c_int, _siginfo: *const libc::siginfo_t, _ctx: *const libc::c_void, ) { callbacks::exit_group::<T>(1); } /// This macro defines the functions and constants and api for signals based on /// an input set of signal. There should only be one invocation of the macro, /// and it is below. It allows us to express the list of signals we are /// supporting with properties on each to deal with edge cases macro_rules! generate_signal_handlers { ( default_exit_handler: $default_exit_handler_fn:expr, default_ignore_handler: $default_ignore_handler_fn:expr, default_error_handler: $default_error_handler_fn:expr, signals: [$($signal_name:ident $({ $(override_default = $override_default_handler:expr;)? $(guest_handler_allowed = $guest_handler_allowed:expr;)? })?),+$(,)?]) => { /// All signal values as i32 mod signal_values { $( pub const $signal_name: i32 = libc::$signal_name as i32; )+ } /// Storage for the slot keys that point to the handlers for each signal mod handler_keys { use super::*; $( pub static $signal_name: Atomic<Option<SlotKey>> = Atomic::new(None); )+ } /// Handler functions for each signal mod reverie_handlers { use super::*; $( #[allow(non_snake_case)] pub fn $signal_name(handler_input: HandlerInput) { if let Some(Some(SigActionPair { internal_action, .. })) = handler_keys::$signal_name .load(Relaxed) .and_then(|key| HANDLER_SLOT_MAP.get(key)) { unsafe { invoke_signal_handler( signal_values::$signal_name as libc::c_int, internal_action, handler_input, ); } } } )+ } /// This is the function that will be registered for all signals. 
/// guest and default handlers for each signal will be dispatched from /// here using the global sequencer to prevent signals from interfering /// with reverie or its tool's state pub extern "C" fn central_handler<T: ToolGlobal>( real_signal_value: i32, sig_info_ptr: *const libc::siginfo_t, _ctx: *const libc::c_void, ) { let wrapped_handler = match real_signal_value { $( signal_values::$signal_name => reverie_handlers::$signal_name, )+ _ => panic!("Invalid signal {}", real_signal_value) }; let sig_info = unsafe { *sig_info_ptr }; T::global().handle_signal_event(real_signal_value); signal::guard::invoke_guarded(wrapped_handler, sig_info); } /// This is the funtion that needs to be called to initialize all the /// signal handling machinery. This will register our central handler /// for all signals pub fn register_central_handler<T: ToolGlobal>() { // Register the default handler functions that correspond to the // scalar sighandler_t behaviors. This is safe because this will // only be done before the first syscall is handled, and only // one thread will be active. 
unsafe { DEFAULT_EXIT_HANDLER = Some($default_exit_handler_fn as *const libc::c_void as libc::sighandler_t); DEFAULT_IGNORE_HANDLER = Some($default_ignore_handler_fn as *const libc::c_void as libc::sighandler_t); DEFAULT_ERROR_HANDLER = Some($default_error_handler_fn as *const libc::c_void as libc::sighandler_t); } // To make sure handlers are set before continuing fence(SeqCst); $( unsafe { let sa_sigaction = central_handler::<T> as extern "C" fn(libc::c_int, *const libc::siginfo_t, *const libc::c_void) as *mut libc::c_void as libc::sighandler_t; let mut sa_mask = MaybeUninit::<libc::sigset_t>::uninit(); assert_eq!(0, libc::sigemptyset(sa_mask.as_mut_ptr()), "Failed to create sigset"); libc::sigaddset(sa_mask.as_mut_ptr(), signal_values::$signal_name); let action = libc::sigaction { sa_sigaction, sa_mask: sa_mask.assume_init(), sa_flags: 0x14000000, sa_restorer: None, }; let mut original_action : MaybeUninit<libc::sigaction> = MaybeUninit::uninit(); assert_eq!(0, libc::sigaction( signal_values::$signal_name as libc::c_int, &action as *const libc::sigaction, original_action.as_mut_ptr(), ), "Failed to register central handler for {}", stringify!($signal_name)); let override_default_handler = None $($( .or(Some( $override_default_handler as *const libc::c_void as libc::sighandler_t) ) )?)?; let handler_key = insert_action( original_action.assume_init(), override_default_handler, ); handler_keys::$signal_name.store(Some(handler_key), SeqCst); } )+ } /// Register the given action for the given signal. The force-allow /// flag means that the handler will be registered even if guest /// handlers are disallowed for the given signal. 
Return a copy of the /// sigaction that was previously associated with the given signal fn register_guest_handler_impl( signal_value: i32, new_action: libc::sigaction, force_allow: bool ) -> Option<libc::sigaction> { let (handler_key, guest_handler_allowed, signal_name) = match signal_value { $( signal_values::$signal_name => { let allowed = force_allow || (true $($( && $guest_handler_allowed)?)?); let signal_name = stringify!($signal_name); (&handler_keys::$signal_name, allowed, signal_name) }, )+ _ => panic!("Invalid signal {}", signal_value) }; if!guest_handler_allowed { panic!("Guest handler registration for {} is not supported", signal_name); } let new_action_key = insert_action(new_action, None); let old_action_key_opt = handler_key.swap(Some(new_action_key), Relaxed); // The first time this function is called, there won't be a stored // key for every signal action, but if there is return it. It is // safe because the key being used must have come from the same // map, and because no elements are deleted, the get operation // will always succeed old_action_key_opt.map(|old_action_key| unsafe { HANDLER_SLOT_MAP.get_unchecked(old_action_key).unwrap().guest_facing_action }) } /// Get the sigaction registered for the given signal if there is one. 
/// The returned sigaction will either be the original default sigaction /// set by default for the application or the unaltered sigaction /// registered by the user #[allow(dead_code)] pub fn get_registered_guest_handler( signal_value: i32 ) -> libc::sigaction { let current_action_key = match signal_value { $( signal_values::$signal_name => { handler_keys::$signal_name .load(Relaxed) .expect("All signals should have guest handlers before now") } )+ _ => panic!("Invalid signal {}", signal_value) }; // This is safe because the key being used must have come from the // same map, and because no elements are deleted, the get operation // will always succeed unsafe { HANDLER_SLOT_MAP.get_unchecked(current_action_key) .unwrap().guest_facing_action } } }; } generate_signal_handlers! { default_exit_handler: default_exit_handler::<T>, default_ignore_handler: default_ignore_handler::<T>, default_error_handler: default_error_handler::<T>, signals: [ SIGHUP, SIGINT, SIGQUIT, // SIGILL, <- needs special synchronous handling Todo(T129735993) SIGTRAP, SIGABRT, SIGBUS, SIGFPE, // SIGKILL, <- cannot be handled directly Todo(T129348205) SIGUSR1, // SIGSEGV, <- needs special synchronous handling Todo(T129735993) SIGUSR2, SIGPIPE, SIGALRM, SIGTERM, SIGSTKFLT { // This is our controlled exit signal. If the guest tries to // register a handler for it, we will panic rather than chancining // undefined behavior override_default = crate::callbacks::handle_exit_signal::<T>; guest_handler_allowed = false; }, // SIGCHLD, <- Causing problems in test_rr_syscallbuf_sigstop T128095829 SIGCONT, // SIGSTOP, <- cannot be handled directly Todo(T129348205) SIGTSTP, SIGTTIN, SIGTTOU, SIGURG, SIGXCPU, SIGXFSZ, SIGVTALRM, SIGPROF, SIGWINCH, SIGIO, SIGPWR, SIGSYS, ] }
{ if action.sa_flags & libc::SA_SIGINFO > 0 { let to_run: extern "C" fn(libc::c_int, *const libc::siginfo_t, *const libc::c_void) = transmute(action.sa_sigaction as *const libc::c_void); to_run( signal_val, &sig_info as *const libc::siginfo_t, ptr::null::<libc::c_void>(), ); } else { let to_run: extern "C" fn(libc::c_int) = transmute(action.sa_sigaction as *const libc::c_void); to_run(signal_val); } }
identifier_body
mod.rs
/* * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ use core::mem::transmute; use core::mem::MaybeUninit; use core::ptr; use core::sync::atomic::fence; use core::sync::atomic::Ordering::*; use atomic::Atomic; use lazy_static::lazy_static; use crate::callbacks; use crate::signal; use crate::slot_map::SlotKey; use crate::slot_map::SlotMap; use crate::tool::Tool; use crate::tool::ToolGlobal; pub mod guard; pub type HandlerInput = libc::siginfo_t; /// We need to keep track of the sigaction that the user specified or what was /// originally provided as a default separately from what we execute directly as /// a signal handler. #[derive(Debug, Clone, Copy)] struct SigActionPair { /// Prisinte sigaction provided by the user or os guest_facing_action: libc::sigaction, /// The actual sigaction we are using internal_action: libc::sigaction, } impl SigActionPair { /// Create a new SigActionPair from the original sig action and an override /// for the default handler. 
The created pair will contain the original /// action, and a synthetic action with the handler replaced if an override /// is provided or if the the sa_sigaction is one of the non-function- /// pointer values (`SI_DFL`, `SI_ERR`, `SI_IGN`) fn new(original: libc::sigaction, override_handler: Option<libc::sighandler_t>) -> Self { let mut internal_action = original.clone(); // This is safe because it is only reading from a mut static that is // guaranteed to have been completely set before this function // is called internal_action.sa_sigaction = unsafe { match (original.sa_sigaction, override_handler) { (_, Some(override_handler)) => override_handler, (libc::SIG_DFL, _) => DEFAULT_EXIT_HANDLER .expect("Default handlers should be set before registering actions"), (libc::SIG_IGN, _) => DEFAULT_IGNORE_HANDLER .expect("Default handlers should be set before registering actions"), (libc::SIG_ERR, _) => DEFAULT_ERROR_HANDLER .expect("Default handlers should be set before registering actions"), (default_action, None) => default_action, } }; SigActionPair { guest_facing_action: original, internal_action, } } } lazy_static! { /// This is where we are storing the registered actions for each signal. /// We have to store them as Options for now because our slot map requires /// its stored type to implement default static ref HANDLER_SLOT_MAP: SlotMap<Option<SigActionPair>> = SlotMap::new(); } // The sighandler_t type has some values that aren't pointers that are still // valid. They aren't executable, so we need an executable version that we // control for each. 
Those are below /// Storage of our default handler for the libc::SIG_DFL static mut DEFAULT_EXIT_HANDLER: Option<libc::sighandler_t> = None; /// Storage of our default handler for the libc::SIG_IGN static mut DEFAULT_IGNORE_HANDLER: Option<libc::sighandler_t> = None; /// Storage of our default handler for the libc::SIG_ERR static mut DEFAULT_ERROR_HANDLER: Option<libc::sighandler_t> = None; /// This function invokes the function specified by the given sigaction directly /// with the given signal value or siginfo as arguments depending on whether /// the sigaction's flags indicate it is expecting a sigaction or siginfo. /// Note. In the case that the action is requesting sigaction, the 3rd argument /// to the handler will always be null. The specifications for sigaction say the /// third argument is a pointer to the context for the signal being raised, but /// we cannot guarantee that context will be valid with the handler function is /// executed. It also seems like that argument's use is rare, so we are omitting /// it for the time being. When T122210155, we should be able to provide the ctx /// argument without introducing unsafety. unsafe fn invoke_signal_handler( signal_val: libc::c_int, action: &libc::sigaction, sig_info: libc::siginfo_t, ) { if action.sa_flags & libc::SA_SIGINFO > 0 { let to_run: extern "C" fn(libc::c_int, *const libc::siginfo_t, *const libc::c_void) = transmute(action.sa_sigaction as *const libc::c_void); to_run( signal_val, &sig_info as *const libc::siginfo_t, ptr::null::<libc::c_void>(), ); } else { let to_run: extern "C" fn(libc::c_int) = transmute(action.sa_sigaction as *const libc::c_void); to_run(signal_val); } } /// Register the given sigaction as the default. 
Optionally an override function /// can be passed in that will us to change the default handler for an action fn insert_action( sigaction: libc::sigaction, override_default_handler: Option<libc::sighandler_t>, ) -> SlotKey { HANDLER_SLOT_MAP.insert(Some(SigActionPair::new( sigaction, override_default_handler, ))) } /// Register a signal handler for the guest and return the sigaction currently /// registered for the specified signal #[allow(dead_code)] pub fn register_guest_handler(signal_value: i32, new_action: libc::sigaction) -> libc::sigaction { register_guest_handler_impl(signal_value, new_action, false) .expect("All signals should have pre-registered guest handlers before now") } /// This is our replacement for default handlers where /// `libc::sighandler_t = libc::SIG_DFL` which is the default handler /// value for almost all signals. This function will stop all threads in order /// to raise thread-exit events for each pub extern "C" fn default_exit_handler<T: ToolGlobal>( _signal_value: libc::c_int, _siginfo: *const libc::siginfo_t, _ctx: *const libc::c_void, ) { callbacks::exit_group::<T>(0); }
/// treatment of function pointers in signal handlers (instead of checking for) ///specific values of sighandler_t before calling pub extern "C" fn default_ignore_handler<T: ToolGlobal>( _signal_value: libc::c_int, _siginfo: *const libc::siginfo_t, _ctx: *const libc::c_void, ) { } /// This is our replacement for default handlers where /// `libc::sighandler_t = libc::SIG_ERR` which is the default handler /// value for signals representing unrecoverable errors (SIGILL, SIGSEGV, etc). /// This function will stop all threads in order to raise thread-exit events /// for each, but the error code will be non-zero pub extern "C" fn default_error_handler<T: ToolGlobal>( _signal_value: libc::c_int, _siginfo: *const libc::siginfo_t, _ctx: *const libc::c_void, ) { callbacks::exit_group::<T>(1); } /// This macro defines the functions and constants and api for signals based on /// an input set of signal. There should only be one invocation of the macro, /// and it is below. It allows us to express the list of signals we are /// supporting with properties on each to deal with edge cases macro_rules! generate_signal_handlers { ( default_exit_handler: $default_exit_handler_fn:expr, default_ignore_handler: $default_ignore_handler_fn:expr, default_error_handler: $default_error_handler_fn:expr, signals: [$($signal_name:ident $({ $(override_default = $override_default_handler:expr;)? $(guest_handler_allowed = $guest_handler_allowed:expr;)? })?),+$(,)?]) => { /// All signal values as i32 mod signal_values { $( pub const $signal_name: i32 = libc::$signal_name as i32; )+ } /// Storage for the slot keys that point to the handlers for each signal mod handler_keys { use super::*; $( pub static $signal_name: Atomic<Option<SlotKey>> = Atomic::new(None); )+ } /// Handler functions for each signal mod reverie_handlers { use super::*; $( #[allow(non_snake_case)] pub fn $signal_name(handler_input: HandlerInput) { if let Some(Some(SigActionPair { internal_action, .. 
})) = handler_keys::$signal_name .load(Relaxed) .and_then(|key| HANDLER_SLOT_MAP.get(key)) { unsafe { invoke_signal_handler( signal_values::$signal_name as libc::c_int, internal_action, handler_input, ); } } } )+ } /// This is the function that will be registered for all signals. /// guest and default handlers for each signal will be dispatched from /// here using the global sequencer to prevent signals from interfering /// with reverie or its tool's state pub extern "C" fn central_handler<T: ToolGlobal>( real_signal_value: i32, sig_info_ptr: *const libc::siginfo_t, _ctx: *const libc::c_void, ) { let wrapped_handler = match real_signal_value { $( signal_values::$signal_name => reverie_handlers::$signal_name, )+ _ => panic!("Invalid signal {}", real_signal_value) }; let sig_info = unsafe { *sig_info_ptr }; T::global().handle_signal_event(real_signal_value); signal::guard::invoke_guarded(wrapped_handler, sig_info); } /// This is the funtion that needs to be called to initialize all the /// signal handling machinery. This will register our central handler /// for all signals pub fn register_central_handler<T: ToolGlobal>() { // Register the default handler functions that correspond to the // scalar sighandler_t behaviors. This is safe because this will // only be done before the first syscall is handled, and only // one thread will be active. 
unsafe { DEFAULT_EXIT_HANDLER = Some($default_exit_handler_fn as *const libc::c_void as libc::sighandler_t); DEFAULT_IGNORE_HANDLER = Some($default_ignore_handler_fn as *const libc::c_void as libc::sighandler_t); DEFAULT_ERROR_HANDLER = Some($default_error_handler_fn as *const libc::c_void as libc::sighandler_t); } // To make sure handlers are set before continuing fence(SeqCst); $( unsafe { let sa_sigaction = central_handler::<T> as extern "C" fn(libc::c_int, *const libc::siginfo_t, *const libc::c_void) as *mut libc::c_void as libc::sighandler_t; let mut sa_mask = MaybeUninit::<libc::sigset_t>::uninit(); assert_eq!(0, libc::sigemptyset(sa_mask.as_mut_ptr()), "Failed to create sigset"); libc::sigaddset(sa_mask.as_mut_ptr(), signal_values::$signal_name); let action = libc::sigaction { sa_sigaction, sa_mask: sa_mask.assume_init(), sa_flags: 0x14000000, sa_restorer: None, }; let mut original_action : MaybeUninit<libc::sigaction> = MaybeUninit::uninit(); assert_eq!(0, libc::sigaction( signal_values::$signal_name as libc::c_int, &action as *const libc::sigaction, original_action.as_mut_ptr(), ), "Failed to register central handler for {}", stringify!($signal_name)); let override_default_handler = None $($( .or(Some( $override_default_handler as *const libc::c_void as libc::sighandler_t) ) )?)?; let handler_key = insert_action( original_action.assume_init(), override_default_handler, ); handler_keys::$signal_name.store(Some(handler_key), SeqCst); } )+ } /// Register the given action for the given signal. The force-allow /// flag means that the handler will be registered even if guest /// handlers are disallowed for the given signal. 
Return a copy of the /// sigaction that was previously associated with the given signal fn register_guest_handler_impl( signal_value: i32, new_action: libc::sigaction, force_allow: bool ) -> Option<libc::sigaction> { let (handler_key, guest_handler_allowed, signal_name) = match signal_value { $( signal_values::$signal_name => { let allowed = force_allow || (true $($( && $guest_handler_allowed)?)?); let signal_name = stringify!($signal_name); (&handler_keys::$signal_name, allowed, signal_name) }, )+ _ => panic!("Invalid signal {}", signal_value) }; if!guest_handler_allowed { panic!("Guest handler registration for {} is not supported", signal_name); } let new_action_key = insert_action(new_action, None); let old_action_key_opt = handler_key.swap(Some(new_action_key), Relaxed); // The first time this function is called, there won't be a stored // key for every signal action, but if there is return it. It is // safe because the key being used must have come from the same // map, and because no elements are deleted, the get operation // will always succeed old_action_key_opt.map(|old_action_key| unsafe { HANDLER_SLOT_MAP.get_unchecked(old_action_key).unwrap().guest_facing_action }) } /// Get the sigaction registered for the given signal if there is one. 
/// The returned sigaction will either be the original default sigaction /// set by default for the application or the unaltered sigaction /// registered by the user #[allow(dead_code)] pub fn get_registered_guest_handler( signal_value: i32 ) -> libc::sigaction { let current_action_key = match signal_value { $( signal_values::$signal_name => { handler_keys::$signal_name .load(Relaxed) .expect("All signals should have guest handlers before now") } )+ _ => panic!("Invalid signal {}", signal_value) }; // This is safe because the key being used must have come from the // same map, and because no elements are deleted, the get operation // will always succeed unsafe { HANDLER_SLOT_MAP.get_unchecked(current_action_key) .unwrap().guest_facing_action } } }; } generate_signal_handlers! { default_exit_handler: default_exit_handler::<T>, default_ignore_handler: default_ignore_handler::<T>, default_error_handler: default_error_handler::<T>, signals: [ SIGHUP, SIGINT, SIGQUIT, // SIGILL, <- needs special synchronous handling Todo(T129735993) SIGTRAP, SIGABRT, SIGBUS, SIGFPE, // SIGKILL, <- cannot be handled directly Todo(T129348205) SIGUSR1, // SIGSEGV, <- needs special synchronous handling Todo(T129735993) SIGUSR2, SIGPIPE, SIGALRM, SIGTERM, SIGSTKFLT { // This is our controlled exit signal. If the guest tries to // register a handler for it, we will panic rather than chancining // undefined behavior override_default = crate::callbacks::handle_exit_signal::<T>; guest_handler_allowed = false; }, // SIGCHLD, <- Causing problems in test_rr_syscallbuf_sigstop T128095829 SIGCONT, // SIGSTOP, <- cannot be handled directly Todo(T129348205) SIGTSTP, SIGTTIN, SIGTTOU, SIGURG, SIGXCPU, SIGXFSZ, SIGVTALRM, SIGPROF, SIGWINCH, SIGIO, SIGPWR, SIGSYS, ] }
/// This is our replacement for default handlers where /// `libc::sighandler_t = libc::SIG_IGN` which is the default handler /// value for lots of signals. This function does nothing, but allows uniform
random_line_split
mod.rs
/* * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ use core::mem::transmute; use core::mem::MaybeUninit; use core::ptr; use core::sync::atomic::fence; use core::sync::atomic::Ordering::*; use atomic::Atomic; use lazy_static::lazy_static; use crate::callbacks; use crate::signal; use crate::slot_map::SlotKey; use crate::slot_map::SlotMap; use crate::tool::Tool; use crate::tool::ToolGlobal; pub mod guard; pub type HandlerInput = libc::siginfo_t; /// We need to keep track of the sigaction that the user specified or what was /// originally provided as a default separately from what we execute directly as /// a signal handler. #[derive(Debug, Clone, Copy)] struct SigActionPair { /// Prisinte sigaction provided by the user or os guest_facing_action: libc::sigaction, /// The actual sigaction we are using internal_action: libc::sigaction, } impl SigActionPair { /// Create a new SigActionPair from the original sig action and an override /// for the default handler. The created pair will contain the original /// action, and a synthetic action with the handler replaced if an override /// is provided or if the the sa_sigaction is one of the non-function- /// pointer values (`SI_DFL`, `SI_ERR`, `SI_IGN`) fn
(original: libc::sigaction, override_handler: Option<libc::sighandler_t>) -> Self { let mut internal_action = original.clone(); // This is safe because it is only reading from a mut static that is // guaranteed to have been completely set before this function // is called internal_action.sa_sigaction = unsafe { match (original.sa_sigaction, override_handler) { (_, Some(override_handler)) => override_handler, (libc::SIG_DFL, _) => DEFAULT_EXIT_HANDLER .expect("Default handlers should be set before registering actions"), (libc::SIG_IGN, _) => DEFAULT_IGNORE_HANDLER .expect("Default handlers should be set before registering actions"), (libc::SIG_ERR, _) => DEFAULT_ERROR_HANDLER .expect("Default handlers should be set before registering actions"), (default_action, None) => default_action, } }; SigActionPair { guest_facing_action: original, internal_action, } } } lazy_static! { /// This is where we are storing the registered actions for each signal. /// We have to store them as Options for now because our slot map requires /// its stored type to implement default static ref HANDLER_SLOT_MAP: SlotMap<Option<SigActionPair>> = SlotMap::new(); } // The sighandler_t type has some values that aren't pointers that are still // valid. They aren't executable, so we need an executable version that we // control for each. Those are below /// Storage of our default handler for the libc::SIG_DFL static mut DEFAULT_EXIT_HANDLER: Option<libc::sighandler_t> = None; /// Storage of our default handler for the libc::SIG_IGN static mut DEFAULT_IGNORE_HANDLER: Option<libc::sighandler_t> = None; /// Storage of our default handler for the libc::SIG_ERR static mut DEFAULT_ERROR_HANDLER: Option<libc::sighandler_t> = None; /// This function invokes the function specified by the given sigaction directly /// with the given signal value or siginfo as arguments depending on whether /// the sigaction's flags indicate it is expecting a sigaction or siginfo. /// Note. 
In the case that the action is requesting sigaction, the 3rd argument /// to the handler will always be null. The specifications for sigaction say the /// third argument is a pointer to the context for the signal being raised, but /// we cannot guarantee that context will be valid with the handler function is /// executed. It also seems like that argument's use is rare, so we are omitting /// it for the time being. When T122210155, we should be able to provide the ctx /// argument without introducing unsafety. unsafe fn invoke_signal_handler( signal_val: libc::c_int, action: &libc::sigaction, sig_info: libc::siginfo_t, ) { if action.sa_flags & libc::SA_SIGINFO > 0 { let to_run: extern "C" fn(libc::c_int, *const libc::siginfo_t, *const libc::c_void) = transmute(action.sa_sigaction as *const libc::c_void); to_run( signal_val, &sig_info as *const libc::siginfo_t, ptr::null::<libc::c_void>(), ); } else { let to_run: extern "C" fn(libc::c_int) = transmute(action.sa_sigaction as *const libc::c_void); to_run(signal_val); } } /// Register the given sigaction as the default. Optionally an override function /// can be passed in that will us to change the default handler for an action fn insert_action( sigaction: libc::sigaction, override_default_handler: Option<libc::sighandler_t>, ) -> SlotKey { HANDLER_SLOT_MAP.insert(Some(SigActionPair::new( sigaction, override_default_handler, ))) } /// Register a signal handler for the guest and return the sigaction currently /// registered for the specified signal #[allow(dead_code)] pub fn register_guest_handler(signal_value: i32, new_action: libc::sigaction) -> libc::sigaction { register_guest_handler_impl(signal_value, new_action, false) .expect("All signals should have pre-registered guest handlers before now") } /// This is our replacement for default handlers where /// `libc::sighandler_t = libc::SIG_DFL` which is the default handler /// value for almost all signals. 
This function will stop all threads in order /// to raise thread-exit events for each pub extern "C" fn default_exit_handler<T: ToolGlobal>( _signal_value: libc::c_int, _siginfo: *const libc::siginfo_t, _ctx: *const libc::c_void, ) { callbacks::exit_group::<T>(0); } /// This is our replacement for default handlers where /// `libc::sighandler_t = libc::SIG_IGN` which is the default handler /// value for lots of signals. This function does nothing, but allows uniform /// treatment of function pointers in signal handlers (instead of checking for) ///specific values of sighandler_t before calling pub extern "C" fn default_ignore_handler<T: ToolGlobal>( _signal_value: libc::c_int, _siginfo: *const libc::siginfo_t, _ctx: *const libc::c_void, ) { } /// This is our replacement for default handlers where /// `libc::sighandler_t = libc::SIG_ERR` which is the default handler /// value for signals representing unrecoverable errors (SIGILL, SIGSEGV, etc). /// This function will stop all threads in order to raise thread-exit events /// for each, but the error code will be non-zero pub extern "C" fn default_error_handler<T: ToolGlobal>( _signal_value: libc::c_int, _siginfo: *const libc::siginfo_t, _ctx: *const libc::c_void, ) { callbacks::exit_group::<T>(1); } /// This macro defines the functions and constants and api for signals based on /// an input set of signal. There should only be one invocation of the macro, /// and it is below. It allows us to express the list of signals we are /// supporting with properties on each to deal with edge cases macro_rules! generate_signal_handlers { ( default_exit_handler: $default_exit_handler_fn:expr, default_ignore_handler: $default_ignore_handler_fn:expr, default_error_handler: $default_error_handler_fn:expr, signals: [$($signal_name:ident $({ $(override_default = $override_default_handler:expr;)? $(guest_handler_allowed = $guest_handler_allowed:expr;)? 
})?),+$(,)?]) => { /// All signal values as i32 mod signal_values { $( pub const $signal_name: i32 = libc::$signal_name as i32; )+ } /// Storage for the slot keys that point to the handlers for each signal mod handler_keys { use super::*; $( pub static $signal_name: Atomic<Option<SlotKey>> = Atomic::new(None); )+ } /// Handler functions for each signal mod reverie_handlers { use super::*; $( #[allow(non_snake_case)] pub fn $signal_name(handler_input: HandlerInput) { if let Some(Some(SigActionPair { internal_action, .. })) = handler_keys::$signal_name .load(Relaxed) .and_then(|key| HANDLER_SLOT_MAP.get(key)) { unsafe { invoke_signal_handler( signal_values::$signal_name as libc::c_int, internal_action, handler_input, ); } } } )+ } /// This is the function that will be registered for all signals. /// guest and default handlers for each signal will be dispatched from /// here using the global sequencer to prevent signals from interfering /// with reverie or its tool's state pub extern "C" fn central_handler<T: ToolGlobal>( real_signal_value: i32, sig_info_ptr: *const libc::siginfo_t, _ctx: *const libc::c_void, ) { let wrapped_handler = match real_signal_value { $( signal_values::$signal_name => reverie_handlers::$signal_name, )+ _ => panic!("Invalid signal {}", real_signal_value) }; let sig_info = unsafe { *sig_info_ptr }; T::global().handle_signal_event(real_signal_value); signal::guard::invoke_guarded(wrapped_handler, sig_info); } /// This is the funtion that needs to be called to initialize all the /// signal handling machinery. This will register our central handler /// for all signals pub fn register_central_handler<T: ToolGlobal>() { // Register the default handler functions that correspond to the // scalar sighandler_t behaviors. This is safe because this will // only be done before the first syscall is handled, and only // one thread will be active. 
unsafe { DEFAULT_EXIT_HANDLER = Some($default_exit_handler_fn as *const libc::c_void as libc::sighandler_t); DEFAULT_IGNORE_HANDLER = Some($default_ignore_handler_fn as *const libc::c_void as libc::sighandler_t); DEFAULT_ERROR_HANDLER = Some($default_error_handler_fn as *const libc::c_void as libc::sighandler_t); } // To make sure handlers are set before continuing fence(SeqCst); $( unsafe { let sa_sigaction = central_handler::<T> as extern "C" fn(libc::c_int, *const libc::siginfo_t, *const libc::c_void) as *mut libc::c_void as libc::sighandler_t; let mut sa_mask = MaybeUninit::<libc::sigset_t>::uninit(); assert_eq!(0, libc::sigemptyset(sa_mask.as_mut_ptr()), "Failed to create sigset"); libc::sigaddset(sa_mask.as_mut_ptr(), signal_values::$signal_name); let action = libc::sigaction { sa_sigaction, sa_mask: sa_mask.assume_init(), sa_flags: 0x14000000, sa_restorer: None, }; let mut original_action : MaybeUninit<libc::sigaction> = MaybeUninit::uninit(); assert_eq!(0, libc::sigaction( signal_values::$signal_name as libc::c_int, &action as *const libc::sigaction, original_action.as_mut_ptr(), ), "Failed to register central handler for {}", stringify!($signal_name)); let override_default_handler = None $($( .or(Some( $override_default_handler as *const libc::c_void as libc::sighandler_t) ) )?)?; let handler_key = insert_action( original_action.assume_init(), override_default_handler, ); handler_keys::$signal_name.store(Some(handler_key), SeqCst); } )+ } /// Register the given action for the given signal. The force-allow /// flag means that the handler will be registered even if guest /// handlers are disallowed for the given signal. 
Return a copy of the /// sigaction that was previously associated with the given signal fn register_guest_handler_impl( signal_value: i32, new_action: libc::sigaction, force_allow: bool ) -> Option<libc::sigaction> { let (handler_key, guest_handler_allowed, signal_name) = match signal_value { $( signal_values::$signal_name => { let allowed = force_allow || (true $($( && $guest_handler_allowed)?)?); let signal_name = stringify!($signal_name); (&handler_keys::$signal_name, allowed, signal_name) }, )+ _ => panic!("Invalid signal {}", signal_value) }; if!guest_handler_allowed { panic!("Guest handler registration for {} is not supported", signal_name); } let new_action_key = insert_action(new_action, None); let old_action_key_opt = handler_key.swap(Some(new_action_key), Relaxed); // The first time this function is called, there won't be a stored // key for every signal action, but if there is return it. It is // safe because the key being used must have come from the same // map, and because no elements are deleted, the get operation // will always succeed old_action_key_opt.map(|old_action_key| unsafe { HANDLER_SLOT_MAP.get_unchecked(old_action_key).unwrap().guest_facing_action }) } /// Get the sigaction registered for the given signal if there is one. 
/// The returned sigaction will either be the original default sigaction /// set by default for the application or the unaltered sigaction /// registered by the user #[allow(dead_code)] pub fn get_registered_guest_handler( signal_value: i32 ) -> libc::sigaction { let current_action_key = match signal_value { $( signal_values::$signal_name => { handler_keys::$signal_name .load(Relaxed) .expect("All signals should have guest handlers before now") } )+ _ => panic!("Invalid signal {}", signal_value) }; // This is safe because the key being used must have come from the // same map, and because no elements are deleted, the get operation // will always succeed unsafe { HANDLER_SLOT_MAP.get_unchecked(current_action_key) .unwrap().guest_facing_action } } }; } generate_signal_handlers! { default_exit_handler: default_exit_handler::<T>, default_ignore_handler: default_ignore_handler::<T>, default_error_handler: default_error_handler::<T>, signals: [ SIGHUP, SIGINT, SIGQUIT, // SIGILL, <- needs special synchronous handling Todo(T129735993) SIGTRAP, SIGABRT, SIGBUS, SIGFPE, // SIGKILL, <- cannot be handled directly Todo(T129348205) SIGUSR1, // SIGSEGV, <- needs special synchronous handling Todo(T129735993) SIGUSR2, SIGPIPE, SIGALRM, SIGTERM, SIGSTKFLT { // This is our controlled exit signal. If the guest tries to // register a handler for it, we will panic rather than chancining // undefined behavior override_default = crate::callbacks::handle_exit_signal::<T>; guest_handler_allowed = false; }, // SIGCHLD, <- Causing problems in test_rr_syscallbuf_sigstop T128095829 SIGCONT, // SIGSTOP, <- cannot be handled directly Todo(T129348205) SIGTSTP, SIGTTIN, SIGTTOU, SIGURG, SIGXCPU, SIGXFSZ, SIGVTALRM, SIGPROF, SIGWINCH, SIGIO, SIGPWR, SIGSYS, ] }
new
identifier_name
set1.rs
=> ((extended[1] & 0x0F) << 2) | ((extended[2] & 0xC0) >> 6), 3 => ((extended[2] & 0x3F) << 0), _ => return Err("too many groups!"), }; let symbol: char = match sextet { c @ 0...25 => char::from(0x41 + c), c @ 26...51 => char::from(0x61 + c - 26), c @ 52...61 => char::from(0x30 + c - 52), 62 => '+', 63 => '/', _ => return Err("too many bits!"), }; if (group.len() as i8) - (i as i8) >= 0 { result.push(symbol); } else { result.push('='); } } } return Ok(result); } pub fn base64_decode(encoded: &str) -> Result<Vec<u8>, &'static str> { let mut result: Vec<u8> = Vec::with_capacity(encoded.len() * 3 / 4); let encoded_stripped = encoded .as_bytes() .iter() .cloned() .filter(|letter| match *letter { b'\n' => false, _ => true, }) .collect::<Vec<u8>>(); for group in encoded_stripped.chunks(4) { if group.len()!= 4 { return Err("chunk too small!"); } let mut padding: i8 = 0; let sextets = group .iter() .map(|letter| match *letter { c @ b'A'...b'Z' => Ok(c as u8 - 0x41), c @ b'a'...b'z' => Ok(c as u8 - 0x61 + 26), c @ b'0'...b'9' => Ok(c as u8 - 0x30 + 52), b'+' => Ok(62), b'/' => Ok(63), b'=' => { padding += 1; Ok(0) } _ => Err("illegal character!"), }) .collect::<Result<Vec<u8>, &'static str>>()?; for i in 0..=2 { let octet = match i { 0 => ((sextets[0] & 0x3F) << 2) | ((sextets[1] & 0x30) >> 4), 1 => ((sextets[1] & 0x0F) << 4) | ((sextets[2] & 0x3C) >> 2), 2 => ((sextets[2] & 0x03) << 6) | ((sextets[3] & 0x3F) >> 0), _ => return Err("too many octets!"), }; if (i as i8) < (3 - padding) { result.push(octet); } } } return Ok(result); } pub fn xor(a: &[u8], b: &[u8]) -> Result<Vec<u8>, &'static str> { if a.len()!= b.len() { return Err("buffer size mismatch"); } let result = a.iter() .zip(b) .map(|pair| match pair { (&aa, &bb) => aa ^ bb, }) .collect::<Vec<u8>>(); return Ok(result); } pub fn xor_in_place(data: &mut [u8], other: &[u8]) -> Result<usize, &'static str> { if data.len()!= other.len() { return Err("buffer size mismatch"); } let xor_count = data.iter_mut() .zip(other) 
.map(|(data_elem, other_elem)| { *data_elem ^= other_elem; }) .count(); return Ok(xor_count); } pub fn
(plaintext: &[u8], key: &[u8]) -> Vec<u8> { let result = plaintext .iter() .zip(key.iter().cycle()) .map(|pair| match pair { (&aa, &bb) => aa ^ bb, }) .collect::<Vec<u8>>(); return result; } use std::collections::BTreeMap; pub fn char_freq_score(text: &[u8]) -> f64 { let mut non_printable_count = 0; let letter_freq: BTreeMap<u8, f64> = btreemap! { b'a' => 0.08167, b'b' => 0.01492, b'c' => 0.02782, b'd' => 0.04253, b'e' => 0.12702, b'f' => 0.02228, b'g' => 0.02015, b'h' => 0.06094, b'i' => 0.06966, b'j' => 0.00153, b'k' => 0.00772, b'l' => 0.04025, b'm' => 0.02406, b'n' => 0.06749, b'o' => 0.07507, b'p' => 0.01929, b'q' => 0.00095, b'r' => 0.05987, b's' => 0.06327, b't' => 0.09056, b'u' => 0.02758, b'v' => 0.00978, b'w' => 0.02360, b'x' => 0.00150, b'y' => 0.01974, b'z' => 0.00074, }; let mut letter_counts: BTreeMap<u8, u32> = BTreeMap::new(); for letter in b'a'..=b'z' { letter_counts.insert(letter, 0); } let mut num_letters = 0; for letter in text { match *letter { // null 0 => {} // non-printable characters 1...9 => non_printable_count += 1, // newline 10 => {} // more non-printable characters 11...31 => non_printable_count += 1, // space 32 => {} // printable symbols, including digits (ascii '!' 
- '@') 33...64 => {} // upper-case letters c @ 65...90 => { *letter_counts.get_mut(&(c - 65 + 97)).unwrap() += 1; num_letters += 1; } // more printable symbols (ascii '[' - '`') 91...96 => {} // lower-case letters c @ 97...122 => { *letter_counts.get_mut(&c).unwrap() += 1; num_letters += 1; } // more printable symbols (ascii '{' - '~') 123...126 => {} // non-printable characters _ => non_printable_count += 1, } } if num_letters == 0 { return 10000.0 + (non_printable_count as f64 * 500.0); } let mut chisquared = 0.0; for (key, prob) in letter_freq { chisquared += (num_letters as f64) * ((*letter_counts.get(&key).unwrap() as f64 / num_letters as f64) - prob).powf(2.0) / prob; } return chisquared + (non_printable_count as f64 * 500.0); } extern crate bit_vec; use self::bit_vec::BitVec; pub fn hamming_distance(a: &[u8], b: &[u8]) -> Result<u32, &'static str> { if a.len()!= b.len() { return Err("sequences must have same length"); } let result = a.iter() .zip(b.iter()) .map(|(aa, bb)| -> u32 { BitVec::from_bytes(&[aa ^ bb]) .iter() .map(|val| val as u32) .sum() }) .sum(); return Ok(result); } pub fn find_best_single_byte_xor(ciphertext: &[u8]) -> u8 { let mut decoded: Vec<(f64, u8, Vec<u8>)> = Vec::with_capacity(256); for i in 0..=256 { let key: Vec<u8> = vec![i as u8; ciphertext.len()]; if let Ok(decoded_bytes) = xor(ciphertext, &key) { let score = char_freq_score(&decoded_bytes); decoded.push((score, i as u8, decoded_bytes)); } } decoded.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap()); let (_, key, _) = decoded[0]; return key; } pub fn to_hex(data: &[u8]) -> String { data.iter() .map(|b| format!("{:02x}", b)) .collect::<Vec<String>>() .join("") } #[cfg(test)] mod tests { use set1; extern crate hex; use self::hex::FromHex; #[test] fn base64_encode() { let example_hex = "49276d206b696c6c696e6720796f757220627261696e206c\ 696b65206120706f69736f6e6f7573206d757368726f6f6d"; let example_bytes = Vec::from_hex(example_hex).unwrap(); if let Ok(b64) = 
set1::base64_encode(&example_bytes) { assert_eq!( b64, "SSdtIGtpbGxpbmcgeW91ciBicmFpbiBsaWtlIGEgcG9pc29ub3VzIG11c2hyb29t" ); } else { panic!(); } let test = "foobar".as_bytes(); if let Ok(b64) = set1::base64_encode(&test) { assert_eq!(b64, "Zm9vYmFy"); } else { panic!(); } } #[test] fn base64_decode() { let example_b64 = "SSdtIGtpbGxpbmcgeW91ciBicmFpbiBsaWtlIGEgcG9pc29ub3VzIG11c2hyb29t"; if let Ok(example) = set1::base64_decode(example_b64) { assert_eq!( example, Vec::from_hex( "49276d206b696c6c696e6720796f757220627261696e206c\ 696b65206120706f69736f6e6f7573206d757368726f6f6d" ).unwrap() ); } else { panic!(); } let b64 = "Zm9vYmFy"; if let Ok(test) = set1::base64_decode(&b64) { assert_eq!(test, "foobar".as_bytes()); } else { panic!(); } } #[test] fn xor() { let a = "1c0111001f010100061a024b53535009181c"; let b = "686974207468652062756c6c277320657965"; let res = "746865206b696420646f6e277420706c6179"; let a_bytes = Vec::from_hex(a).unwrap(); let b_bytes = Vec::from_hex(b).unwrap(); let res_bytes = Vec::from_hex(res).unwrap(); match set1::xor(&a_bytes, &b_bytes) { Ok(r) => assert_eq!(r, res_bytes), Err(str) => panic!(str), }; } use std::collections::BTreeMap; use std::str; #[test] fn single_byte_xor_cipher() { let encoded = "1b37373331363f78151b7f2b783431333d78397828372d363c78373e783a393b3736"; let encoded_bytes = Vec::from_hex(encoded).unwrap(); //don't want to use a map here, because we'll lose any values with the same score //let mut decoded: BTreeMap<u64, (u8, Vec<u8>)> = BTreeMap::new(); let mut decoded: Vec<(f64, u8, Vec<u8>)> = Vec::with_capacity(256); for i in 0..=256 { let key: Vec<u8> = vec![i as u8; encoded_bytes.len()]; if let Ok(decoded_bytes) = set1::xor(&encoded_bytes, &key) { let score = set1::char_freq_score(&decoded_bytes); //decoded.insert((score * 1000.0) as u64, (i as u8, decoded_bytes)); decoded.push((score, i as u8, decoded_bytes)); } } decoded.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap()); //let &(key, ref value) = 
decoded.values().next().unwrap(); let (_, key, ref value) = decoded[0]; assert_eq!(key, 88); assert_eq!( str::from_utf8(value.as_slice()).unwrap(), "Cooking MC's like a pound of bacon" ); } use std::fs::File; use std::io::BufReader; use std::io::BufRead; #[test] fn detect_single_char_xor() { let file = File::open("challenge-data/4.txt").unwrap(); let reader = BufReader::new(file); let mut decoded = BTreeMap::new(); let mut line_num = 0; for line in reader.lines() { if let Ok(line) = line { let line_bytes = Vec::from_hex(line).unwrap(); for i in 0..=256 { let key: Vec<u8> = vec![i as u8; line_bytes.len()]; if let Ok(decoded_bytes) = set1::xor(&line_bytes, &key) { let score = set1::char_freq_score(&decoded_bytes); decoded.insert((score * 1000.0) as u64, (line_num, i as u8, decoded_bytes)); } } } line_num += 1; } let mut found = false; for (score, &(line, key, ref value)) in decoded.iter() { let score: f64 = *score as f64 / 1000.0; if score < 100.0 { if line == 170 && key == 53 { let value = str::from_utf8(value).unwrap(); assert_eq!(value, "Now that the party is jumping\n"); found = true; } } } assert!(found, "decrypted string not found!"); } #[test] fn repeating_key_xor() { let plaintext = "Burning 'em, if you ain't quick and nimble\n\ I go crazy when I hear a cymbal"; let key = "ICE"; let plaintext = plaintext.as_bytes(); let key = key.as_bytes(); let ciphertext = set1::xor_repeat(&plaintext, &key); let ciphertext_ref = "0b3637272a2b2e63622c2e69692a23693a2a3c6324202d623d63343c2a26226\ 324272765272a282b2f20430a652e2c652a3124333a653e2b2027630c692b20\ 283165286326302e27282f"; let ciphertext_ref = Vec::from_hex(ciphertext_ref).unwrap(); assert_eq!(ciphertext, ciphertext_ref); } #[test] fn hamming_distance() { assert_eq!( set1::hamming_distance("this is a test".as_bytes(), "wokka wokka!!!".as_bytes()) .unwrap(), 37 ); } #[test] fn break_repeating_key_xor() { let mut f = File::open("challenge-data/6.txt").unwrap(); let mut encoded = String::new(); f.read_to_string(&mut 
encoded).unwrap(); let decoded = set1::base64_decode(&encoded).unwrap(); let mut results: Vec<(f32, usize)> = Vec::with_capacity(40); for keysize in 2..=40 { let sequences = decoded.chunks(keysize).collect::<Vec<&[u8]>>(); let norm_distances = sequences .chunks(2) .filter(|maybe_pair| maybe_pair.len() == 2) .filter(|maybe_same_len| maybe_same_len[0].len() == maybe_same_len[1].len()) .map(|pair| { set1::hamming_distance(pair[0], pair[1]).unwrap() as f32 / keysize as f32 }) .collect::<Vec<f32>>(); let norm_dist_avg: f32 = &norm_distances.iter().sum() / norm_distances.len() as f32; results.push((norm_dist_avg, keysize)); } results.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap()); let keysize = results[0].1; assert_eq!(keysize, 29); let sequences = decoded.chunks(keysize).collect::<Vec<&[u8]>>(); let mut transposed: Vec<Vec<u8>> = Vec::with_capacity(keysize); for i in 0..keysize { let mut line = Vec::with_capacity(sequences.len()); for j in 0..sequences.len() { if i < sequences[j].len() { line.push(sequences[j][i]); } } transposed.push(line); } let mut key: Vec<u8> = Vec::with_capacity(keysize); for block in transposed { let key_byte = set1::find_best_single_byte_xor(&block); key.push(key_byte); } assert_eq!( str::from_utf8(&key).unwrap(), "Terminator X: Bring the noise" ); let plaintext = set1::xor_repeat(&decoded, &key); let plaintext = str::from_utf8(&plaintext).unwrap(); let mut f = File::open("challenge-data/6_plaintext.txt").unwrap(); let mut plaintext_ref = String::new(); f.read_to_string(&mut plaintext_ref).unwrap(); assert_eq!(plaintext, plaintext_ref); } extern crate openssl; use self::openssl::symm; use self::openssl::symm::Cipher; use std::io::prelude::*; #[test] fn aes_ecb_mode() { let mut f = File::open("challenge-data/7.txt").unwrap(); let mut encoded = String::new(); f.read_to_string(&mut encoded).unwrap(); let decoded = set1::base64_decode(&encoded).unwrap(); let plaintext = symm::decrypt( Cipher::aes_128_ecb(), "YELLOW SUBMARINE".as_bytes(), None,
xor_repeat
identifier_name
set1.rs
=> ((extended[1] & 0x0F) << 2) | ((extended[2] & 0xC0) >> 6), 3 => ((extended[2] & 0x3F) << 0), _ => return Err("too many groups!"), }; let symbol: char = match sextet { c @ 0...25 => char::from(0x41 + c), c @ 26...51 => char::from(0x61 + c - 26), c @ 52...61 => char::from(0x30 + c - 52), 62 => '+', 63 => '/', _ => return Err("too many bits!"), }; if (group.len() as i8) - (i as i8) >= 0 { result.push(symbol); } else { result.push('='); } } } return Ok(result); } pub fn base64_decode(encoded: &str) -> Result<Vec<u8>, &'static str> { let mut result: Vec<u8> = Vec::with_capacity(encoded.len() * 3 / 4); let encoded_stripped = encoded .as_bytes() .iter() .cloned() .filter(|letter| match *letter { b'\n' => false, _ => true, }) .collect::<Vec<u8>>(); for group in encoded_stripped.chunks(4) { if group.len()!= 4 { return Err("chunk too small!"); } let mut padding: i8 = 0; let sextets = group .iter() .map(|letter| match *letter { c @ b'A'...b'Z' => Ok(c as u8 - 0x41), c @ b'a'...b'z' => Ok(c as u8 - 0x61 + 26), c @ b'0'...b'9' => Ok(c as u8 - 0x30 + 52), b'+' => Ok(62), b'/' => Ok(63), b'=' => { padding += 1; Ok(0) } _ => Err("illegal character!"), }) .collect::<Result<Vec<u8>, &'static str>>()?; for i in 0..=2 { let octet = match i { 0 => ((sextets[0] & 0x3F) << 2) | ((sextets[1] & 0x30) >> 4), 1 => ((sextets[1] & 0x0F) << 4) | ((sextets[2] & 0x3C) >> 2), 2 => ((sextets[2] & 0x03) << 6) | ((sextets[3] & 0x3F) >> 0), _ => return Err("too many octets!"), }; if (i as i8) < (3 - padding) { result.push(octet); } } } return Ok(result); } pub fn xor(a: &[u8], b: &[u8]) -> Result<Vec<u8>, &'static str> { if a.len()!= b.len() { return Err("buffer size mismatch"); } let result = a.iter() .zip(b) .map(|pair| match pair { (&aa, &bb) => aa ^ bb, }) .collect::<Vec<u8>>(); return Ok(result); } pub fn xor_in_place(data: &mut [u8], other: &[u8]) -> Result<usize, &'static str> { if data.len()!= other.len() { return Err("buffer size mismatch"); } let xor_count = data.iter_mut() .zip(other) 
.map(|(data_elem, other_elem)| { *data_elem ^= other_elem; }) .count(); return Ok(xor_count); } pub fn xor_repeat(plaintext: &[u8], key: &[u8]) -> Vec<u8> { let result = plaintext .iter() .zip(key.iter().cycle()) .map(|pair| match pair { (&aa, &bb) => aa ^ bb, }) .collect::<Vec<u8>>(); return result; } use std::collections::BTreeMap; pub fn char_freq_score(text: &[u8]) -> f64 { let mut non_printable_count = 0; let letter_freq: BTreeMap<u8, f64> = btreemap! { b'a' => 0.08167, b'b' => 0.01492, b'c' => 0.02782, b'd' => 0.04253, b'e' => 0.12702, b'f' => 0.02228, b'g' => 0.02015, b'h' => 0.06094, b'i' => 0.06966, b'j' => 0.00153, b'k' => 0.00772, b'l' => 0.04025, b'm' => 0.02406, b'n' => 0.06749, b'o' => 0.07507, b'p' => 0.01929, b'q' => 0.00095, b'r' => 0.05987, b's' => 0.06327, b't' => 0.09056, b'u' => 0.02758, b'v' => 0.00978, b'w' => 0.02360, b'x' => 0.00150, b'y' => 0.01974, b'z' => 0.00074, }; let mut letter_counts: BTreeMap<u8, u32> = BTreeMap::new(); for letter in b'a'..=b'z' { letter_counts.insert(letter, 0); } let mut num_letters = 0; for letter in text { match *letter { // null 0 => {} // non-printable characters 1...9 => non_printable_count += 1, // newline 10 => {} // more non-printable characters 11...31 => non_printable_count += 1, // space 32 => {} // printable symbols, including digits (ascii '!' 
- '@') 33...64 => {} // upper-case letters c @ 65...90 => { *letter_counts.get_mut(&(c - 65 + 97)).unwrap() += 1; num_letters += 1; } // more printable symbols (ascii '[' - '`') 91...96 => {} // lower-case letters c @ 97...122 => { *letter_counts.get_mut(&c).unwrap() += 1; num_letters += 1; } // more printable symbols (ascii '{' - '~') 123...126 => {} // non-printable characters _ => non_printable_count += 1, } } if num_letters == 0 { return 10000.0 + (non_printable_count as f64 * 500.0); } let mut chisquared = 0.0; for (key, prob) in letter_freq { chisquared += (num_letters as f64) * ((*letter_counts.get(&key).unwrap() as f64 / num_letters as f64) - prob).powf(2.0) / prob; } return chisquared + (non_printable_count as f64 * 500.0); } extern crate bit_vec; use self::bit_vec::BitVec; pub fn hamming_distance(a: &[u8], b: &[u8]) -> Result<u32, &'static str> { if a.len()!= b.len() { return Err("sequences must have same length"); } let result = a.iter() .zip(b.iter()) .map(|(aa, bb)| -> u32 { BitVec::from_bytes(&[aa ^ bb]) .iter() .map(|val| val as u32) .sum() }) .sum(); return Ok(result); } pub fn find_best_single_byte_xor(ciphertext: &[u8]) -> u8 { let mut decoded: Vec<(f64, u8, Vec<u8>)> = Vec::with_capacity(256); for i in 0..=256 { let key: Vec<u8> = vec![i as u8; ciphertext.len()]; if let Ok(decoded_bytes) = xor(ciphertext, &key) { let score = char_freq_score(&decoded_bytes); decoded.push((score, i as u8, decoded_bytes)); } } decoded.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap()); let (_, key, _) = decoded[0]; return key; } pub fn to_hex(data: &[u8]) -> String { data.iter() .map(|b| format!("{:02x}", b)) .collect::<Vec<String>>() .join("") } #[cfg(test)] mod tests { use set1; extern crate hex; use self::hex::FromHex; #[test] fn base64_encode() { let example_hex = "49276d206b696c6c696e6720796f757220627261696e206c\ 696b65206120706f69736f6e6f7573206d757368726f6f6d"; let example_bytes = Vec::from_hex(example_hex).unwrap(); if let Ok(b64) = 
set1::base64_encode(&example_bytes) { assert_eq!( b64, "SSdtIGtpbGxpbmcgeW91ciBicmFpbiBsaWtlIGEgcG9pc29ub3VzIG11c2hyb29t" ); } else { panic!(); } let test = "foobar".as_bytes(); if let Ok(b64) = set1::base64_encode(&test) { assert_eq!(b64, "Zm9vYmFy"); } else { panic!(); } } #[test] fn base64_decode() { let example_b64 = "SSdtIGtpbGxpbmcgeW91ciBicmFpbiBsaWtlIGEgcG9pc29ub3VzIG11c2hyb29t"; if let Ok(example) = set1::base64_decode(example_b64) { assert_eq!( example, Vec::from_hex( "49276d206b696c6c696e6720796f757220627261696e206c\ 696b65206120706f69736f6e6f7573206d757368726f6f6d" ).unwrap() ); } else { panic!(); } let b64 = "Zm9vYmFy"; if let Ok(test) = set1::base64_decode(&b64) { assert_eq!(test, "foobar".as_bytes()); } else { panic!(); } } #[test] fn xor() { let a = "1c0111001f010100061a024b53535009181c"; let b = "686974207468652062756c6c277320657965"; let res = "746865206b696420646f6e277420706c6179"; let a_bytes = Vec::from_hex(a).unwrap(); let b_bytes = Vec::from_hex(b).unwrap(); let res_bytes = Vec::from_hex(res).unwrap(); match set1::xor(&a_bytes, &b_bytes) { Ok(r) => assert_eq!(r, res_bytes), Err(str) => panic!(str), }; } use std::collections::BTreeMap; use std::str; #[test] fn single_byte_xor_cipher() { let encoded = "1b37373331363f78151b7f2b783431333d78397828372d363c78373e783a393b3736"; let encoded_bytes = Vec::from_hex(encoded).unwrap(); //don't want to use a map here, because we'll lose any values with the same score //let mut decoded: BTreeMap<u64, (u8, Vec<u8>)> = BTreeMap::new(); let mut decoded: Vec<(f64, u8, Vec<u8>)> = Vec::with_capacity(256); for i in 0..=256 { let key: Vec<u8> = vec![i as u8; encoded_bytes.len()]; if let Ok(decoded_bytes) = set1::xor(&encoded_bytes, &key) { let score = set1::char_freq_score(&decoded_bytes); //decoded.insert((score * 1000.0) as u64, (i as u8, decoded_bytes)); decoded.push((score, i as u8, decoded_bytes)); } } decoded.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap()); //let &(key, ref value) = 
decoded.values().next().unwrap(); let (_, key, ref value) = decoded[0]; assert_eq!(key, 88); assert_eq!( str::from_utf8(value.as_slice()).unwrap(), "Cooking MC's like a pound of bacon" ); } use std::fs::File; use std::io::BufReader; use std::io::BufRead; #[test] fn detect_single_char_xor() { let file = File::open("challenge-data/4.txt").unwrap(); let reader = BufReader::new(file); let mut decoded = BTreeMap::new(); let mut line_num = 0; for line in reader.lines() { if let Ok(line) = line { let line_bytes = Vec::from_hex(line).unwrap(); for i in 0..=256 { let key: Vec<u8> = vec![i as u8; line_bytes.len()]; if let Ok(decoded_bytes) = set1::xor(&line_bytes, &key) { let score = set1::char_freq_score(&decoded_bytes); decoded.insert((score * 1000.0) as u64, (line_num, i as u8, decoded_bytes)); } } } line_num += 1; } let mut found = false; for (score, &(line, key, ref value)) in decoded.iter() { let score: f64 = *score as f64 / 1000.0; if score < 100.0 { if line == 170 && key == 53 { let value = str::from_utf8(value).unwrap(); assert_eq!(value, "Now that the party is jumping\n"); found = true; } } } assert!(found, "decrypted string not found!"); } #[test] fn repeating_key_xor()
#[test] fn hamming_distance() { assert_eq!( set1::hamming_distance("this is a test".as_bytes(), "wokka wokka!!!".as_bytes()) .unwrap(), 37 ); } #[test] fn break_repeating_key_xor() { let mut f = File::open("challenge-data/6.txt").unwrap(); let mut encoded = String::new(); f.read_to_string(&mut encoded).unwrap(); let decoded = set1::base64_decode(&encoded).unwrap(); let mut results: Vec<(f32, usize)> = Vec::with_capacity(40); for keysize in 2..=40 { let sequences = decoded.chunks(keysize).collect::<Vec<&[u8]>>(); let norm_distances = sequences .chunks(2) .filter(|maybe_pair| maybe_pair.len() == 2) .filter(|maybe_same_len| maybe_same_len[0].len() == maybe_same_len[1].len()) .map(|pair| { set1::hamming_distance(pair[0], pair[1]).unwrap() as f32 / keysize as f32 }) .collect::<Vec<f32>>(); let norm_dist_avg: f32 = &norm_distances.iter().sum() / norm_distances.len() as f32; results.push((norm_dist_avg, keysize)); } results.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap()); let keysize = results[0].1; assert_eq!(keysize, 29); let sequences = decoded.chunks(keysize).collect::<Vec<&[u8]>>(); let mut transposed: Vec<Vec<u8>> = Vec::with_capacity(keysize); for i in 0..keysize { let mut line = Vec::with_capacity(sequences.len()); for j in 0..sequences.len() { if i < sequences[j].len() { line.push(sequences[j][i]); } } transposed.push(line); } let mut key: Vec<u8> = Vec::with_capacity(keysize); for block in transposed { let key_byte = set1::find_best_single_byte_xor(&block); key.push(key_byte); } assert_eq!( str::from_utf8(&key).unwrap(), "Terminator X: Bring the noise" ); let plaintext = set1::xor_repeat(&decoded, &key); let plaintext = str::from_utf8(&plaintext).unwrap(); let mut f = File::open("challenge-data/6_plaintext.txt").unwrap(); let mut plaintext_ref = String::new(); f.read_to_string(&mut plaintext_ref).unwrap(); assert_eq!(plaintext, plaintext_ref); } extern crate openssl; use self::openssl::symm; use self::openssl::symm::Cipher; use std::io::prelude::*; #[test] fn 
aes_ecb_mode() { let mut f = File::open("challenge-data/7.txt").unwrap(); let mut encoded = String::new(); f.read_to_string(&mut encoded).unwrap(); let decoded = set1::base64_decode(&encoded).unwrap(); let plaintext = symm::decrypt( Cipher::aes_128_ecb(), "YELLOW SUBMARINE".as_bytes(), None,
{ let plaintext = "Burning 'em, if you ain't quick and nimble\n\ I go crazy when I hear a cymbal"; let key = "ICE"; let plaintext = plaintext.as_bytes(); let key = key.as_bytes(); let ciphertext = set1::xor_repeat(&plaintext, &key); let ciphertext_ref = "0b3637272a2b2e63622c2e69692a23693a2a3c6324202d623d63343c2a26226\ 324272765272a282b2f20430a652e2c652a3124333a653e2b2027630c692b20\ 283165286326302e27282f"; let ciphertext_ref = Vec::from_hex(ciphertext_ref).unwrap(); assert_eq!(ciphertext, ciphertext_ref); }
identifier_body
set1.rs
2 => ((extended[1] & 0x0F) << 2) | ((extended[2] & 0xC0) >> 6), 3 => ((extended[2] & 0x3F) << 0), _ => return Err("too many groups!"), }; let symbol: char = match sextet { c @ 0...25 => char::from(0x41 + c), c @ 26...51 => char::from(0x61 + c - 26), c @ 52...61 => char::from(0x30 + c - 52), 62 => '+', 63 => '/', _ => return Err("too many bits!"), }; if (group.len() as i8) - (i as i8) >= 0 { result.push(symbol); } else { result.push('='); } } } return Ok(result); } pub fn base64_decode(encoded: &str) -> Result<Vec<u8>, &'static str> { let mut result: Vec<u8> = Vec::with_capacity(encoded.len() * 3 / 4); let encoded_stripped = encoded .as_bytes() .iter() .cloned() .filter(|letter| match *letter { b'\n' => false, _ => true, }) .collect::<Vec<u8>>(); for group in encoded_stripped.chunks(4) { if group.len()!= 4 { return Err("chunk too small!"); } let mut padding: i8 = 0; let sextets = group .iter() .map(|letter| match *letter { c @ b'A'...b'Z' => Ok(c as u8 - 0x41), c @ b'a'...b'z' => Ok(c as u8 - 0x61 + 26), c @ b'0'...b'9' => Ok(c as u8 - 0x30 + 52), b'+' => Ok(62), b'/' => Ok(63), b'=' => { padding += 1; Ok(0) } _ => Err("illegal character!"), }) .collect::<Result<Vec<u8>, &'static str>>()?; for i in 0..=2 { let octet = match i { 0 => ((sextets[0] & 0x3F) << 2) | ((sextets[1] & 0x30) >> 4), 1 => ((sextets[1] & 0x0F) << 4) | ((sextets[2] & 0x3C) >> 2), 2 => ((sextets[2] & 0x03) << 6) | ((sextets[3] & 0x3F) >> 0), _ => return Err("too many octets!"), }; if (i as i8) < (3 - padding) { result.push(octet); } } } return Ok(result); } pub fn xor(a: &[u8], b: &[u8]) -> Result<Vec<u8>, &'static str> { if a.len()!= b.len() { return Err("buffer size mismatch"); } let result = a.iter() .zip(b) .map(|pair| match pair { (&aa, &bb) => aa ^ bb, }) .collect::<Vec<u8>>(); return Ok(result); } pub fn xor_in_place(data: &mut [u8], other: &[u8]) -> Result<usize, &'static str> { if data.len()!= other.len() { return Err("buffer size mismatch"); } let xor_count = data.iter_mut() .zip(other) 
.map(|(data_elem, other_elem)| { *data_elem ^= other_elem; }) .count(); return Ok(xor_count); } pub fn xor_repeat(plaintext: &[u8], key: &[u8]) -> Vec<u8> { let result = plaintext .iter() .zip(key.iter().cycle()) .map(|pair| match pair { (&aa, &bb) => aa ^ bb, }) .collect::<Vec<u8>>(); return result; } use std::collections::BTreeMap; pub fn char_freq_score(text: &[u8]) -> f64 { let mut non_printable_count = 0; let letter_freq: BTreeMap<u8, f64> = btreemap! { b'a' => 0.08167, b'b' => 0.01492, b'c' => 0.02782, b'd' => 0.04253, b'e' => 0.12702, b'f' => 0.02228, b'g' => 0.02015, b'h' => 0.06094, b'i' => 0.06966, b'j' => 0.00153, b'k' => 0.00772, b'l' => 0.04025, b'm' => 0.02406, b'n' => 0.06749, b'o' => 0.07507, b'p' => 0.01929, b'q' => 0.00095, b'r' => 0.05987, b's' => 0.06327, b't' => 0.09056, b'u' => 0.02758, b'v' => 0.00978, b'w' => 0.02360, b'x' => 0.00150, b'y' => 0.01974, b'z' => 0.00074, }; let mut letter_counts: BTreeMap<u8, u32> = BTreeMap::new(); for letter in b'a'..=b'z' { letter_counts.insert(letter, 0); } let mut num_letters = 0; for letter in text { match *letter { // null 0 => {} // non-printable characters 1...9 => non_printable_count += 1, // newline 10 => {} // more non-printable characters 11...31 => non_printable_count += 1, // space 32 => {} // printable symbols, including digits (ascii '!' 
- '@') 33...64 => {} // upper-case letters c @ 65...90 => { *letter_counts.get_mut(&(c - 65 + 97)).unwrap() += 1; num_letters += 1; } // more printable symbols (ascii '[' - '`') 91...96 => {} // lower-case letters c @ 97...122 => { *letter_counts.get_mut(&c).unwrap() += 1; num_letters += 1; } // more printable symbols (ascii '{' - '~') 123...126 => {} // non-printable characters _ => non_printable_count += 1, } } if num_letters == 0 { return 10000.0 + (non_printable_count as f64 * 500.0); } let mut chisquared = 0.0; for (key, prob) in letter_freq { chisquared += (num_letters as f64) * ((*letter_counts.get(&key).unwrap() as f64 / num_letters as f64) - prob).powf(2.0) / prob; } return chisquared + (non_printable_count as f64 * 500.0); } extern crate bit_vec; use self::bit_vec::BitVec; pub fn hamming_distance(a: &[u8], b: &[u8]) -> Result<u32, &'static str> { if a.len()!= b.len() { return Err("sequences must have same length"); } let result = a.iter() .zip(b.iter()) .map(|(aa, bb)| -> u32 { BitVec::from_bytes(&[aa ^ bb]) .iter() .map(|val| val as u32) .sum() }) .sum(); return Ok(result); } pub fn find_best_single_byte_xor(ciphertext: &[u8]) -> u8 { let mut decoded: Vec<(f64, u8, Vec<u8>)> = Vec::with_capacity(256); for i in 0..=256 { let key: Vec<u8> = vec![i as u8; ciphertext.len()]; if let Ok(decoded_bytes) = xor(ciphertext, &key) { let score = char_freq_score(&decoded_bytes); decoded.push((score, i as u8, decoded_bytes)); } } decoded.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap()); let (_, key, _) = decoded[0]; return key; } pub fn to_hex(data: &[u8]) -> String { data.iter() .map(|b| format!("{:02x}", b)) .collect::<Vec<String>>() .join("") } #[cfg(test)] mod tests { use set1; extern crate hex; use self::hex::FromHex; #[test] fn base64_encode() { let example_hex = "49276d206b696c6c696e6720796f757220627261696e206c\ 696b65206120706f69736f6e6f7573206d757368726f6f6d"; let example_bytes = Vec::from_hex(example_hex).unwrap(); if let Ok(b64) = 
set1::base64_encode(&example_bytes) { assert_eq!( b64, "SSdtIGtpbGxpbmcgeW91ciBicmFpbiBsaWtlIGEgcG9pc29ub3VzIG11c2hyb29t" ); } else { panic!(); } let test = "foobar".as_bytes(); if let Ok(b64) = set1::base64_encode(&test) { assert_eq!(b64, "Zm9vYmFy"); } else { panic!(); } } #[test] fn base64_decode() { let example_b64 = "SSdtIGtpbGxpbmcgeW91ciBicmFpbiBsaWtlIGEgcG9pc29ub3VzIG11c2hyb29t"; if let Ok(example) = set1::base64_decode(example_b64) { assert_eq!( example, Vec::from_hex( "49276d206b696c6c696e6720796f757220627261696e206c\ 696b65206120706f69736f6e6f7573206d757368726f6f6d" ).unwrap() ); } else { panic!(); } let b64 = "Zm9vYmFy"; if let Ok(test) = set1::base64_decode(&b64) { assert_eq!(test, "foobar".as_bytes()); } else { panic!();
#[test] fn xor() { let a = "1c0111001f010100061a024b53535009181c"; let b = "686974207468652062756c6c277320657965"; let res = "746865206b696420646f6e277420706c6179"; let a_bytes = Vec::from_hex(a).unwrap(); let b_bytes = Vec::from_hex(b).unwrap(); let res_bytes = Vec::from_hex(res).unwrap(); match set1::xor(&a_bytes, &b_bytes) { Ok(r) => assert_eq!(r, res_bytes), Err(str) => panic!(str), }; } use std::collections::BTreeMap; use std::str; #[test] fn single_byte_xor_cipher() { let encoded = "1b37373331363f78151b7f2b783431333d78397828372d363c78373e783a393b3736"; let encoded_bytes = Vec::from_hex(encoded).unwrap(); //don't want to use a map here, because we'll lose any values with the same score //let mut decoded: BTreeMap<u64, (u8, Vec<u8>)> = BTreeMap::new(); let mut decoded: Vec<(f64, u8, Vec<u8>)> = Vec::with_capacity(256); for i in 0..=256 { let key: Vec<u8> = vec![i as u8; encoded_bytes.len()]; if let Ok(decoded_bytes) = set1::xor(&encoded_bytes, &key) { let score = set1::char_freq_score(&decoded_bytes); //decoded.insert((score * 1000.0) as u64, (i as u8, decoded_bytes)); decoded.push((score, i as u8, decoded_bytes)); } } decoded.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap()); //let &(key, ref value) = decoded.values().next().unwrap(); let (_, key, ref value) = decoded[0]; assert_eq!(key, 88); assert_eq!( str::from_utf8(value.as_slice()).unwrap(), "Cooking MC's like a pound of bacon" ); } use std::fs::File; use std::io::BufReader; use std::io::BufRead; #[test] fn detect_single_char_xor() { let file = File::open("challenge-data/4.txt").unwrap(); let reader = BufReader::new(file); let mut decoded = BTreeMap::new(); let mut line_num = 0; for line in reader.lines() { if let Ok(line) = line { let line_bytes = Vec::from_hex(line).unwrap(); for i in 0..=256 { let key: Vec<u8> = vec![i as u8; line_bytes.len()]; if let Ok(decoded_bytes) = set1::xor(&line_bytes, &key) { let score = set1::char_freq_score(&decoded_bytes); decoded.insert((score * 1000.0) as u64, (line_num, i 
as u8, decoded_bytes)); } } } line_num += 1; } let mut found = false; for (score, &(line, key, ref value)) in decoded.iter() { let score: f64 = *score as f64 / 1000.0; if score < 100.0 { if line == 170 && key == 53 { let value = str::from_utf8(value).unwrap(); assert_eq!(value, "Now that the party is jumping\n"); found = true; } } } assert!(found, "decrypted string not found!"); } #[test] fn repeating_key_xor() { let plaintext = "Burning 'em, if you ain't quick and nimble\n\ I go crazy when I hear a cymbal"; let key = "ICE"; let plaintext = plaintext.as_bytes(); let key = key.as_bytes(); let ciphertext = set1::xor_repeat(&plaintext, &key); let ciphertext_ref = "0b3637272a2b2e63622c2e69692a23693a2a3c6324202d623d63343c2a26226\ 324272765272a282b2f20430a652e2c652a3124333a653e2b2027630c692b20\ 283165286326302e27282f"; let ciphertext_ref = Vec::from_hex(ciphertext_ref).unwrap(); assert_eq!(ciphertext, ciphertext_ref); } #[test] fn hamming_distance() { assert_eq!( set1::hamming_distance("this is a test".as_bytes(), "wokka wokka!!!".as_bytes()) .unwrap(), 37 ); } #[test] fn break_repeating_key_xor() { let mut f = File::open("challenge-data/6.txt").unwrap(); let mut encoded = String::new(); f.read_to_string(&mut encoded).unwrap(); let decoded = set1::base64_decode(&encoded).unwrap(); let mut results: Vec<(f32, usize)> = Vec::with_capacity(40); for keysize in 2..=40 { let sequences = decoded.chunks(keysize).collect::<Vec<&[u8]>>(); let norm_distances = sequences .chunks(2) .filter(|maybe_pair| maybe_pair.len() == 2) .filter(|maybe_same_len| maybe_same_len[0].len() == maybe_same_len[1].len()) .map(|pair| { set1::hamming_distance(pair[0], pair[1]).unwrap() as f32 / keysize as f32 }) .collect::<Vec<f32>>(); let norm_dist_avg: f32 = &norm_distances.iter().sum() / norm_distances.len() as f32; results.push((norm_dist_avg, keysize)); } results.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap()); let keysize = results[0].1; assert_eq!(keysize, 29); let sequences = 
decoded.chunks(keysize).collect::<Vec<&[u8]>>(); let mut transposed: Vec<Vec<u8>> = Vec::with_capacity(keysize); for i in 0..keysize { let mut line = Vec::with_capacity(sequences.len()); for j in 0..sequences.len() { if i < sequences[j].len() { line.push(sequences[j][i]); } } transposed.push(line); } let mut key: Vec<u8> = Vec::with_capacity(keysize); for block in transposed { let key_byte = set1::find_best_single_byte_xor(&block); key.push(key_byte); } assert_eq!( str::from_utf8(&key).unwrap(), "Terminator X: Bring the noise" ); let plaintext = set1::xor_repeat(&decoded, &key); let plaintext = str::from_utf8(&plaintext).unwrap(); let mut f = File::open("challenge-data/6_plaintext.txt").unwrap(); let mut plaintext_ref = String::new(); f.read_to_string(&mut plaintext_ref).unwrap(); assert_eq!(plaintext, plaintext_ref); } extern crate openssl; use self::openssl::symm; use self::openssl::symm::Cipher; use std::io::prelude::*; #[test] fn aes_ecb_mode() { let mut f = File::open("challenge-data/7.txt").unwrap(); let mut encoded = String::new(); f.read_to_string(&mut encoded).unwrap(); let decoded = set1::base64_decode(&encoded).unwrap(); let plaintext = symm::decrypt( Cipher::aes_128_ecb(), "YELLOW SUBMARINE".as_bytes(), None,
} }
random_line_split
main.rs
#[macro_use] extern crate colored_print; extern crate atty; use colored_print::color::ConsoleColor; use colored_print::color::ConsoleColor::*; use std::env; use std::ffi::OsStr; use std::fmt; use std::fmt::Display; use std::fs; use std::fs::File; use std::io; use std::io::prelude::*; use std::path::{Path, PathBuf}; use std::process::{Command, Stdio}; fn colorize() -> bool { use atty; use atty::Stream; atty::is(Stream::Stdout) } fn modified_file_name(path: &Path, suffix: &str, ext: Option<&str>) -> String { let name = path .file_stem() .expect("internal error: there are no files without name!") .to_str() .expect("internal error: file name cannot be represented in UTF-8."); let ext = ext.map(|x| format!(".{}", x)).unwrap_or(format!("")); format!("{}{}{}", name, suffix, ext) } enum CompilationResult { Success { ir_path: PathBuf, llvm_ir: String, cc_output: String, }, Failure { cc_output: String, }, } /// returns the result of compilation with clang (for reference) fn reference_compile(src_path: &Path) -> io::Result<CompilationResult> { let ir_path = src_path.with_file_name(modified_file_name(src_path, "_ref", Some("ll"))); // compile let output = Command::new("clang") .arg("-O0") .arg("-S") .arg("-emit-llvm") .arg("-o") .arg(ir_path.display().to_string()) .arg(src_path.display().to_string()) .output()?; let cc_output = String::from_utf8_lossy(&output.stderr).into_owned(); if!ir_path.exists() { return Ok(CompilationResult::Failure { cc_output }); } let mut llvm_ir = String::new(); File::open(&ir_path)?.read_to_string(&mut llvm_ir)?; Ok(CompilationResult::Success { ir_path, llvm_ir, cc_output, }) } /// returns the llvm_ir of compilation with our current compiler fn current_compile(src_path: &Path) -> io::Result<CompilationResult> { let ir_path = src_path.with_file_name(modified_file_name(src_path, "_cur", Some("ll"))); // compile let output = Command::new("cargo") .arg("run") .arg("--") .arg(src_path.display().to_string()) .output()?; let cc_output = 
String::from_utf8_lossy(&output.stderr).into_owned(); if output.stdout.is_empty() { // compilation failed. return Ok(CompilationResult::Failure { cc_output }); } File::create(&ir_path)?.write_all(&output.stdout)?; let llvm_ir = String::from_utf8_lossy(&output.stdout).into_owned(); Ok(CompilationResult::Success { ir_path, llvm_ir, cc_output, }) } enum AssemblyResult { Success { asm_output: String, exec_path: PathBuf, }, Failure { asm_output: String, }, Unreached, } fn compile_llvm_ir(src_path: &Path) -> io::Result<AssemblyResult> { let exec_path = if cfg!(windows) { src_path.with_extension("exe") } else { let file_name = src_path .file_stem() .expect("internal error: no file has no basename"); src_path.with_file_name(file_name) }; if!src_path.exists() { panic!("internal error: compilation has succeeded but no LLVM IR?"); } let output = Command::new("clang") .arg("-o") .arg(&exec_path) .arg(&src_path) .output()?; let asm_output = String::from_utf8_lossy(&output.stderr).into_owned(); if!exec_path.exists() { return Ok(AssemblyResult::Failure { asm_output }); } Ok(AssemblyResult::Success { asm_output, exec_path, }) } enum ExecutionResult { Success { status: Option<i32>, stdout: String, stderr: String, }, Unreached, } /// returns the execution of the binary placed in the specified path fn execute(path: &Path) -> io::Result<ExecutionResult> { if!path.exists() { return Ok(ExecutionResult::Success { status: None, stdout: String::new(), stderr: String::new(), }); } let mut child = Command::new(&path) .stdout(Stdio::piped()) .stderr(Stdio::piped()) .spawn()?; let status = child.wait()?; let mut child_stdout = child .stdout .expect("internal error: failed to get child stdout."); let mut child_stderr = child .stderr .expect("internel error: failed to get child stderr."); let (mut stdout, mut stderr) = (String::new(), String::new()); child_stdout.read_to_string(&mut stdout)?; child_stderr.read_to_string(&mut stderr)?; let status = status.code(); Ok(ExecutionResult::Success { 
status, stdout, stderr, }) } fn print_heading(color: ConsoleColor, heading: &str)
fn print_output(retval: Option<i32>, output: &str) { colored_print!{ colorize(); Reset, "{}", output; } if let Some(code) = retval { colored_println!{ colorize(); Cyan, "return code"; Reset, ": {}", code; } } } fn print_stderr(stderr: impl Display) { colored_print!{ colorize(); LightMagenta, "{}", stderr; } } #[derive(Debug, Copy, Clone)] enum Version { Reference, Current, } impl Version { pub fn get_compiler_func(&self) -> fn(path: &Path) -> io::Result<CompilationResult> { match *self { Version::Reference => reference_compile, Version::Current => current_compile, } } } impl fmt::Display for Version { fn fmt(&self, b: &mut fmt::Formatter) -> fmt::Result { match *self { Version::Reference => write!(b, "Reference"), Version::Current => write!(b, " Current "), } } } struct Results { compilation: CompilationResult, assembly: AssemblyResult, execution: ExecutionResult, } fn do_for(version: Version, path: &Path) -> io::Result<Results> { let (compilation, assembly, execution); // explicitly denote borrowing region { compilation = (version.get_compiler_func())(&path)?; let ir_path = match compilation { failure @ CompilationResult::Failure {.. } => { return Ok(Results { compilation: failure, assembly: AssemblyResult::Unreached, execution: ExecutionResult::Unreached, }) } CompilationResult::Success { ref ir_path,.. } => ir_path.clone(), }; assembly = compile_llvm_ir(&ir_path)?; let exec_path = match assembly { failure @ AssemblyResult::Failure {.. } => { return Ok(Results { compilation: compilation, assembly: failure, execution: ExecutionResult::Unreached, }) } AssemblyResult::Success { ref exec_path,.. 
} => exec_path.clone(), AssemblyResult::Unreached => unreachable!(), }; execution = execute(&exec_path)?; } Ok(Results { compilation, assembly, execution, }) } fn judge(refr: &ExecutionResult, curr: &ExecutionResult) -> (bool, ConsoleColor, &'static str) { const OK: (bool, ConsoleColor, &str) = (true, Green, "OK"); const NG: (bool, ConsoleColor, &str) = (false, Red, "NG"); use ExecutionResult::Success; match (refr, curr) { ( Success { status: ref refr_status, stdout: ref refr_stdout, .. }, Success { status: ref curr_status, stdout: ref curr_stdout, .. }, ) => { if (refr_status, refr_stdout) == (curr_status, curr_stdout) { OK } else { NG } } _ => NG, } } fn print_for(version: Version, results: Results) { print_heading( LightGreen, &format!("==================== {} ====================", version), ); use {AssemblyResult as AR, CompilationResult as CR, ExecutionResult as ER}; print_heading(LightBlue, "> Compilation (C)"); match results.compilation { CR::Success { cc_output, llvm_ir,.. } => { print_stderr(&cc_output); print_output(None, &llvm_ir); } CR::Failure { cc_output,.. } => { print_stderr(&cc_output); return; } } print_heading(LightBlue, "> Compilation (LLVM IR)"); match results.assembly { AR::Success { asm_output,.. } => { print_stderr(&asm_output); } AR::Failure { asm_output,.. } => { print_stderr(&asm_output); return; } AR::Unreached => unreachable!(), } print_heading(LightBlue, "> Execution"); match results.execution { ER::Success { status, stdout, stderr, } => { print_stderr(&stderr); print_output(status, &stdout); } ER::Unreached => unreachable!(), } } fn main() -> io::Result<()> { let verbose = env::args().any(|arg| arg == "--verbose" || arg == "-v"); let test_src_dir: PathBuf = ["test", "ok"].iter().collect(); walk_dir( &test_src_dir, |path| path.extension().and_then(OsStr::to_str)!= Some("c"), |path| { if verbose { colored_println! 
{ colorize(); LightGreen, "Removing "; Reset, "{}", path.display(); } } fs::remove_file(&path) }, )?; let mut path_to_test: Vec<_> = env::args() .skip(1) .filter(|arg|!arg.starts_with("-")) .map(|file_name| test_src_dir.join(file_name)) .collect(); if path_to_test.is_empty() { path_to_test = walk_dir( &test_src_dir, |path| path.extension().and_then(OsStr::to_str) == Some("c"), |path| Ok(path.to_path_buf()), )?; } let mut any_fails = false; for path in path_to_test { colored_print!{ colorize(); LightGreen, " Testing "; Reset, "file "; Yellow, "{}", path.display(); Reset, "... "; } if!path.exists() { println!("not found"); continue; } let refr = do_for(Version::Reference, &path)?; let curr = do_for(Version::Current, &path)?; let (status, color, judge) = judge(&refr.execution, &curr.execution); colored_println!{ colorize(); color, "{}", judge; } // print info when verbose mode or something fails if verbose ||!status { print_for(Version::Reference, refr); print_for(Version::Current, curr); } any_fails |=!status; } if!any_fails { Ok(()) } else { Err(io::Error::new(io::ErrorKind::Other, "some test fails.")) } } fn walk_dir<T>( dir: &Path, path_filter: impl Fn(&Path) -> bool + Copy, cb: impl Fn(&Path) -> io::Result<T> + Copy, ) -> io::Result<Vec<T>> { let mut result = Vec::new(); for entry in fs::read_dir(dir)? { let entry = entry?; let path = entry.path(); if!path_filter(&path) { continue; } if path.is_dir() { walk_dir(&path, path_filter, cb)?; } else { result.push(cb(&path)?); } } Ok(result) }
{ colored_println!{ colorize(); color, "{} ", heading; } }
identifier_body
main.rs
#[macro_use] extern crate colored_print; extern crate atty; use colored_print::color::ConsoleColor; use colored_print::color::ConsoleColor::*; use std::env; use std::ffi::OsStr; use std::fmt; use std::fmt::Display; use std::fs; use std::fs::File; use std::io; use std::io::prelude::*; use std::path::{Path, PathBuf}; use std::process::{Command, Stdio}; fn colorize() -> bool { use atty; use atty::Stream; atty::is(Stream::Stdout) } fn modified_file_name(path: &Path, suffix: &str, ext: Option<&str>) -> String { let name = path .file_stem() .expect("internal error: there are no files without name!") .to_str() .expect("internal error: file name cannot be represented in UTF-8."); let ext = ext.map(|x| format!(".{}", x)).unwrap_or(format!("")); format!("{}{}{}", name, suffix, ext) } enum CompilationResult { Success { ir_path: PathBuf, llvm_ir: String, cc_output: String, }, Failure { cc_output: String, }, } /// returns the result of compilation with clang (for reference) fn reference_compile(src_path: &Path) -> io::Result<CompilationResult> { let ir_path = src_path.with_file_name(modified_file_name(src_path, "_ref", Some("ll"))); // compile
.arg("-emit-llvm") .arg("-o") .arg(ir_path.display().to_string()) .arg(src_path.display().to_string()) .output()?; let cc_output = String::from_utf8_lossy(&output.stderr).into_owned(); if!ir_path.exists() { return Ok(CompilationResult::Failure { cc_output }); } let mut llvm_ir = String::new(); File::open(&ir_path)?.read_to_string(&mut llvm_ir)?; Ok(CompilationResult::Success { ir_path, llvm_ir, cc_output, }) } /// returns the llvm_ir of compilation with our current compiler fn current_compile(src_path: &Path) -> io::Result<CompilationResult> { let ir_path = src_path.with_file_name(modified_file_name(src_path, "_cur", Some("ll"))); // compile let output = Command::new("cargo") .arg("run") .arg("--") .arg(src_path.display().to_string()) .output()?; let cc_output = String::from_utf8_lossy(&output.stderr).into_owned(); if output.stdout.is_empty() { // compilation failed. return Ok(CompilationResult::Failure { cc_output }); } File::create(&ir_path)?.write_all(&output.stdout)?; let llvm_ir = String::from_utf8_lossy(&output.stdout).into_owned(); Ok(CompilationResult::Success { ir_path, llvm_ir, cc_output, }) } enum AssemblyResult { Success { asm_output: String, exec_path: PathBuf, }, Failure { asm_output: String, }, Unreached, } fn compile_llvm_ir(src_path: &Path) -> io::Result<AssemblyResult> { let exec_path = if cfg!(windows) { src_path.with_extension("exe") } else { let file_name = src_path .file_stem() .expect("internal error: no file has no basename"); src_path.with_file_name(file_name) }; if!src_path.exists() { panic!("internal error: compilation has succeeded but no LLVM IR?"); } let output = Command::new("clang") .arg("-o") .arg(&exec_path) .arg(&src_path) .output()?; let asm_output = String::from_utf8_lossy(&output.stderr).into_owned(); if!exec_path.exists() { return Ok(AssemblyResult::Failure { asm_output }); } Ok(AssemblyResult::Success { asm_output, exec_path, }) } enum ExecutionResult { Success { status: Option<i32>, stdout: String, stderr: String, }, 
Unreached, } /// returns the execution of the binary placed in the specified path fn execute(path: &Path) -> io::Result<ExecutionResult> { if!path.exists() { return Ok(ExecutionResult::Success { status: None, stdout: String::new(), stderr: String::new(), }); } let mut child = Command::new(&path) .stdout(Stdio::piped()) .stderr(Stdio::piped()) .spawn()?; let status = child.wait()?; let mut child_stdout = child .stdout .expect("internal error: failed to get child stdout."); let mut child_stderr = child .stderr .expect("internel error: failed to get child stderr."); let (mut stdout, mut stderr) = (String::new(), String::new()); child_stdout.read_to_string(&mut stdout)?; child_stderr.read_to_string(&mut stderr)?; let status = status.code(); Ok(ExecutionResult::Success { status, stdout, stderr, }) } fn print_heading(color: ConsoleColor, heading: &str) { colored_println!{ colorize(); color, "{} ", heading; } } fn print_output(retval: Option<i32>, output: &str) { colored_print!{ colorize(); Reset, "{}", output; } if let Some(code) = retval { colored_println!{ colorize(); Cyan, "return code"; Reset, ": {}", code; } } } fn print_stderr(stderr: impl Display) { colored_print!{ colorize(); LightMagenta, "{}", stderr; } } #[derive(Debug, Copy, Clone)] enum Version { Reference, Current, } impl Version { pub fn get_compiler_func(&self) -> fn(path: &Path) -> io::Result<CompilationResult> { match *self { Version::Reference => reference_compile, Version::Current => current_compile, } } } impl fmt::Display for Version { fn fmt(&self, b: &mut fmt::Formatter) -> fmt::Result { match *self { Version::Reference => write!(b, "Reference"), Version::Current => write!(b, " Current "), } } } struct Results { compilation: CompilationResult, assembly: AssemblyResult, execution: ExecutionResult, } fn do_for(version: Version, path: &Path) -> io::Result<Results> { let (compilation, assembly, execution); // explicitly denote borrowing region { compilation = (version.get_compiler_func())(&path)?; let 
ir_path = match compilation { failure @ CompilationResult::Failure {.. } => { return Ok(Results { compilation: failure, assembly: AssemblyResult::Unreached, execution: ExecutionResult::Unreached, }) } CompilationResult::Success { ref ir_path,.. } => ir_path.clone(), }; assembly = compile_llvm_ir(&ir_path)?; let exec_path = match assembly { failure @ AssemblyResult::Failure {.. } => { return Ok(Results { compilation: compilation, assembly: failure, execution: ExecutionResult::Unreached, }) } AssemblyResult::Success { ref exec_path,.. } => exec_path.clone(), AssemblyResult::Unreached => unreachable!(), }; execution = execute(&exec_path)?; } Ok(Results { compilation, assembly, execution, }) } fn judge(refr: &ExecutionResult, curr: &ExecutionResult) -> (bool, ConsoleColor, &'static str) { const OK: (bool, ConsoleColor, &str) = (true, Green, "OK"); const NG: (bool, ConsoleColor, &str) = (false, Red, "NG"); use ExecutionResult::Success; match (refr, curr) { ( Success { status: ref refr_status, stdout: ref refr_stdout, .. }, Success { status: ref curr_status, stdout: ref curr_stdout, .. }, ) => { if (refr_status, refr_stdout) == (curr_status, curr_stdout) { OK } else { NG } } _ => NG, } } fn print_for(version: Version, results: Results) { print_heading( LightGreen, &format!("==================== {} ====================", version), ); use {AssemblyResult as AR, CompilationResult as CR, ExecutionResult as ER}; print_heading(LightBlue, "> Compilation (C)"); match results.compilation { CR::Success { cc_output, llvm_ir,.. } => { print_stderr(&cc_output); print_output(None, &llvm_ir); } CR::Failure { cc_output,.. } => { print_stderr(&cc_output); return; } } print_heading(LightBlue, "> Compilation (LLVM IR)"); match results.assembly { AR::Success { asm_output,.. } => { print_stderr(&asm_output); } AR::Failure { asm_output,.. 
} => { print_stderr(&asm_output); return; } AR::Unreached => unreachable!(), } print_heading(LightBlue, "> Execution"); match results.execution { ER::Success { status, stdout, stderr, } => { print_stderr(&stderr); print_output(status, &stdout); } ER::Unreached => unreachable!(), } } fn main() -> io::Result<()> { let verbose = env::args().any(|arg| arg == "--verbose" || arg == "-v"); let test_src_dir: PathBuf = ["test", "ok"].iter().collect(); walk_dir( &test_src_dir, |path| path.extension().and_then(OsStr::to_str)!= Some("c"), |path| { if verbose { colored_println! { colorize(); LightGreen, "Removing "; Reset, "{}", path.display(); } } fs::remove_file(&path) }, )?; let mut path_to_test: Vec<_> = env::args() .skip(1) .filter(|arg|!arg.starts_with("-")) .map(|file_name| test_src_dir.join(file_name)) .collect(); if path_to_test.is_empty() { path_to_test = walk_dir( &test_src_dir, |path| path.extension().and_then(OsStr::to_str) == Some("c"), |path| Ok(path.to_path_buf()), )?; } let mut any_fails = false; for path in path_to_test { colored_print!{ colorize(); LightGreen, " Testing "; Reset, "file "; Yellow, "{}", path.display(); Reset, "... "; } if!path.exists() { println!("not found"); continue; } let refr = do_for(Version::Reference, &path)?; let curr = do_for(Version::Current, &path)?; let (status, color, judge) = judge(&refr.execution, &curr.execution); colored_println!{ colorize(); color, "{}", judge; } // print info when verbose mode or something fails if verbose ||!status { print_for(Version::Reference, refr); print_for(Version::Current, curr); } any_fails |=!status; } if!any_fails { Ok(()) } else { Err(io::Error::new(io::ErrorKind::Other, "some test fails.")) } } fn walk_dir<T>( dir: &Path, path_filter: impl Fn(&Path) -> bool + Copy, cb: impl Fn(&Path) -> io::Result<T> + Copy, ) -> io::Result<Vec<T>> { let mut result = Vec::new(); for entry in fs::read_dir(dir)? 
{ let entry = entry?; let path = entry.path(); if!path_filter(&path) { continue; } if path.is_dir() { walk_dir(&path, path_filter, cb)?; } else { result.push(cb(&path)?); } } Ok(result) }
let output = Command::new("clang") .arg("-O0") .arg("-S")
random_line_split
main.rs
#[macro_use] extern crate colored_print; extern crate atty; use colored_print::color::ConsoleColor; use colored_print::color::ConsoleColor::*; use std::env; use std::ffi::OsStr; use std::fmt; use std::fmt::Display; use std::fs; use std::fs::File; use std::io; use std::io::prelude::*; use std::path::{Path, PathBuf}; use std::process::{Command, Stdio}; fn colorize() -> bool { use atty; use atty::Stream; atty::is(Stream::Stdout) } fn modified_file_name(path: &Path, suffix: &str, ext: Option<&str>) -> String { let name = path .file_stem() .expect("internal error: there are no files without name!") .to_str() .expect("internal error: file name cannot be represented in UTF-8."); let ext = ext.map(|x| format!(".{}", x)).unwrap_or(format!("")); format!("{}{}{}", name, suffix, ext) } enum CompilationResult { Success { ir_path: PathBuf, llvm_ir: String, cc_output: String, }, Failure { cc_output: String, }, } /// returns the result of compilation with clang (for reference) fn reference_compile(src_path: &Path) -> io::Result<CompilationResult> { let ir_path = src_path.with_file_name(modified_file_name(src_path, "_ref", Some("ll"))); // compile let output = Command::new("clang") .arg("-O0") .arg("-S") .arg("-emit-llvm") .arg("-o") .arg(ir_path.display().to_string()) .arg(src_path.display().to_string()) .output()?; let cc_output = String::from_utf8_lossy(&output.stderr).into_owned(); if!ir_path.exists() { return Ok(CompilationResult::Failure { cc_output }); } let mut llvm_ir = String::new(); File::open(&ir_path)?.read_to_string(&mut llvm_ir)?; Ok(CompilationResult::Success { ir_path, llvm_ir, cc_output, }) } /// returns the llvm_ir of compilation with our current compiler fn current_compile(src_path: &Path) -> io::Result<CompilationResult> { let ir_path = src_path.with_file_name(modified_file_name(src_path, "_cur", Some("ll"))); // compile let output = Command::new("cargo") .arg("run") .arg("--") .arg(src_path.display().to_string()) .output()?; let cc_output = 
String::from_utf8_lossy(&output.stderr).into_owned(); if output.stdout.is_empty() { // compilation failed. return Ok(CompilationResult::Failure { cc_output }); } File::create(&ir_path)?.write_all(&output.stdout)?; let llvm_ir = String::from_utf8_lossy(&output.stdout).into_owned(); Ok(CompilationResult::Success { ir_path, llvm_ir, cc_output, }) } enum AssemblyResult { Success { asm_output: String, exec_path: PathBuf, }, Failure { asm_output: String, }, Unreached, } fn compile_llvm_ir(src_path: &Path) -> io::Result<AssemblyResult> { let exec_path = if cfg!(windows) { src_path.with_extension("exe") } else { let file_name = src_path .file_stem() .expect("internal error: no file has no basename"); src_path.with_file_name(file_name) }; if!src_path.exists() { panic!("internal error: compilation has succeeded but no LLVM IR?"); } let output = Command::new("clang") .arg("-o") .arg(&exec_path) .arg(&src_path) .output()?; let asm_output = String::from_utf8_lossy(&output.stderr).into_owned(); if!exec_path.exists() { return Ok(AssemblyResult::Failure { asm_output }); } Ok(AssemblyResult::Success { asm_output, exec_path, }) } enum ExecutionResult { Success { status: Option<i32>, stdout: String, stderr: String, }, Unreached, } /// returns the execution of the binary placed in the specified path fn execute(path: &Path) -> io::Result<ExecutionResult> { if!path.exists() { return Ok(ExecutionResult::Success { status: None, stdout: String::new(), stderr: String::new(), }); } let mut child = Command::new(&path) .stdout(Stdio::piped()) .stderr(Stdio::piped()) .spawn()?; let status = child.wait()?; let mut child_stdout = child .stdout .expect("internal error: failed to get child stdout."); let mut child_stderr = child .stderr .expect("internel error: failed to get child stderr."); let (mut stdout, mut stderr) = (String::new(), String::new()); child_stdout.read_to_string(&mut stdout)?; child_stderr.read_to_string(&mut stderr)?; let status = status.code(); Ok(ExecutionResult::Success { 
status, stdout, stderr, }) } fn print_heading(color: ConsoleColor, heading: &str) { colored_println!{ colorize(); color, "{} ", heading; } } fn print_output(retval: Option<i32>, output: &str) { colored_print!{ colorize(); Reset, "{}", output; } if let Some(code) = retval { colored_println!{ colorize(); Cyan, "return code"; Reset, ": {}", code; } } } fn print_stderr(stderr: impl Display) { colored_print!{ colorize(); LightMagenta, "{}", stderr; } } #[derive(Debug, Copy, Clone)] enum Version { Reference, Current, } impl Version { pub fn get_compiler_func(&self) -> fn(path: &Path) -> io::Result<CompilationResult> { match *self { Version::Reference => reference_compile, Version::Current => current_compile, } } } impl fmt::Display for Version { fn fmt(&self, b: &mut fmt::Formatter) -> fmt::Result { match *self { Version::Reference => write!(b, "Reference"), Version::Current => write!(b, " Current "), } } } struct
{ compilation: CompilationResult, assembly: AssemblyResult, execution: ExecutionResult, } fn do_for(version: Version, path: &Path) -> io::Result<Results> { let (compilation, assembly, execution); // explicitly denote borrowing region { compilation = (version.get_compiler_func())(&path)?; let ir_path = match compilation { failure @ CompilationResult::Failure {.. } => { return Ok(Results { compilation: failure, assembly: AssemblyResult::Unreached, execution: ExecutionResult::Unreached, }) } CompilationResult::Success { ref ir_path,.. } => ir_path.clone(), }; assembly = compile_llvm_ir(&ir_path)?; let exec_path = match assembly { failure @ AssemblyResult::Failure {.. } => { return Ok(Results { compilation: compilation, assembly: failure, execution: ExecutionResult::Unreached, }) } AssemblyResult::Success { ref exec_path,.. } => exec_path.clone(), AssemblyResult::Unreached => unreachable!(), }; execution = execute(&exec_path)?; } Ok(Results { compilation, assembly, execution, }) } fn judge(refr: &ExecutionResult, curr: &ExecutionResult) -> (bool, ConsoleColor, &'static str) { const OK: (bool, ConsoleColor, &str) = (true, Green, "OK"); const NG: (bool, ConsoleColor, &str) = (false, Red, "NG"); use ExecutionResult::Success; match (refr, curr) { ( Success { status: ref refr_status, stdout: ref refr_stdout, .. }, Success { status: ref curr_status, stdout: ref curr_stdout, .. }, ) => { if (refr_status, refr_stdout) == (curr_status, curr_stdout) { OK } else { NG } } _ => NG, } } fn print_for(version: Version, results: Results) { print_heading( LightGreen, &format!("==================== {} ====================", version), ); use {AssemblyResult as AR, CompilationResult as CR, ExecutionResult as ER}; print_heading(LightBlue, "> Compilation (C)"); match results.compilation { CR::Success { cc_output, llvm_ir,.. } => { print_stderr(&cc_output); print_output(None, &llvm_ir); } CR::Failure { cc_output,.. 
} => { print_stderr(&cc_output); return; } } print_heading(LightBlue, "> Compilation (LLVM IR)"); match results.assembly { AR::Success { asm_output,.. } => { print_stderr(&asm_output); } AR::Failure { asm_output,.. } => { print_stderr(&asm_output); return; } AR::Unreached => unreachable!(), } print_heading(LightBlue, "> Execution"); match results.execution { ER::Success { status, stdout, stderr, } => { print_stderr(&stderr); print_output(status, &stdout); } ER::Unreached => unreachable!(), } } fn main() -> io::Result<()> { let verbose = env::args().any(|arg| arg == "--verbose" || arg == "-v"); let test_src_dir: PathBuf = ["test", "ok"].iter().collect(); walk_dir( &test_src_dir, |path| path.extension().and_then(OsStr::to_str)!= Some("c"), |path| { if verbose { colored_println! { colorize(); LightGreen, "Removing "; Reset, "{}", path.display(); } } fs::remove_file(&path) }, )?; let mut path_to_test: Vec<_> = env::args() .skip(1) .filter(|arg|!arg.starts_with("-")) .map(|file_name| test_src_dir.join(file_name)) .collect(); if path_to_test.is_empty() { path_to_test = walk_dir( &test_src_dir, |path| path.extension().and_then(OsStr::to_str) == Some("c"), |path| Ok(path.to_path_buf()), )?; } let mut any_fails = false; for path in path_to_test { colored_print!{ colorize(); LightGreen, " Testing "; Reset, "file "; Yellow, "{}", path.display(); Reset, "... 
"; } if!path.exists() { println!("not found"); continue; } let refr = do_for(Version::Reference, &path)?; let curr = do_for(Version::Current, &path)?; let (status, color, judge) = judge(&refr.execution, &curr.execution); colored_println!{ colorize(); color, "{}", judge; } // print info when verbose mode or something fails if verbose ||!status { print_for(Version::Reference, refr); print_for(Version::Current, curr); } any_fails |=!status; } if!any_fails { Ok(()) } else { Err(io::Error::new(io::ErrorKind::Other, "some test fails.")) } } fn walk_dir<T>( dir: &Path, path_filter: impl Fn(&Path) -> bool + Copy, cb: impl Fn(&Path) -> io::Result<T> + Copy, ) -> io::Result<Vec<T>> { let mut result = Vec::new(); for entry in fs::read_dir(dir)? { let entry = entry?; let path = entry.path(); if!path_filter(&path) { continue; } if path.is_dir() { walk_dir(&path, path_filter, cb)?; } else { result.push(cb(&path)?); } } Ok(result) }
Results
identifier_name
cylinder.rs
//! Construct cylinders that are curved sheets, not volumes. use surface::{Sheet, LatticeType}; use coord::{Coord, Direction, Translate, rotate_coords, rotate_planar_coords_to_alignment}; use describe::{unwrap_name, Describe}; use error::Result; use iterator::{ResidueIter, ResidueIterOut}; use system::*; use std::f64::consts::PI; use std::fmt; use std::fmt::{Display, Formatter}; impl_component![Cylinder]; impl_translate![Cylinder]; #[derive(Clone, Copy, Debug, PartialEq, Deserialize, Serialize)] /// Cylinders can be capped in either or both ends. pub enum CylinderCap { Top, Bottom, Both, } impl Display for CylinderCap { fn fmt(&self, f: &mut Formatter) -> fmt::Result { match *self { CylinderCap::Top => write!(f, "Top"), CylinderCap::Bottom => write!(f, "Bottom"), CylinderCap::Both => write!(f, "Both"), } } } #[derive(Clone, Debug, Deserialize, Serialize)] /// A 2D cylindrical surface. pub struct Cylinder { /// Name of cylinder in database. pub name: Option<String>, /// Optional residue placed at each coordinate. If not set the cylinder describes /// a general collection of coordinates. pub residue: Option<Residue>, /// lattice type used to construct the cylinder surface structure. pub lattice: LatticeType, /// The axis along which the cylinder is aligned. pub alignment: Direction, /// Cylinders can be capped at its ends. pub cap: Option<CylinderCap>, #[serde(skip)] /// Origin of the cylinder. Located in the center of the bottom. pub origin: Coord, #[serde(skip)] /// Radius of cylinder. pub radius: f64, #[serde(skip)] /// Height of cylinder. pub height: f64, #[serde(skip)] /// List of coordinates belonging to the cylinder. Relative to the `origin. pub coords: Vec<Coord>, } impl Cylinder { /// Construct the cylinder coordinates and return the object. /// /// # Errors /// Returns an error if either the radius or height is non-positive. pub fn construct(self) -> Result<Cylinder> { // Bend a `Sheet` of the chosen lattice type into the cylinder. 
let length = 2.0 * PI * self.radius; let width = self.height; let sheet = Sheet { name: None, residue: None, lattice: self.lattice.clone(), std_z: None, origin: Coord::default(), normal: Direction::Z, length, width, coords: vec![], }.construct()?; let final_radius = sheet.length / (2.0 * PI); let final_height = sheet.width; // The cylinder will be created aligned to the Y axis let mut coords: Vec<_> = sheet.coords .iter() .map(|coord| { let (x0, y, _) = coord.to_tuple(); let angle = (x0 * 360.0 / sheet.length).to_radians(); let x = final_radius * angle.sin(); let z = -final_radius * angle.cos(); Coord::new(x, y, z) }) .collect(); if let Some(cap) = self.cap { // The cylinder is aligned along the y axis. Construct a cap from // the same sheet and rotate it to match. let mut bottom = sheet.to_circle(final_radius); //.rotate(Direction::X); bottom.coords = rotate_planar_coords_to_alignment(&bottom.coords, Direction::Z, Direction::Y); // Get the top cap coordinates by shifting the bottom ones, not just the origin. let top_coords: Vec<_> = bottom.coords .iter() .map(|&coord| coord + Coord::new(0.0, final_height, 0.0)) .collect(); match cap { CylinderCap::Bottom => coords.extend_from_slice(&bottom.coords), CylinderCap::Top => coords.extend_from_slice(&top_coords), CylinderCap::Both => { coords.extend_from_slice(&bottom.coords); coords.extend_from_slice(&top_coords); } } } // Rotate the cylinder once along the x-axis to align them to the z-axis. Ok(Cylinder { alignment: Direction::Z, radius: final_radius, height: final_height, coords: rotate_coords(&coords, Direction::X), .. self }) } /// Calculate the box size. 
fn calc_box_size(&self) -> Coord { let diameter = 2.0 * self.radius; match self.alignment { Direction::X => Coord::new(self.height, diameter, diameter), Direction::Y => Coord::new(diameter, self.height, diameter), Direction::Z => Coord::new(diameter, diameter, self.height), } } } impl Describe for Cylinder { fn describe(&self) -> String { format!("{} (Cylinder surface of radius {:.2} and height {:.2} at {})", unwrap_name(&self.name), self.radius, self.height, self.origin) }
} #[cfg(test)] mod tests { use super::*; use surface::LatticeType::*; fn setup_cylinder(radius: f64, height: f64, lattice: &LatticeType) -> Cylinder { Cylinder { name: None, residue: None, lattice: lattice.clone(), alignment: Direction::Z, cap: None, origin: Coord::default(), radius, height, coords: vec![], } } #[test] fn cylinder_is_bent_from_sheet_as_expected() { let radius = 2.0; let height = 5.0; let density = 10.0; let lattice = PoissonDisc { density }; let cylinder = setup_cylinder(radius, height, &lattice).construct().unwrap(); // We should have a rough surface density match let expected = 2.0 * PI * radius * height * density; assert!((expected - cylinder.coords.len() as f64).abs() / expected < 0.1); // Not all coords should be at z = 0, ie. not still a sheet let sum_z = cylinder.coords.iter().map(|&Coord { x: _, y: _, z }| z.abs()).sum::<f64>(); assert!(sum_z > 0.0); // Currently the alignment should be along Z assert_eq!(Direction::Z, cylinder.alignment); // Rigorous test of coordinate structure for coord in cylinder.coords { let (r, h) = Coord::ORIGO.distance_cylindrical(coord, Direction::Z); assert!(r <= cylinder.radius); assert!(h >= 0.0 && h <= cylinder.height); } } #[test] fn cylinder_corrects_radius_and_height_to_match_lattice_spacing() { let radius = 1.0; // should give circumference = 2 * PI let height = 5.0; let a = 1.0; // not a match to the circumference let b = 1.1; // not a match to the height let lattice = Triclinic { a, b, gamma: 90.0 }; let cylinder = setup_cylinder(radius, height, &lattice).construct().unwrap(); assert_ne!(radius, cylinder.radius); assert_ne!(height, cylinder.height); // The best match to the circumference 2 * PI is the multiple 6 * a. 
assert_eq!(6.0 * a / (2.0 * PI), cylinder.radius); assert_eq!(5.0 * b, cylinder.height); } #[test] fn constructing_cylinder_with_negative_radius_or_height_returns_error() { let lattice = PoissonDisc { density: 10.0 }; assert!(setup_cylinder(-1.0, 1.0, &lattice).construct().is_err()); assert!(setup_cylinder(1.0, -1.0, &lattice).construct().is_err()); assert!(setup_cylinder(1.0, 1.0, &lattice).construct().is_ok()); } #[test] fn add_caps_to_cylinder() { let radius = 2.0; let height = 5.0; let lattice = Hexagonal { a: 0.1 }; let mut conf = setup_cylinder(radius, height, &lattice); // Without caps let cylinder = conf.clone().construct().unwrap(); let num_coords = cylinder.coords.len(); // With a bottom cap conf.cap = Some(CylinderCap::Bottom); let cylinder_cap = conf.clone().construct().unwrap(); // The first coordinates should be the original cylinder let (original, bottom) = cylinder_cap.coords.split_at(num_coords); assert_eq!(&original, &cylinder.coords.as_slice()); assert!(bottom.len() > 0); // All the bottom coordinates should be at z = 0 for coord in bottom { assert_eq!(coord.z, 0.0); } // A top cap conf.cap = Some(CylinderCap::Top); let cylinder_cap = conf.clone().construct().unwrap(); let (original, top) = cylinder_cap.coords.split_at(num_coords); assert_eq!(&original, &cylinder.coords.as_slice()); assert_eq!(top.len(), bottom.len()); // All the top coordinates should be at the cylinder height for coord in top { assert_eq!(coord.z, cylinder.height); } // Both caps conf.cap = Some(CylinderCap::Both); let cylinder_cap = conf.clone().construct().unwrap(); let (original, bottom_and_top) = cylinder_cap.coords.split_at(num_coords); assert_eq!(&original, &cylinder.coords.as_slice()); let (bottom_from_both, top_from_both) = bottom_and_top.split_at(bottom.len()); assert_eq!(bottom, bottom_from_both); assert_eq!(top, top_from_both); } #[test] fn calc_box_size_of_cylinder() { let radius = 2.0; let height = 5.0; let lattice = Hexagonal { a: 0.1 }; // Check each direction 
let mut cylinder = Cylinder { alignment: Direction::X, .. setup_cylinder(radius, height, &lattice) }; let diameter = 2.0 * radius; assert_eq!(Coord::new(height, diameter, diameter), cylinder.calc_box_size()); cylinder.alignment = Direction::Y; assert_eq!(Coord::new(diameter, height, diameter), cylinder.calc_box_size()); cylinder.alignment = Direction::Z; assert_eq!(Coord::new(diameter, diameter, height), cylinder.calc_box_size()); } }
fn describe_short(&self) -> String { format!("{} (Cylinder)", unwrap_name(&self.name)) }
random_line_split
cylinder.rs
//! Construct cylinders that are curved sheets, not volumes. use surface::{Sheet, LatticeType}; use coord::{Coord, Direction, Translate, rotate_coords, rotate_planar_coords_to_alignment}; use describe::{unwrap_name, Describe}; use error::Result; use iterator::{ResidueIter, ResidueIterOut}; use system::*; use std::f64::consts::PI; use std::fmt; use std::fmt::{Display, Formatter}; impl_component![Cylinder]; impl_translate![Cylinder]; #[derive(Clone, Copy, Debug, PartialEq, Deserialize, Serialize)] /// Cylinders can be capped in either or both ends. pub enum CylinderCap { Top, Bottom, Both, } impl Display for CylinderCap { fn fmt(&self, f: &mut Formatter) -> fmt::Result { match *self { CylinderCap::Top => write!(f, "Top"), CylinderCap::Bottom => write!(f, "Bottom"), CylinderCap::Both => write!(f, "Both"), } } } #[derive(Clone, Debug, Deserialize, Serialize)] /// A 2D cylindrical surface. pub struct Cylinder { /// Name of cylinder in database. pub name: Option<String>, /// Optional residue placed at each coordinate. If not set the cylinder describes /// a general collection of coordinates. pub residue: Option<Residue>, /// lattice type used to construct the cylinder surface structure. pub lattice: LatticeType, /// The axis along which the cylinder is aligned. pub alignment: Direction, /// Cylinders can be capped at its ends. pub cap: Option<CylinderCap>, #[serde(skip)] /// Origin of the cylinder. Located in the center of the bottom. pub origin: Coord, #[serde(skip)] /// Radius of cylinder. pub radius: f64, #[serde(skip)] /// Height of cylinder. pub height: f64, #[serde(skip)] /// List of coordinates belonging to the cylinder. Relative to the `origin. pub coords: Vec<Coord>, } impl Cylinder { /// Construct the cylinder coordinates and return the object. /// /// # Errors /// Returns an error if either the radius or height is non-positive. pub fn construct(self) -> Result<Cylinder> { // Bend a `Sheet` of the chosen lattice type into the cylinder. 
let length = 2.0 * PI * self.radius; let width = self.height; let sheet = Sheet { name: None, residue: None, lattice: self.lattice.clone(), std_z: None, origin: Coord::default(), normal: Direction::Z, length, width, coords: vec![], }.construct()?; let final_radius = sheet.length / (2.0 * PI); let final_height = sheet.width; // The cylinder will be created aligned to the Y axis let mut coords: Vec<_> = sheet.coords .iter() .map(|coord| { let (x0, y, _) = coord.to_tuple(); let angle = (x0 * 360.0 / sheet.length).to_radians(); let x = final_radius * angle.sin(); let z = -final_radius * angle.cos(); Coord::new(x, y, z) }) .collect(); if let Some(cap) = self.cap { // The cylinder is aligned along the y axis. Construct a cap from // the same sheet and rotate it to match. let mut bottom = sheet.to_circle(final_radius); //.rotate(Direction::X); bottom.coords = rotate_planar_coords_to_alignment(&bottom.coords, Direction::Z, Direction::Y); // Get the top cap coordinates by shifting the bottom ones, not just the origin. let top_coords: Vec<_> = bottom.coords .iter() .map(|&coord| coord + Coord::new(0.0, final_height, 0.0)) .collect(); match cap { CylinderCap::Bottom => coords.extend_from_slice(&bottom.coords), CylinderCap::Top => coords.extend_from_slice(&top_coords), CylinderCap::Both => { coords.extend_from_slice(&bottom.coords); coords.extend_from_slice(&top_coords); } } } // Rotate the cylinder once along the x-axis to align them to the z-axis. Ok(Cylinder { alignment: Direction::Z, radius: final_radius, height: final_height, coords: rotate_coords(&coords, Direction::X), .. self }) } /// Calculate the box size. 
fn calc_box_size(&self) -> Coord { let diameter = 2.0 * self.radius; match self.alignment { Direction::X => Coord::new(self.height, diameter, diameter), Direction::Y => Coord::new(diameter, self.height, diameter), Direction::Z => Coord::new(diameter, diameter, self.height), } } } impl Describe for Cylinder { fn describe(&self) -> String { format!("{} (Cylinder surface of radius {:.2} and height {:.2} at {})", unwrap_name(&self.name), self.radius, self.height, self.origin) } fn describe_short(&self) -> String { format!("{} (Cylinder)", unwrap_name(&self.name)) } } #[cfg(test)] mod tests { use super::*; use surface::LatticeType::*; fn setup_cylinder(radius: f64, height: f64, lattice: &LatticeType) -> Cylinder { Cylinder { name: None, residue: None, lattice: lattice.clone(), alignment: Direction::Z, cap: None, origin: Coord::default(), radius, height, coords: vec![], } } #[test] fn cylinder_is_bent_from_sheet_as_expected() { let radius = 2.0; let height = 5.0; let density = 10.0; let lattice = PoissonDisc { density }; let cylinder = setup_cylinder(radius, height, &lattice).construct().unwrap(); // We should have a rough surface density match let expected = 2.0 * PI * radius * height * density; assert!((expected - cylinder.coords.len() as f64).abs() / expected < 0.1); // Not all coords should be at z = 0, ie. 
not still a sheet let sum_z = cylinder.coords.iter().map(|&Coord { x: _, y: _, z }| z.abs()).sum::<f64>(); assert!(sum_z > 0.0); // Currently the alignment should be along Z assert_eq!(Direction::Z, cylinder.alignment); // Rigorous test of coordinate structure for coord in cylinder.coords { let (r, h) = Coord::ORIGO.distance_cylindrical(coord, Direction::Z); assert!(r <= cylinder.radius); assert!(h >= 0.0 && h <= cylinder.height); } } #[test] fn cylinder_corrects_radius_and_height_to_match_lattice_spacing() { let radius = 1.0; // should give circumference = 2 * PI let height = 5.0; let a = 1.0; // not a match to the circumference let b = 1.1; // not a match to the height let lattice = Triclinic { a, b, gamma: 90.0 }; let cylinder = setup_cylinder(radius, height, &lattice).construct().unwrap(); assert_ne!(radius, cylinder.radius); assert_ne!(height, cylinder.height); // The best match to the circumference 2 * PI is the multiple 6 * a. assert_eq!(6.0 * a / (2.0 * PI), cylinder.radius); assert_eq!(5.0 * b, cylinder.height); } #[test] fn
() { let lattice = PoissonDisc { density: 10.0 }; assert!(setup_cylinder(-1.0, 1.0, &lattice).construct().is_err()); assert!(setup_cylinder(1.0, -1.0, &lattice).construct().is_err()); assert!(setup_cylinder(1.0, 1.0, &lattice).construct().is_ok()); } #[test] fn add_caps_to_cylinder() { let radius = 2.0; let height = 5.0; let lattice = Hexagonal { a: 0.1 }; let mut conf = setup_cylinder(radius, height, &lattice); // Without caps let cylinder = conf.clone().construct().unwrap(); let num_coords = cylinder.coords.len(); // With a bottom cap conf.cap = Some(CylinderCap::Bottom); let cylinder_cap = conf.clone().construct().unwrap(); // The first coordinates should be the original cylinder let (original, bottom) = cylinder_cap.coords.split_at(num_coords); assert_eq!(&original, &cylinder.coords.as_slice()); assert!(bottom.len() > 0); // All the bottom coordinates should be at z = 0 for coord in bottom { assert_eq!(coord.z, 0.0); } // A top cap conf.cap = Some(CylinderCap::Top); let cylinder_cap = conf.clone().construct().unwrap(); let (original, top) = cylinder_cap.coords.split_at(num_coords); assert_eq!(&original, &cylinder.coords.as_slice()); assert_eq!(top.len(), bottom.len()); // All the top coordinates should be at the cylinder height for coord in top { assert_eq!(coord.z, cylinder.height); } // Both caps conf.cap = Some(CylinderCap::Both); let cylinder_cap = conf.clone().construct().unwrap(); let (original, bottom_and_top) = cylinder_cap.coords.split_at(num_coords); assert_eq!(&original, &cylinder.coords.as_slice()); let (bottom_from_both, top_from_both) = bottom_and_top.split_at(bottom.len()); assert_eq!(bottom, bottom_from_both); assert_eq!(top, top_from_both); } #[test] fn calc_box_size_of_cylinder() { let radius = 2.0; let height = 5.0; let lattice = Hexagonal { a: 0.1 }; // Check each direction let mut cylinder = Cylinder { alignment: Direction::X, .. 
setup_cylinder(radius, height, &lattice) }; let diameter = 2.0 * radius; assert_eq!(Coord::new(height, diameter, diameter), cylinder.calc_box_size()); cylinder.alignment = Direction::Y; assert_eq!(Coord::new(diameter, height, diameter), cylinder.calc_box_size()); cylinder.alignment = Direction::Z; assert_eq!(Coord::new(diameter, diameter, height), cylinder.calc_box_size()); } }
constructing_cylinder_with_negative_radius_or_height_returns_error
identifier_name
lib.rs
#[macro_use] extern crate log; use io_partition::Partition; use std::fmt; use std::io; use std::io::{Read, Seek, SeekFrom}; fn get_bit(byte: u8, id: usize) -> Option<bool> { if id < 8 { Some((byte >> (7 - id) << 7) >= 1) } else { None } } #[derive(Debug)] pub enum PXError { IOError(io::Error), InvalidHeaderMagic([u8; 5]), InvalidDecompressedLength, FileToCompressTooLong(usize), } impl fmt::Display for PXError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { Self::IOError(_) => write!(f, "An IO error happened"), Self::InvalidHeaderMagic(value) => write!(f, "The header is invalid. It should either be PKDPX or AT4PX. The actual value of this header (in base 10) is {:?}", value), Self::InvalidDecompressedLength => write!(f, "The decompressed lenght doesn't correspond to what is indicated in the file"), Self::FileToCompressTooLong(lenght) => write!(f, "The file to compress is too long (real size: {}, max size: 256*256)", lenght) } } } impl From<io::Error> for PXError { fn from(err: io::Error) -> Self { Self::IOError(err) } } #[derive(Debug)] struct ControlFlags { value: [u8; 9], } impl ControlFlags { fn new(value: [u8; 9]) -> ControlFlags { ControlFlags { value } } fn find(&self, nb_high: u8) -> Option<usize> { for v in 0..self.value.len() { if self.value[v] == nb_high { return Some(v); } } None } } fn px_read_u16<T: Read>(file: &mut T) -> Result<u16, PXError> { let mut buf = [0; 2]; file.read_exact(&mut buf)?; Ok(u16::from_le_bytes(buf)) } fn px_read_u32<T: Read>(file: &mut T) -> Result<u32, PXError> { let mut buf = [0; 4]; file.read_exact(&mut buf)?; Ok(u32::from_le_bytes(buf)) } fn px_read_u8<T: Read>(file: &mut T) -> Result<u8, PXError> { let mut buf = [0]; file.read_exact(&mut buf)?; Ok(buf[0]) } /// decompress a pkdpx or at4px file. 
It take as input a Bytes buffer, and return a decompressed buffer (or an error) /// /// If atomatically determine if it is a pkdpx or an at4px based on the header /// If the file isn't the good lenght, it check if what is missing is a padding of a sir0. If it isn't, it return an error. pub fn decompress_px<F: Read + Seek>(mut file: F) -> Result<Vec<u8>, PXError> { debug!("decompressing a px-compressed file file"); file.seek(SeekFrom::Start(0))?; let mut header_5 = [0; 5]; file.read_exact(&mut header_5)?; let container_lenght = px_read_u16(&mut file)?; let mut control_flags_buffer = [0; 9]; file.read_exact(&mut control_flags_buffer)?; let control_flags = ControlFlags::new(control_flags_buffer); if &header_5 == b"PKDPX" { let decompressed_lenght = px_read_u32(&mut file)?; Ok(decompress_px_raw( file, control_flags, decompressed_lenght, container_lenght, 20, )?) } else if &header_5 == b"AT4PX" { let decompressed_lenght = px_read_u16(&mut file)? as u32; Ok(decompress_px_raw( file, control_flags, decompressed_lenght, container_lenght, 18, )?) 
} else { Err(PXError::InvalidHeaderMagic(header_5)) } } fn decompress_px_raw<T: Read + Seek>( mut file: T, control_flags: ControlFlags, decompressed_lenght: u32, container_lenght: u16, header_lenght: u64, ) -> Result<Vec<u8>, PXError> { let mut result = Vec::new(); let current_file_position = file.seek(SeekFrom::Current(0))?; let current_file_len = file.seek(SeekFrom::End(0))?; let mut raw_file = Partition::new( file, current_file_position, current_file_len - current_file_position, ) .unwrap(); trace!("starting decompression..."); 'main: loop { let mut bit_num = 0; let byte_info = px_read_u8(&mut raw_file)?; trace!("command byte: 0x{:x}", byte_info); while bit_num < 8 { let this_bit = get_bit(byte_info, bit_num).unwrap(); let this_byte = px_read_u8(&mut raw_file)?; if this_bit { trace!("bit is 1: pushing 0x{:2x}", this_byte); result.push(this_byte); } else { let nb_high: u8 = this_byte >> 4; let nb_low: u8 = this_byte << 4 >> 4; match control_flags.find(nb_high) { Some(ctrlflagindex) => { let byte_to_add = match ctrlflagindex { 0 => { let byte1 = (nb_low << 4) + nb_low; (byte1, byte1) } _ => { let mut nybbleval = nb_low; match ctrlflagindex { 1 => nybbleval += 1, 5 => nybbleval -= 1, _ => (), }; let mut nybbles = (nybbleval, nybbleval, nybbleval, nybbleval); match ctrlflagindex { 1 => nybbles.0 -= 1, 2 => nybbles.1 -= 1, 3 => nybbles.2 -= 1, 4 => nybbles.3 -= 1, 5 => nybbles.0 += 1, 6 => nybbles.1 += 1, 7 => nybbles.2 += 1, 8 => nybbles.3 += 1, _ => panic!(), } ((nybbles.0 << 4) + nybbles.1, (nybbles.2 << 4) + nybbles.3) } }; trace!("bit is 0: ctrlflagindex is {:x}, nb_high is {:x}, nb_low is {:x}, adding 0x{:2x}{:2x}", ctrlflagindex, nb_high, nb_low, byte_to_add.0, byte_to_add.1); result.push(byte_to_add.0); result.push(byte_to_add.1); } None => { let new_byte = px_read_u8(&mut raw_file)?; let offset_rel: i16 = -0x1000 + (((nb_low as i16) * 256) + (new_byte as i16)); let offset = (offset_rel as i32) + (result.len() as i32); let lenght = (nb_high as i32) + 3; 
trace!("bit is 0: pushing from past, relative offset is {}, lenght is {} (nb_low:{}, nb_high:{}, new_byte:0x{:2x})", offset_rel, lenght, nb_low, nb_high, new_byte); // the old, good looking code /*result.seek(offset as u64); for c in result.read(lenght as u64)? { result.add_a_byte(c)?; }*/ //TODO: check for panic for c in offset..(offset + lenght) { result.push(result[c as usize]) } } } }; bit_num += 1; if result.len() >= decompressed_lenght as usize { break'main; }; } trace!("current output size : {}", result.len()); } trace!("decoding loop finished."); trace!( "expected container lenght: {}, read: {}", container_lenght, raw_file.seek(SeekFrom::Current(0))? + 20 ); trace!( "expected decompressed lenght: {}, real decompressed lenght: {}", decompressed_lenght, result.len() ); if container_lenght as u64!= raw_file.seek(SeekFrom::Current(0))? + header_lenght { return Err(PXError::InvalidDecompressedLength); }; Ok(result) } /// check if a file is a px-compressed filed (PKDPX or AT4PX). /// return true if it is one, false otherwise. /// /// It doesn't do extensive test and don't guaranty that the file is a valid PKDPX (only check the header) /// Also doesn't save the position of the cursor in the file pub fn
<F: Read + Seek>(file: &mut F) -> Result<bool, PXError> { if file.seek(SeekFrom::End(0))? < 4 { return Ok(false); }; file.seek(SeekFrom::Start(0))?; let mut header_5 = [0; 5]; file.read_exact(&mut header_5)?; if &header_5 == b"PKDPX" { return Ok(true); }; if &header_5 == b"AT4PX" { return Ok(true); }; Ok(false) } /// use a naive compression algoritm to compress the input to a PKDPX file pub fn naive_compression<F: Read + Seek>(mut file: F) -> Result<Vec<u8>, PXError> { let decompressed_size = file.seek(SeekFrom::End(0))?; file.seek(SeekFrom::Start(0))?; let mut result = Vec::new(); // header result.append(&mut b"PKDPX".to_vec()); // container_lenght result.append(&mut u16::to_le_bytes(0).to_vec()); //TODO: rewrite // control flags for _ in 0..9 { result.push(0); } // decompressed lenght result.append(&mut u32::to_le_bytes(decompressed_size as u32).to_vec()); let mut loop_nb = 0; loop { if loop_nb % 8 == 0 { result.push(0xFF); }; result.push(px_read_u8(&mut file)?); if file.seek(SeekFrom::Current(0))? >= decompressed_size { break; }; loop_nb += 1; } let container_lenght = result.len(); while result.len() % 16!= 0 { result.push(0xAA); } if container_lenght > (core::u16::MAX as usize) { return Err(PXError::FileToCompressTooLong(container_lenght)); }; let lenght_splice = u16::to_le_bytes(container_lenght as u16); result[5] = lenght_splice[0]; result[6] = lenght_splice[1]; Ok(result) }
is_px
identifier_name
lib.rs
#[macro_use] extern crate log; use io_partition::Partition; use std::fmt; use std::io; use std::io::{Read, Seek, SeekFrom}; fn get_bit(byte: u8, id: usize) -> Option<bool> { if id < 8 { Some((byte >> (7 - id) << 7) >= 1) } else { None } } #[derive(Debug)] pub enum PXError { IOError(io::Error), InvalidHeaderMagic([u8; 5]), InvalidDecompressedLength, FileToCompressTooLong(usize), } impl fmt::Display for PXError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { Self::IOError(_) => write!(f, "An IO error happened"), Self::InvalidHeaderMagic(value) => write!(f, "The header is invalid. It should either be PKDPX or AT4PX. The actual value of this header (in base 10) is {:?}", value), Self::InvalidDecompressedLength => write!(f, "The decompressed lenght doesn't correspond to what is indicated in the file"), Self::FileToCompressTooLong(lenght) => write!(f, "The file to compress is too long (real size: {}, max size: 256*256)", lenght) } } } impl From<io::Error> for PXError { fn from(err: io::Error) -> Self { Self::IOError(err) } } #[derive(Debug)] struct ControlFlags { value: [u8; 9], } impl ControlFlags { fn new(value: [u8; 9]) -> ControlFlags { ControlFlags { value } } fn find(&self, nb_high: u8) -> Option<usize> { for v in 0..self.value.len() { if self.value[v] == nb_high { return Some(v); } } None } } fn px_read_u16<T: Read>(file: &mut T) -> Result<u16, PXError> { let mut buf = [0; 2]; file.read_exact(&mut buf)?; Ok(u16::from_le_bytes(buf)) } fn px_read_u32<T: Read>(file: &mut T) -> Result<u32, PXError> { let mut buf = [0; 4]; file.read_exact(&mut buf)?; Ok(u32::from_le_bytes(buf)) } fn px_read_u8<T: Read>(file: &mut T) -> Result<u8, PXError> { let mut buf = [0]; file.read_exact(&mut buf)?; Ok(buf[0]) } /// decompress a pkdpx or at4px file. 
It take as input a Bytes buffer, and return a decompressed buffer (or an error) /// /// If atomatically determine if it is a pkdpx or an at4px based on the header /// If the file isn't the good lenght, it check if what is missing is a padding of a sir0. If it isn't, it return an error. pub fn decompress_px<F: Read + Seek>(mut file: F) -> Result<Vec<u8>, PXError> { debug!("decompressing a px-compressed file file"); file.seek(SeekFrom::Start(0))?; let mut header_5 = [0; 5]; file.read_exact(&mut header_5)?; let container_lenght = px_read_u16(&mut file)?; let mut control_flags_buffer = [0; 9]; file.read_exact(&mut control_flags_buffer)?; let control_flags = ControlFlags::new(control_flags_buffer); if &header_5 == b"PKDPX" { let decompressed_lenght = px_read_u32(&mut file)?; Ok(decompress_px_raw( file, control_flags, decompressed_lenght, container_lenght, 20, )?) } else if &header_5 == b"AT4PX" { let decompressed_lenght = px_read_u16(&mut file)? as u32; Ok(decompress_px_raw( file, control_flags, decompressed_lenght, container_lenght, 18, )?) 
} else { Err(PXError::InvalidHeaderMagic(header_5)) } } fn decompress_px_raw<T: Read + Seek>( mut file: T, control_flags: ControlFlags, decompressed_lenght: u32, container_lenght: u16, header_lenght: u64, ) -> Result<Vec<u8>, PXError> { let mut result = Vec::new(); let current_file_position = file.seek(SeekFrom::Current(0))?; let current_file_len = file.seek(SeekFrom::End(0))?; let mut raw_file = Partition::new( file, current_file_position, current_file_len - current_file_position, ) .unwrap(); trace!("starting decompression..."); 'main: loop { let mut bit_num = 0; let byte_info = px_read_u8(&mut raw_file)?; trace!("command byte: 0x{:x}", byte_info); while bit_num < 8 { let this_bit = get_bit(byte_info, bit_num).unwrap(); let this_byte = px_read_u8(&mut raw_file)?; if this_bit { trace!("bit is 1: pushing 0x{:2x}", this_byte); result.push(this_byte); } else { let nb_high: u8 = this_byte >> 4; let nb_low: u8 = this_byte << 4 >> 4; match control_flags.find(nb_high) { Some(ctrlflagindex) => { let byte_to_add = match ctrlflagindex { 0 => { let byte1 = (nb_low << 4) + nb_low; (byte1, byte1) } _ => { let mut nybbleval = nb_low; match ctrlflagindex { 1 => nybbleval += 1, 5 => nybbleval -= 1, _ => (), }; let mut nybbles = (nybbleval, nybbleval, nybbleval, nybbleval); match ctrlflagindex { 1 => nybbles.0 -= 1, 2 => nybbles.1 -= 1, 3 => nybbles.2 -= 1, 4 => nybbles.3 -= 1, 5 => nybbles.0 += 1, 6 => nybbles.1 += 1, 7 => nybbles.2 += 1, 8 => nybbles.3 += 1, _ => panic!(), } ((nybbles.0 << 4) + nybbles.1, (nybbles.2 << 4) + nybbles.3) } }; trace!("bit is 0: ctrlflagindex is {:x}, nb_high is {:x}, nb_low is {:x}, adding 0x{:2x}{:2x}", ctrlflagindex, nb_high, nb_low, byte_to_add.0, byte_to_add.1); result.push(byte_to_add.0); result.push(byte_to_add.1);
} None => { let new_byte = px_read_u8(&mut raw_file)?; let offset_rel: i16 = -0x1000 + (((nb_low as i16) * 256) + (new_byte as i16)); let offset = (offset_rel as i32) + (result.len() as i32); let lenght = (nb_high as i32) + 3; trace!("bit is 0: pushing from past, relative offset is {}, lenght is {} (nb_low:{}, nb_high:{}, new_byte:0x{:2x})", offset_rel, lenght, nb_low, nb_high, new_byte); // the old, good looking code /*result.seek(offset as u64); for c in result.read(lenght as u64)? { result.add_a_byte(c)?; }*/ //TODO: check for panic for c in offset..(offset + lenght) { result.push(result[c as usize]) } } } }; bit_num += 1; if result.len() >= decompressed_lenght as usize { break'main; }; } trace!("current output size : {}", result.len()); } trace!("decoding loop finished."); trace!( "expected container lenght: {}, read: {}", container_lenght, raw_file.seek(SeekFrom::Current(0))? + 20 ); trace!( "expected decompressed lenght: {}, real decompressed lenght: {}", decompressed_lenght, result.len() ); if container_lenght as u64!= raw_file.seek(SeekFrom::Current(0))? + header_lenght { return Err(PXError::InvalidDecompressedLength); }; Ok(result) } /// check if a file is a px-compressed filed (PKDPX or AT4PX). /// return true if it is one, false otherwise. /// /// It doesn't do extensive test and don't guaranty that the file is a valid PKDPX (only check the header) /// Also doesn't save the position of the cursor in the file pub fn is_px<F: Read + Seek>(file: &mut F) -> Result<bool, PXError> { if file.seek(SeekFrom::End(0))? 
< 4 { return Ok(false); }; file.seek(SeekFrom::Start(0))?; let mut header_5 = [0; 5]; file.read_exact(&mut header_5)?; if &header_5 == b"PKDPX" { return Ok(true); }; if &header_5 == b"AT4PX" { return Ok(true); }; Ok(false) } /// use a naive compression algoritm to compress the input to a PKDPX file pub fn naive_compression<F: Read + Seek>(mut file: F) -> Result<Vec<u8>, PXError> { let decompressed_size = file.seek(SeekFrom::End(0))?; file.seek(SeekFrom::Start(0))?; let mut result = Vec::new(); // header result.append(&mut b"PKDPX".to_vec()); // container_lenght result.append(&mut u16::to_le_bytes(0).to_vec()); //TODO: rewrite // control flags for _ in 0..9 { result.push(0); } // decompressed lenght result.append(&mut u32::to_le_bytes(decompressed_size as u32).to_vec()); let mut loop_nb = 0; loop { if loop_nb % 8 == 0 { result.push(0xFF); }; result.push(px_read_u8(&mut file)?); if file.seek(SeekFrom::Current(0))? >= decompressed_size { break; }; loop_nb += 1; } let container_lenght = result.len(); while result.len() % 16!= 0 { result.push(0xAA); } if container_lenght > (core::u16::MAX as usize) { return Err(PXError::FileToCompressTooLong(container_lenght)); }; let lenght_splice = u16::to_le_bytes(container_lenght as u16); result[5] = lenght_splice[0]; result[6] = lenght_splice[1]; Ok(result) }
random_line_split
lib.rs
#[macro_use] extern crate log; use io_partition::Partition; use std::fmt; use std::io; use std::io::{Read, Seek, SeekFrom}; fn get_bit(byte: u8, id: usize) -> Option<bool> { if id < 8 { Some((byte >> (7 - id) << 7) >= 1) } else { None } } #[derive(Debug)] pub enum PXError { IOError(io::Error), InvalidHeaderMagic([u8; 5]), InvalidDecompressedLength, FileToCompressTooLong(usize), } impl fmt::Display for PXError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { Self::IOError(_) => write!(f, "An IO error happened"), Self::InvalidHeaderMagic(value) => write!(f, "The header is invalid. It should either be PKDPX or AT4PX. The actual value of this header (in base 10) is {:?}", value), Self::InvalidDecompressedLength => write!(f, "The decompressed lenght doesn't correspond to what is indicated in the file"), Self::FileToCompressTooLong(lenght) => write!(f, "The file to compress is too long (real size: {}, max size: 256*256)", lenght) } } } impl From<io::Error> for PXError { fn from(err: io::Error) -> Self { Self::IOError(err) } } #[derive(Debug)] struct ControlFlags { value: [u8; 9], } impl ControlFlags { fn new(value: [u8; 9]) -> ControlFlags { ControlFlags { value } } fn find(&self, nb_high: u8) -> Option<usize> { for v in 0..self.value.len() { if self.value[v] == nb_high { return Some(v); } } None } } fn px_read_u16<T: Read>(file: &mut T) -> Result<u16, PXError> { let mut buf = [0; 2]; file.read_exact(&mut buf)?; Ok(u16::from_le_bytes(buf)) } fn px_read_u32<T: Read>(file: &mut T) -> Result<u32, PXError> { let mut buf = [0; 4]; file.read_exact(&mut buf)?; Ok(u32::from_le_bytes(buf)) } fn px_read_u8<T: Read>(file: &mut T) -> Result<u8, PXError> { let mut buf = [0]; file.read_exact(&mut buf)?; Ok(buf[0]) } /// decompress a pkdpx or at4px file. 
It take as input a Bytes buffer, and return a decompressed buffer (or an error) /// /// If atomatically determine if it is a pkdpx or an at4px based on the header /// If the file isn't the good lenght, it check if what is missing is a padding of a sir0. If it isn't, it return an error. pub fn decompress_px<F: Read + Seek>(mut file: F) -> Result<Vec<u8>, PXError>
)?) } else if &header_5 == b"AT4PX" { let decompressed_lenght = px_read_u16(&mut file)? as u32; Ok(decompress_px_raw( file, control_flags, decompressed_lenght, container_lenght, 18, )?) } else { Err(PXError::InvalidHeaderMagic(header_5)) } } fn decompress_px_raw<T: Read + Seek>( mut file: T, control_flags: ControlFlags, decompressed_lenght: u32, container_lenght: u16, header_lenght: u64, ) -> Result<Vec<u8>, PXError> { let mut result = Vec::new(); let current_file_position = file.seek(SeekFrom::Current(0))?; let current_file_len = file.seek(SeekFrom::End(0))?; let mut raw_file = Partition::new( file, current_file_position, current_file_len - current_file_position, ) .unwrap(); trace!("starting decompression..."); 'main: loop { let mut bit_num = 0; let byte_info = px_read_u8(&mut raw_file)?; trace!("command byte: 0x{:x}", byte_info); while bit_num < 8 { let this_bit = get_bit(byte_info, bit_num).unwrap(); let this_byte = px_read_u8(&mut raw_file)?; if this_bit { trace!("bit is 1: pushing 0x{:2x}", this_byte); result.push(this_byte); } else { let nb_high: u8 = this_byte >> 4; let nb_low: u8 = this_byte << 4 >> 4; match control_flags.find(nb_high) { Some(ctrlflagindex) => { let byte_to_add = match ctrlflagindex { 0 => { let byte1 = (nb_low << 4) + nb_low; (byte1, byte1) } _ => { let mut nybbleval = nb_low; match ctrlflagindex { 1 => nybbleval += 1, 5 => nybbleval -= 1, _ => (), }; let mut nybbles = (nybbleval, nybbleval, nybbleval, nybbleval); match ctrlflagindex { 1 => nybbles.0 -= 1, 2 => nybbles.1 -= 1, 3 => nybbles.2 -= 1, 4 => nybbles.3 -= 1, 5 => nybbles.0 += 1, 6 => nybbles.1 += 1, 7 => nybbles.2 += 1, 8 => nybbles.3 += 1, _ => panic!(), } ((nybbles.0 << 4) + nybbles.1, (nybbles.2 << 4) + nybbles.3) } }; trace!("bit is 0: ctrlflagindex is {:x}, nb_high is {:x}, nb_low is {:x}, adding 0x{:2x}{:2x}", ctrlflagindex, nb_high, nb_low, byte_to_add.0, byte_to_add.1); result.push(byte_to_add.0); result.push(byte_to_add.1); } None => { let new_byte = px_read_u8(&mut 
raw_file)?; let offset_rel: i16 = -0x1000 + (((nb_low as i16) * 256) + (new_byte as i16)); let offset = (offset_rel as i32) + (result.len() as i32); let lenght = (nb_high as i32) + 3; trace!("bit is 0: pushing from past, relative offset is {}, lenght is {} (nb_low:{}, nb_high:{}, new_byte:0x{:2x})", offset_rel, lenght, nb_low, nb_high, new_byte); // the old, good looking code /*result.seek(offset as u64); for c in result.read(lenght as u64)? { result.add_a_byte(c)?; }*/ //TODO: check for panic for c in offset..(offset + lenght) { result.push(result[c as usize]) } } } }; bit_num += 1; if result.len() >= decompressed_lenght as usize { break'main; }; } trace!("current output size : {}", result.len()); } trace!("decoding loop finished."); trace!( "expected container lenght: {}, read: {}", container_lenght, raw_file.seek(SeekFrom::Current(0))? + 20 ); trace!( "expected decompressed lenght: {}, real decompressed lenght: {}", decompressed_lenght, result.len() ); if container_lenght as u64!= raw_file.seek(SeekFrom::Current(0))? + header_lenght { return Err(PXError::InvalidDecompressedLength); }; Ok(result) } /// check if a file is a px-compressed filed (PKDPX or AT4PX). /// return true if it is one, false otherwise. /// /// It doesn't do extensive test and don't guaranty that the file is a valid PKDPX (only check the header) /// Also doesn't save the position of the cursor in the file pub fn is_px<F: Read + Seek>(file: &mut F) -> Result<bool, PXError> { if file.seek(SeekFrom::End(0))? 
< 4 { return Ok(false); }; file.seek(SeekFrom::Start(0))?; let mut header_5 = [0; 5]; file.read_exact(&mut header_5)?; if &header_5 == b"PKDPX" { return Ok(true); }; if &header_5 == b"AT4PX" { return Ok(true); }; Ok(false) } /// use a naive compression algoritm to compress the input to a PKDPX file pub fn naive_compression<F: Read + Seek>(mut file: F) -> Result<Vec<u8>, PXError> { let decompressed_size = file.seek(SeekFrom::End(0))?; file.seek(SeekFrom::Start(0))?; let mut result = Vec::new(); // header result.append(&mut b"PKDPX".to_vec()); // container_lenght result.append(&mut u16::to_le_bytes(0).to_vec()); //TODO: rewrite // control flags for _ in 0..9 { result.push(0); } // decompressed lenght result.append(&mut u32::to_le_bytes(decompressed_size as u32).to_vec()); let mut loop_nb = 0; loop { if loop_nb % 8 == 0 { result.push(0xFF); }; result.push(px_read_u8(&mut file)?); if file.seek(SeekFrom::Current(0))? >= decompressed_size { break; }; loop_nb += 1; } let container_lenght = result.len(); while result.len() % 16!= 0 { result.push(0xAA); } if container_lenght > (core::u16::MAX as usize) { return Err(PXError::FileToCompressTooLong(container_lenght)); }; let lenght_splice = u16::to_le_bytes(container_lenght as u16); result[5] = lenght_splice[0]; result[6] = lenght_splice[1]; Ok(result) }
{ debug!("decompressing a px-compressed file file"); file.seek(SeekFrom::Start(0))?; let mut header_5 = [0; 5]; file.read_exact(&mut header_5)?; let container_lenght = px_read_u16(&mut file)?; let mut control_flags_buffer = [0; 9]; file.read_exact(&mut control_flags_buffer)?; let control_flags = ControlFlags::new(control_flags_buffer); if &header_5 == b"PKDPX" { let decompressed_lenght = px_read_u32(&mut file)?; Ok(decompress_px_raw( file, control_flags, decompressed_lenght, container_lenght, 20,
identifier_body
film.rs
use crate::core::geometry::point::{Point2i, Point2f}; use crate::core::spectrum::{Spectrum, xyz_to_rgb}; use crate::core::pbrt::{Float, Options, clamp, INFINITY}; use crate::core::filter::{Filters, Filter}; use crate::core::geometry::bounds::{Bounds2i, Bounds2f}; use crate::core::parallel::AtomicFloat; use std::sync::RwLock; use log::{info, error, warn}; use anyhow::Result; use crate::core::geometry::vector::Vector2f; use crate::core::paramset::ParamSet; use crate::core::imageio::write_image; use std::path::{PathBuf}; use smallvec::SmallVec; const FILTER_TABLE_WIDTH: usize = 16; #[derive(Default, Clone, Copy)] struct FilmTilePixel { contrib_sum : Spectrum, filter_weight_sum : Float } #[derive(Clone)] struct Pixel { xyz : [Float; 3], filter_weight_sum : Float, splat_xyz : [AtomicFloat; 3], _pad : Float } impl Default for Pixel { fn default() -> Self { Self { xyz: [0.0; 3], filter_weight_sum: 0.0, splat_xyz: [AtomicFloat::default(), AtomicFloat::default(), AtomicFloat::default()], _pad: 0.0 } } } pub struct Film { pub full_resolution : Point2i, pub diagonal : Float, pub filter : Filters, pub filename : PathBuf, pub cropped_pixel_bounds: Bounds2i, pixels : RwLock<Vec<Pixel>>, filter_table : [Float; FILTER_TABLE_WIDTH * FILTER_TABLE_WIDTH], scale : Float, max_sample_luminance : Float } impl Film { pub fn new(resolution: &Point2i, crop_window: &Bounds2f, filt: Filters, diagonal: Float, filename: PathBuf, scale: Float, max_sample_luminance: Float) -> Self { let crop_pixel_bounds = Bounds2i::from_points( &Point2i::new( (resolution.x as Float * crop_window.p_min.x).ceil() as isize, (resolution.y as Float * crop_window.p_min.y).ceil() as isize), &Point2i::new( (resolution.x as Float * crop_window.p_max.x).ceil() as isize, (resolution.y as Float * crop_window.p_max.y).ceil() as isize ) ); info!("Created film with full resolution {}\ . 
Crop window of {} -> croppedPixelBounds {}", resolution, crop_window, crop_pixel_bounds); // Allocate film image storage let pixels = vec![Pixel::default(); crop_pixel_bounds.area() as usize]; // TODO: filmPixelMemory // Precompute filter weight table let mut offset = 0; let mut filter_table = [0.0; FILTER_TABLE_WIDTH * FILTER_TABLE_WIDTH]; for y in 0..FILTER_TABLE_WIDTH { for x in 0..FILTER_TABLE_WIDTH { let p = Point2f::new( (x as Float + 0.5) * filt.radius().x / FILTER_TABLE_WIDTH as Float, (y as Float + 0.5) * filt.radius().y / FILTER_TABLE_WIDTH as Float ); filter_table[offset] = filt.evaluate(&p); offset += 1; } } Self { full_resolution: *resolution, diagonal: diagonal * 0.001, filter: filt, filename: filename.to_owned(), scale, max_sample_luminance, cropped_pixel_bounds: crop_pixel_bounds, pixels: RwLock::new(pixels), filter_table } } pub fn get_sample_bounds(&self) -> Bounds2i { let p1 = (Point2f::from(self.cropped_pixel_bounds.p_min) + Vector2f::new(0.5, 0.5) - self.filter.radius()).floor(); let p2 = (Point2f::from(self.cropped_pixel_bounds.p_max) - Vector2f::new(0.5, 0.5) + self.filter.radius()).ceil(); Bounds2i::from_points(&Point2i::from(p1), &Point2i::from(p2)) } pub fn get_physical_extent(&self) -> Bounds2f { let aspect = self.full_resolution.y as Float / self.full_resolution.x as Float; let x = (self.diagonal * self.diagonal / (1.0 + aspect * aspect)).sqrt(); let y = aspect * x; Bounds2f::new( &Point2f::new(-x / 2.0, -y / 2.0), &Point2f::new(x / 2.0, y / 2.0) ) } pub fn get_film_tile(&self, sample_bounds: &Bounds2i) -> FilmTile { // Bound image pixels that samples in sampleBounds contribute to let half_pixel = Vector2f::new(0.5, 0.5); let float_bounds = Bounds2f { p_min: Point2f::from(sample_bounds.p_min), p_max: Point2f::from(sample_bounds.p_max) }; let p0f = (float_bounds.p_min - half_pixel - self.filter.radius()).ceil(); let p1f = (float_bounds.p_max - half_pixel + self.filter.radius()).floor(); let p0 = Point2i::from(p0f); let p1 = 
Point2i::from(p1f) + Point2i::new(1, 1); let tile_bounds = Bounds2i::from_points(&p0, &p1).intersect(&self.cropped_pixel_bounds); FilmTile::new(&tile_bounds, &self.filter.radius(), &self.filter_table, FILTER_TABLE_WIDTH, self.max_sample_luminance) } pub fn merge_film_tile(&self, tile: &mut FilmTile) { // TODO: ProfilePhase let mut pixels = self.pixels.write().unwrap(); info!("Merging film tile {}", tile.pixel_bounds); for p in &tile.get_pixel_bounds() { // Merge pixel into Film::pixels let tile_pixel = tile.get_pixel(&p); let offset = self.get_pixel(&p); let merge_pixel = &mut pixels[offset]; let xyz = tile_pixel.contrib_sum.to_xyz(); for i in 0..3 { merge_pixel.xyz[i] += xyz[i]; } merge_pixel.filter_weight_sum += tile_pixel.filter_weight_sum; } } fn get_pixel(&self, p: &Point2i) -> usize { assert!(self.cropped_pixel_bounds.inside_exclusive(p)); let width = self.cropped_pixel_bounds.p_max.x - self.cropped_pixel_bounds.p_min.x; let offset = (p.x - self.cropped_pixel_bounds.p_min.x) + (p.y - self.cropped_pixel_bounds.p_min.y) * width; offset as usize } pub fn set_image(&self, img: &[Spectrum]) { let npixels = self.cropped_pixel_bounds.area() as usize; let mut pixels = self.pixels.write().unwrap(); for i in 0..npixels { let p = &mut pixels[i]; p.xyz = img[i].to_xyz(); p.filter_weight_sum = 1.0; p.splat_xyz[0] = AtomicFloat::new(0.0); p.splat_xyz[1] = AtomicFloat::new(0.0); p.splat_xyz[2] = AtomicFloat::new(0.0); } } pub fn add_splat(&self, p: &Point2f, mut v: Spectrum) { // TODO: ProfilePhase if v.has_nans() { error!("Ignoring splatted spectrum with NaN values at ({}, {})", p.x, p.y); return; } else if v.y() < 0.0 { error!("Ignoring splatted spectrum with negative luminance {} at ({}, {})", v.y(), p.x, p.y); return } else if v.y().is_infinite()
let pi = Point2i::from(p.floor()); if!self.cropped_pixel_bounds.inside_exclusive(&pi) { return; } if v.y() > self.max_sample_luminance { v *= self.max_sample_luminance / v.y(); } let mut pixels = self.pixels.write().unwrap(); let xyz = v.to_xyz(); let offset = self.get_pixel(&pi); let pixel = &mut pixels[offset]; for i in 0..3 { pixel.splat_xyz[i].add(xyz[i]); } } pub fn write_image(&self, splat_scale: Float) -> Result<()> { // Convert image to RGB and compute final pixel values info!("Converting image to RGB and computing final weighted pixel values"); let mut rgb = vec![0.0; (3 * self.cropped_pixel_bounds.area()) as usize]; let mut offset: usize; for p in &self.cropped_pixel_bounds { // Convert pixel XYZ color to RGB offset = self.get_pixel(&p); let pixel = &self.pixels.read().unwrap()[offset]; let start = offset * 3; let xyz = xyz_to_rgb(pixel.xyz); rgb[start] = xyz[0]; rgb[start + 1] = xyz[1]; rgb[start + 2] = xyz[2]; // Normalize pixel with weight sum let filter_weight_sum = pixel.filter_weight_sum; if filter_weight_sum!= 0.0 { let invwt = 1.0 / filter_weight_sum; rgb[start] = (rgb[start] * invwt).max(0.0); rgb[start + 1] = (rgb[start + 1] * invwt).max(0.0); rgb[start + 2] = (rgb[start + 2] * invwt).max(0.0); } // splate value at pixel let splat_xyz: [Float; 3] = [ pixel.splat_xyz[0].clone().into(), pixel.splat_xyz[1].clone().into(), pixel.splat_xyz[2].clone().into() ]; let splat_rgb = xyz_to_rgb(splat_xyz); rgb[start] += splat_scale * splat_rgb[0]; rgb[start + 1] += splat_scale * splat_rgb[1]; rgb[start + 2] += splat_scale * splat_rgb[2]; // Scale pixel value by scale rgb[start] *= self.scale; rgb[start + 1] *= self.scale; rgb[start + 2] *= self.scale; } info!("Writing image {} with bounds {}", self.filename.display(), self.cropped_pixel_bounds); // TODO: WriteImage write_image(&self.filename, &rgb, &self.cropped_pixel_bounds, &self.full_resolution) } } pub struct FilmTile<'a> { pub pixel_bounds : Bounds2i, filter_radius : Vector2f, inv_filter_radius : 
Vector2f, filter_table : &'a[Float], filter_table_size : usize, pixels : Vec<FilmTilePixel>, max_sample_luminance: Float } impl<'a> FilmTile<'a> { pub fn new(pixel_bounds: &Bounds2i, filter_radius: &Vector2f, filter_table: &'a[Float], filter_table_size: usize, max_sample_luminance: Float) -> Self { Self { filter_table, filter_table_size, max_sample_luminance, pixel_bounds: *pixel_bounds, filter_radius: *filter_radius, inv_filter_radius: Vector2f::new(1.0 / filter_radius.x, 1.0 / filter_radius.y), pixels: vec![FilmTilePixel::default(); std::cmp::max(0, pixel_bounds.area() as usize)] } } pub fn add_sample(&mut self, pfilm: &Point2f, mut L: Spectrum, sample_weight: Float) { // TODO: ProfilePhase if L.y() > self.max_sample_luminance { L *= Spectrum::new(self.max_sample_luminance / L.y()); } // Compute sample's raster bounds; let pfilm_discrete = *pfilm - Vector2f::new(0.5, 0.5); let p0f = (pfilm_discrete - self.filter_radius).ceil(); let p1f = (pfilm_discrete + self.filter_radius).floor(); let mut p0 = Point2i::new(p0f.x as isize, p0f.y as isize); let mut p1 = Point2i::new(p1f.x as isize, p1f.y as isize) + Point2i::new(1, 1); p0 = p0.max(&self.pixel_bounds.p_min); p1 = p1.min(&self.pixel_bounds.p_max); // Loop over filter support and add sample to pixel arrays; let mut ifx: SmallVec<[usize; 16]> = SmallVec::with_capacity(p1.x as usize - p0.x as usize); let mut ify: SmallVec<[usize; 16]> = SmallVec::with_capacity(p1.y as usize - p0.y as usize); for x in p0.x..p1.x { let fx = ((x as Float - pfilm_discrete.x) * self.inv_filter_radius.x * self.filter_table_size as Float).abs(); ifx.push(std::cmp::min(fx.floor() as usize, self.filter_table_size - 1)); } for y in p0.y..p1.y { let fy = ((y as Float - pfilm_discrete.y) * self.inv_filter_radius.y * self.filter_table_size as Float).abs(); ify.push(std::cmp::min(fy.floor() as usize, self.filter_table_size - 1)); } for y in p0.y..p1.y { for x in p0.x..p1.x { // Evaluate filter value at (x, y) pixel let offset = ify[(y - p0.y) as 
usize] * self.filter_table_size + ifx[(x - p0.x) as usize]; let filter_weight = self.filter_table[offset]; // Update pixel values with filtered sample contribution let pixel = self.get_pixel(&Point2i::new(x, y)); pixel.contrib_sum += L * Spectrum::new(sample_weight) * Spectrum::new(filter_weight); pixel.filter_weight_sum += filter_weight; } } } fn get_pixel(&mut self, p: &Point2i) -> &mut FilmTilePixel { assert!(self.pixel_bounds.inside_exclusive(p)); let width = self.pixel_bounds.p_max.x - self.pixel_bounds.p_min.x; let offset = (p.x - self.pixel_bounds.p_min.x) + (p.y - self.pixel_bounds.p_min.y) * width; &mut self.pixels[offset as usize] } fn get_pixel_bounds(&self) -> Bounds2i { self.pixel_bounds } } pub fn create_film(params: &ParamSet, filter: Filters, opts: &Options) -> Film { let filename = if!opts.image_file.as_os_str().is_empty() { let params_filename = params.find_one_string("filename", "".to_owned()); if!params_filename.is_empty() { warn!("Output filename supplied on command line. \"{}\" is overriding \ filename provided in scene description file, \"{}\"", opts.image_file.display(), params_filename); } opts.image_file.clone() } else { let f = params.find_one_string("filename", "pbrt.exr".to_owned()); PathBuf::from(f) }; let mut xres = params.find_one_int("xresolution", 1280); let mut yres = params.find_one_int("yresolution", 720); if opts.quick_render { xres = std::cmp::max(1, xres / 4); yres = std::cmp::max(1, yres / 4); } let mut crop = Bounds2f::default(); let mut cwi = 0; let cr_some = params.find_float("cropwindow", &mut cwi); if cr_some.is_some() && cwi == 4 { let cr = cr_some.unwrap(); crop.p_min.x = clamp(cr[0].min(cr[1]), 0.0, 1.0); crop.p_max.x = clamp(cr[0].max(cr[1]), 0.0, 1.0); crop.p_min.y = clamp(cr[2].min(cr[3]), 0.0, 1.0); crop.p_max.y = clamp(cr[2].max(cr[3]), 0.0, 1.0); } else if cr_some.is_some() { error!("{} values supplised fir \"cropwindow\". 
Expected 4.", cwi); } else { crop = Bounds2f::new( &Point2f::new( clamp(opts.crop_window[0][0], 0.0, 1.0), clamp(opts.crop_window[1][0], 0.0, 1.0)), &Point2f::new( clamp(opts.crop_window[0][1], 0.0, 1.0), clamp(opts.crop_window[1][1], 0.0, 1.0) ) ); } let scale = params.find_one_float("scale", 1.0); let diagonal = params.find_one_float("diagonal", 35.0); let max_sample_luminance = params.find_one_float("maxsampleluminance", INFINITY); Film::new( &Point2i::new(xres, yres), &crop, filter, diagonal, filename, scale, max_sample_luminance) }
{ error!("Ignoring slatted spectrum with infinite luminance at ({}, {})", p.x, p.y); return; }
conditional_block
film.rs
use crate::core::geometry::point::{Point2i, Point2f}; use crate::core::spectrum::{Spectrum, xyz_to_rgb}; use crate::core::pbrt::{Float, Options, clamp, INFINITY}; use crate::core::filter::{Filters, Filter}; use crate::core::geometry::bounds::{Bounds2i, Bounds2f}; use crate::core::parallel::AtomicFloat; use std::sync::RwLock; use log::{info, error, warn}; use anyhow::Result; use crate::core::geometry::vector::Vector2f; use crate::core::paramset::ParamSet; use crate::core::imageio::write_image; use std::path::{PathBuf}; use smallvec::SmallVec; const FILTER_TABLE_WIDTH: usize = 16; #[derive(Default, Clone, Copy)] struct FilmTilePixel { contrib_sum : Spectrum, filter_weight_sum : Float } #[derive(Clone)] struct Pixel { xyz : [Float; 3], filter_weight_sum : Float, splat_xyz : [AtomicFloat; 3], _pad : Float } impl Default for Pixel { fn default() -> Self { Self { xyz: [0.0; 3], filter_weight_sum: 0.0, splat_xyz: [AtomicFloat::default(), AtomicFloat::default(), AtomicFloat::default()], _pad: 0.0 } } } pub struct Film { pub full_resolution : Point2i, pub diagonal : Float, pub filter : Filters, pub filename : PathBuf, pub cropped_pixel_bounds: Bounds2i, pixels : RwLock<Vec<Pixel>>, filter_table : [Float; FILTER_TABLE_WIDTH * FILTER_TABLE_WIDTH], scale : Float, max_sample_luminance : Float } impl Film { pub fn new(resolution: &Point2i, crop_window: &Bounds2f, filt: Filters, diagonal: Float, filename: PathBuf, scale: Float, max_sample_luminance: Float) -> Self { let crop_pixel_bounds = Bounds2i::from_points( &Point2i::new( (resolution.x as Float * crop_window.p_min.x).ceil() as isize, (resolution.y as Float * crop_window.p_min.y).ceil() as isize), &Point2i::new( (resolution.x as Float * crop_window.p_max.x).ceil() as isize, (resolution.y as Float * crop_window.p_max.y).ceil() as isize ) ); info!("Created film with full resolution {}\ . 
Crop window of {} -> croppedPixelBounds {}", resolution, crop_window, crop_pixel_bounds); // Allocate film image storage let pixels = vec![Pixel::default(); crop_pixel_bounds.area() as usize]; // TODO: filmPixelMemory // Precompute filter weight table let mut offset = 0; let mut filter_table = [0.0; FILTER_TABLE_WIDTH * FILTER_TABLE_WIDTH]; for y in 0..FILTER_TABLE_WIDTH { for x in 0..FILTER_TABLE_WIDTH { let p = Point2f::new( (x as Float + 0.5) * filt.radius().x / FILTER_TABLE_WIDTH as Float, (y as Float + 0.5) * filt.radius().y / FILTER_TABLE_WIDTH as Float ); filter_table[offset] = filt.evaluate(&p); offset += 1; } } Self { full_resolution: *resolution, diagonal: diagonal * 0.001, filter: filt, filename: filename.to_owned(), scale, max_sample_luminance, cropped_pixel_bounds: crop_pixel_bounds, pixels: RwLock::new(pixels), filter_table } } pub fn get_sample_bounds(&self) -> Bounds2i { let p1 = (Point2f::from(self.cropped_pixel_bounds.p_min) + Vector2f::new(0.5, 0.5) - self.filter.radius()).floor(); let p2 = (Point2f::from(self.cropped_pixel_bounds.p_max) - Vector2f::new(0.5, 0.5) + self.filter.radius()).ceil(); Bounds2i::from_points(&Point2i::from(p1), &Point2i::from(p2)) } pub fn get_physical_extent(&self) -> Bounds2f { let aspect = self.full_resolution.y as Float / self.full_resolution.x as Float; let x = (self.diagonal * self.diagonal / (1.0 + aspect * aspect)).sqrt(); let y = aspect * x; Bounds2f::new( &Point2f::new(-x / 2.0, -y / 2.0), &Point2f::new(x / 2.0, y / 2.0) ) } pub fn get_film_tile(&self, sample_bounds: &Bounds2i) -> FilmTile { // Bound image pixels that samples in sampleBounds contribute to let half_pixel = Vector2f::new(0.5, 0.5); let float_bounds = Bounds2f { p_min: Point2f::from(sample_bounds.p_min), p_max: Point2f::from(sample_bounds.p_max) }; let p0f = (float_bounds.p_min - half_pixel - self.filter.radius()).ceil(); let p1f = (float_bounds.p_max - half_pixel + self.filter.radius()).floor(); let p0 = Point2i::from(p0f); let p1 = 
Point2i::from(p1f) + Point2i::new(1, 1); let tile_bounds = Bounds2i::from_points(&p0, &p1).intersect(&self.cropped_pixel_bounds); FilmTile::new(&tile_bounds, &self.filter.radius(), &self.filter_table, FILTER_TABLE_WIDTH, self.max_sample_luminance) } pub fn merge_film_tile(&self, tile: &mut FilmTile) { // TODO: ProfilePhase let mut pixels = self.pixels.write().unwrap(); info!("Merging film tile {}", tile.pixel_bounds); for p in &tile.get_pixel_bounds() { // Merge pixel into Film::pixels let tile_pixel = tile.get_pixel(&p); let offset = self.get_pixel(&p); let merge_pixel = &mut pixels[offset]; let xyz = tile_pixel.contrib_sum.to_xyz(); for i in 0..3 { merge_pixel.xyz[i] += xyz[i]; } merge_pixel.filter_weight_sum += tile_pixel.filter_weight_sum; } } fn get_pixel(&self, p: &Point2i) -> usize { assert!(self.cropped_pixel_bounds.inside_exclusive(p)); let width = self.cropped_pixel_bounds.p_max.x - self.cropped_pixel_bounds.p_min.x; let offset = (p.x - self.cropped_pixel_bounds.p_min.x) + (p.y - self.cropped_pixel_bounds.p_min.y) * width; offset as usize } pub fn set_image(&self, img: &[Spectrum]) { let npixels = self.cropped_pixel_bounds.area() as usize; let mut pixels = self.pixels.write().unwrap(); for i in 0..npixels { let p = &mut pixels[i]; p.xyz = img[i].to_xyz(); p.filter_weight_sum = 1.0; p.splat_xyz[0] = AtomicFloat::new(0.0); p.splat_xyz[1] = AtomicFloat::new(0.0); p.splat_xyz[2] = AtomicFloat::new(0.0); } } pub fn
(&self, p: &Point2f, mut v: Spectrum) { // TODO: ProfilePhase if v.has_nans() { error!("Ignoring splatted spectrum with NaN values at ({}, {})", p.x, p.y); return; } else if v.y() < 0.0 { error!("Ignoring splatted spectrum with negative luminance {} at ({}, {})", v.y(), p.x, p.y); return } else if v.y().is_infinite() { error!("Ignoring slatted spectrum with infinite luminance at ({}, {})", p.x, p.y); return; } let pi = Point2i::from(p.floor()); if!self.cropped_pixel_bounds.inside_exclusive(&pi) { return; } if v.y() > self.max_sample_luminance { v *= self.max_sample_luminance / v.y(); } let mut pixels = self.pixels.write().unwrap(); let xyz = v.to_xyz(); let offset = self.get_pixel(&pi); let pixel = &mut pixels[offset]; for i in 0..3 { pixel.splat_xyz[i].add(xyz[i]); } } pub fn write_image(&self, splat_scale: Float) -> Result<()> { // Convert image to RGB and compute final pixel values info!("Converting image to RGB and computing final weighted pixel values"); let mut rgb = vec![0.0; (3 * self.cropped_pixel_bounds.area()) as usize]; let mut offset: usize; for p in &self.cropped_pixel_bounds { // Convert pixel XYZ color to RGB offset = self.get_pixel(&p); let pixel = &self.pixels.read().unwrap()[offset]; let start = offset * 3; let xyz = xyz_to_rgb(pixel.xyz); rgb[start] = xyz[0]; rgb[start + 1] = xyz[1]; rgb[start + 2] = xyz[2]; // Normalize pixel with weight sum let filter_weight_sum = pixel.filter_weight_sum; if filter_weight_sum!= 0.0 { let invwt = 1.0 / filter_weight_sum; rgb[start] = (rgb[start] * invwt).max(0.0); rgb[start + 1] = (rgb[start + 1] * invwt).max(0.0); rgb[start + 2] = (rgb[start + 2] * invwt).max(0.0); } // splate value at pixel let splat_xyz: [Float; 3] = [ pixel.splat_xyz[0].clone().into(), pixel.splat_xyz[1].clone().into(), pixel.splat_xyz[2].clone().into() ]; let splat_rgb = xyz_to_rgb(splat_xyz); rgb[start] += splat_scale * splat_rgb[0]; rgb[start + 1] += splat_scale * splat_rgb[1]; rgb[start + 2] += splat_scale * splat_rgb[2]; // Scale pixel 
value by scale rgb[start] *= self.scale; rgb[start + 1] *= self.scale; rgb[start + 2] *= self.scale; } info!("Writing image {} with bounds {}", self.filename.display(), self.cropped_pixel_bounds); // TODO: WriteImage write_image(&self.filename, &rgb, &self.cropped_pixel_bounds, &self.full_resolution) } } pub struct FilmTile<'a> { pub pixel_bounds : Bounds2i, filter_radius : Vector2f, inv_filter_radius : Vector2f, filter_table : &'a[Float], filter_table_size : usize, pixels : Vec<FilmTilePixel>, max_sample_luminance: Float } impl<'a> FilmTile<'a> { pub fn new(pixel_bounds: &Bounds2i, filter_radius: &Vector2f, filter_table: &'a[Float], filter_table_size: usize, max_sample_luminance: Float) -> Self { Self { filter_table, filter_table_size, max_sample_luminance, pixel_bounds: *pixel_bounds, filter_radius: *filter_radius, inv_filter_radius: Vector2f::new(1.0 / filter_radius.x, 1.0 / filter_radius.y), pixels: vec![FilmTilePixel::default(); std::cmp::max(0, pixel_bounds.area() as usize)] } } pub fn add_sample(&mut self, pfilm: &Point2f, mut L: Spectrum, sample_weight: Float) { // TODO: ProfilePhase if L.y() > self.max_sample_luminance { L *= Spectrum::new(self.max_sample_luminance / L.y()); } // Compute sample's raster bounds; let pfilm_discrete = *pfilm - Vector2f::new(0.5, 0.5); let p0f = (pfilm_discrete - self.filter_radius).ceil(); let p1f = (pfilm_discrete + self.filter_radius).floor(); let mut p0 = Point2i::new(p0f.x as isize, p0f.y as isize); let mut p1 = Point2i::new(p1f.x as isize, p1f.y as isize) + Point2i::new(1, 1); p0 = p0.max(&self.pixel_bounds.p_min); p1 = p1.min(&self.pixel_bounds.p_max); // Loop over filter support and add sample to pixel arrays; let mut ifx: SmallVec<[usize; 16]> = SmallVec::with_capacity(p1.x as usize - p0.x as usize); let mut ify: SmallVec<[usize; 16]> = SmallVec::with_capacity(p1.y as usize - p0.y as usize); for x in p0.x..p1.x { let fx = ((x as Float - pfilm_discrete.x) * self.inv_filter_radius.x * self.filter_table_size as 
Float).abs(); ifx.push(std::cmp::min(fx.floor() as usize, self.filter_table_size - 1)); } for y in p0.y..p1.y { let fy = ((y as Float - pfilm_discrete.y) * self.inv_filter_radius.y * self.filter_table_size as Float).abs(); ify.push(std::cmp::min(fy.floor() as usize, self.filter_table_size - 1)); } for y in p0.y..p1.y { for x in p0.x..p1.x { // Evaluate filter value at (x, y) pixel let offset = ify[(y - p0.y) as usize] * self.filter_table_size + ifx[(x - p0.x) as usize]; let filter_weight = self.filter_table[offset]; // Update pixel values with filtered sample contribution let pixel = self.get_pixel(&Point2i::new(x, y)); pixel.contrib_sum += L * Spectrum::new(sample_weight) * Spectrum::new(filter_weight); pixel.filter_weight_sum += filter_weight; } } } fn get_pixel(&mut self, p: &Point2i) -> &mut FilmTilePixel { assert!(self.pixel_bounds.inside_exclusive(p)); let width = self.pixel_bounds.p_max.x - self.pixel_bounds.p_min.x; let offset = (p.x - self.pixel_bounds.p_min.x) + (p.y - self.pixel_bounds.p_min.y) * width; &mut self.pixels[offset as usize] } fn get_pixel_bounds(&self) -> Bounds2i { self.pixel_bounds } } pub fn create_film(params: &ParamSet, filter: Filters, opts: &Options) -> Film { let filename = if!opts.image_file.as_os_str().is_empty() { let params_filename = params.find_one_string("filename", "".to_owned()); if!params_filename.is_empty() { warn!("Output filename supplied on command line. 
\"{}\" is overriding \ filename provided in scene description file, \"{}\"", opts.image_file.display(), params_filename); } opts.image_file.clone() } else { let f = params.find_one_string("filename", "pbrt.exr".to_owned()); PathBuf::from(f) }; let mut xres = params.find_one_int("xresolution", 1280); let mut yres = params.find_one_int("yresolution", 720); if opts.quick_render { xres = std::cmp::max(1, xres / 4); yres = std::cmp::max(1, yres / 4); } let mut crop = Bounds2f::default(); let mut cwi = 0; let cr_some = params.find_float("cropwindow", &mut cwi); if cr_some.is_some() && cwi == 4 { let cr = cr_some.unwrap(); crop.p_min.x = clamp(cr[0].min(cr[1]), 0.0, 1.0); crop.p_max.x = clamp(cr[0].max(cr[1]), 0.0, 1.0); crop.p_min.y = clamp(cr[2].min(cr[3]), 0.0, 1.0); crop.p_max.y = clamp(cr[2].max(cr[3]), 0.0, 1.0); } else if cr_some.is_some() { error!("{} values supplised fir \"cropwindow\". Expected 4.", cwi); } else { crop = Bounds2f::new( &Point2f::new( clamp(opts.crop_window[0][0], 0.0, 1.0), clamp(opts.crop_window[1][0], 0.0, 1.0)), &Point2f::new( clamp(opts.crop_window[0][1], 0.0, 1.0), clamp(opts.crop_window[1][1], 0.0, 1.0) ) ); } let scale = params.find_one_float("scale", 1.0); let diagonal = params.find_one_float("diagonal", 35.0); let max_sample_luminance = params.find_one_float("maxsampleluminance", INFINITY); Film::new( &Point2i::new(xres, yres), &crop, filter, diagonal, filename, scale, max_sample_luminance) }
add_splat
identifier_name
film.rs
use crate::core::geometry::point::{Point2i, Point2f}; use crate::core::spectrum::{Spectrum, xyz_to_rgb}; use crate::core::pbrt::{Float, Options, clamp, INFINITY}; use crate::core::filter::{Filters, Filter}; use crate::core::geometry::bounds::{Bounds2i, Bounds2f}; use crate::core::parallel::AtomicFloat; use std::sync::RwLock; use log::{info, error, warn}; use anyhow::Result; use crate::core::geometry::vector::Vector2f; use crate::core::paramset::ParamSet; use crate::core::imageio::write_image; use std::path::{PathBuf}; use smallvec::SmallVec; const FILTER_TABLE_WIDTH: usize = 16; #[derive(Default, Clone, Copy)] struct FilmTilePixel { contrib_sum : Spectrum, filter_weight_sum : Float } #[derive(Clone)] struct Pixel { xyz : [Float; 3], filter_weight_sum : Float, splat_xyz : [AtomicFloat; 3], _pad : Float } impl Default for Pixel { fn default() -> Self { Self { xyz: [0.0; 3], filter_weight_sum: 0.0, splat_xyz: [AtomicFloat::default(), AtomicFloat::default(), AtomicFloat::default()], _pad: 0.0 } } } pub struct Film { pub full_resolution : Point2i, pub diagonal : Float, pub filter : Filters, pub filename : PathBuf, pub cropped_pixel_bounds: Bounds2i, pixels : RwLock<Vec<Pixel>>, filter_table : [Float; FILTER_TABLE_WIDTH * FILTER_TABLE_WIDTH], scale : Float, max_sample_luminance : Float } impl Film { pub fn new(resolution: &Point2i, crop_window: &Bounds2f, filt: Filters, diagonal: Float, filename: PathBuf, scale: Float, max_sample_luminance: Float) -> Self { let crop_pixel_bounds = Bounds2i::from_points( &Point2i::new( (resolution.x as Float * crop_window.p_min.x).ceil() as isize, (resolution.y as Float * crop_window.p_min.y).ceil() as isize), &Point2i::new( (resolution.x as Float * crop_window.p_max.x).ceil() as isize, (resolution.y as Float * crop_window.p_max.y).ceil() as isize ) ); info!("Created film with full resolution {}\ . 
Crop window of {} -> croppedPixelBounds {}", resolution, crop_window, crop_pixel_bounds); // Allocate film image storage let pixels = vec![Pixel::default(); crop_pixel_bounds.area() as usize]; // TODO: filmPixelMemory // Precompute filter weight table
let p = Point2f::new( (x as Float + 0.5) * filt.radius().x / FILTER_TABLE_WIDTH as Float, (y as Float + 0.5) * filt.radius().y / FILTER_TABLE_WIDTH as Float ); filter_table[offset] = filt.evaluate(&p); offset += 1; } } Self { full_resolution: *resolution, diagonal: diagonal * 0.001, filter: filt, filename: filename.to_owned(), scale, max_sample_luminance, cropped_pixel_bounds: crop_pixel_bounds, pixels: RwLock::new(pixels), filter_table } } pub fn get_sample_bounds(&self) -> Bounds2i { let p1 = (Point2f::from(self.cropped_pixel_bounds.p_min) + Vector2f::new(0.5, 0.5) - self.filter.radius()).floor(); let p2 = (Point2f::from(self.cropped_pixel_bounds.p_max) - Vector2f::new(0.5, 0.5) + self.filter.radius()).ceil(); Bounds2i::from_points(&Point2i::from(p1), &Point2i::from(p2)) } pub fn get_physical_extent(&self) -> Bounds2f { let aspect = self.full_resolution.y as Float / self.full_resolution.x as Float; let x = (self.diagonal * self.diagonal / (1.0 + aspect * aspect)).sqrt(); let y = aspect * x; Bounds2f::new( &Point2f::new(-x / 2.0, -y / 2.0), &Point2f::new(x / 2.0, y / 2.0) ) } pub fn get_film_tile(&self, sample_bounds: &Bounds2i) -> FilmTile { // Bound image pixels that samples in sampleBounds contribute to let half_pixel = Vector2f::new(0.5, 0.5); let float_bounds = Bounds2f { p_min: Point2f::from(sample_bounds.p_min), p_max: Point2f::from(sample_bounds.p_max) }; let p0f = (float_bounds.p_min - half_pixel - self.filter.radius()).ceil(); let p1f = (float_bounds.p_max - half_pixel + self.filter.radius()).floor(); let p0 = Point2i::from(p0f); let p1 = Point2i::from(p1f) + Point2i::new(1, 1); let tile_bounds = Bounds2i::from_points(&p0, &p1).intersect(&self.cropped_pixel_bounds); FilmTile::new(&tile_bounds, &self.filter.radius(), &self.filter_table, FILTER_TABLE_WIDTH, self.max_sample_luminance) } pub fn merge_film_tile(&self, tile: &mut FilmTile) { // TODO: ProfilePhase let mut pixels = self.pixels.write().unwrap(); info!("Merging film tile {}", tile.pixel_bounds); 
for p in &tile.get_pixel_bounds() { // Merge pixel into Film::pixels let tile_pixel = tile.get_pixel(&p); let offset = self.get_pixel(&p); let merge_pixel = &mut pixels[offset]; let xyz = tile_pixel.contrib_sum.to_xyz(); for i in 0..3 { merge_pixel.xyz[i] += xyz[i]; } merge_pixel.filter_weight_sum += tile_pixel.filter_weight_sum; } } fn get_pixel(&self, p: &Point2i) -> usize { assert!(self.cropped_pixel_bounds.inside_exclusive(p)); let width = self.cropped_pixel_bounds.p_max.x - self.cropped_pixel_bounds.p_min.x; let offset = (p.x - self.cropped_pixel_bounds.p_min.x) + (p.y - self.cropped_pixel_bounds.p_min.y) * width; offset as usize } pub fn set_image(&self, img: &[Spectrum]) { let npixels = self.cropped_pixel_bounds.area() as usize; let mut pixels = self.pixels.write().unwrap(); for i in 0..npixels { let p = &mut pixels[i]; p.xyz = img[i].to_xyz(); p.filter_weight_sum = 1.0; p.splat_xyz[0] = AtomicFloat::new(0.0); p.splat_xyz[1] = AtomicFloat::new(0.0); p.splat_xyz[2] = AtomicFloat::new(0.0); } } pub fn add_splat(&self, p: &Point2f, mut v: Spectrum) { // TODO: ProfilePhase if v.has_nans() { error!("Ignoring splatted spectrum with NaN values at ({}, {})", p.x, p.y); return; } else if v.y() < 0.0 { error!("Ignoring splatted spectrum with negative luminance {} at ({}, {})", v.y(), p.x, p.y); return } else if v.y().is_infinite() { error!("Ignoring slatted spectrum with infinite luminance at ({}, {})", p.x, p.y); return; } let pi = Point2i::from(p.floor()); if!self.cropped_pixel_bounds.inside_exclusive(&pi) { return; } if v.y() > self.max_sample_luminance { v *= self.max_sample_luminance / v.y(); } let mut pixels = self.pixels.write().unwrap(); let xyz = v.to_xyz(); let offset = self.get_pixel(&pi); let pixel = &mut pixels[offset]; for i in 0..3 { pixel.splat_xyz[i].add(xyz[i]); } } pub fn write_image(&self, splat_scale: Float) -> Result<()> { // Convert image to RGB and compute final pixel values info!("Converting image to RGB and computing final weighted pixel 
values"); let mut rgb = vec![0.0; (3 * self.cropped_pixel_bounds.area()) as usize]; let mut offset: usize; for p in &self.cropped_pixel_bounds { // Convert pixel XYZ color to RGB offset = self.get_pixel(&p); let pixel = &self.pixels.read().unwrap()[offset]; let start = offset * 3; let xyz = xyz_to_rgb(pixel.xyz); rgb[start] = xyz[0]; rgb[start + 1] = xyz[1]; rgb[start + 2] = xyz[2]; // Normalize pixel with weight sum let filter_weight_sum = pixel.filter_weight_sum; if filter_weight_sum!= 0.0 { let invwt = 1.0 / filter_weight_sum; rgb[start] = (rgb[start] * invwt).max(0.0); rgb[start + 1] = (rgb[start + 1] * invwt).max(0.0); rgb[start + 2] = (rgb[start + 2] * invwt).max(0.0); } // splate value at pixel let splat_xyz: [Float; 3] = [ pixel.splat_xyz[0].clone().into(), pixel.splat_xyz[1].clone().into(), pixel.splat_xyz[2].clone().into() ]; let splat_rgb = xyz_to_rgb(splat_xyz); rgb[start] += splat_scale * splat_rgb[0]; rgb[start + 1] += splat_scale * splat_rgb[1]; rgb[start + 2] += splat_scale * splat_rgb[2]; // Scale pixel value by scale rgb[start] *= self.scale; rgb[start + 1] *= self.scale; rgb[start + 2] *= self.scale; } info!("Writing image {} with bounds {}", self.filename.display(), self.cropped_pixel_bounds); // TODO: WriteImage write_image(&self.filename, &rgb, &self.cropped_pixel_bounds, &self.full_resolution) } } pub struct FilmTile<'a> { pub pixel_bounds : Bounds2i, filter_radius : Vector2f, inv_filter_radius : Vector2f, filter_table : &'a[Float], filter_table_size : usize, pixels : Vec<FilmTilePixel>, max_sample_luminance: Float } impl<'a> FilmTile<'a> { pub fn new(pixel_bounds: &Bounds2i, filter_radius: &Vector2f, filter_table: &'a[Float], filter_table_size: usize, max_sample_luminance: Float) -> Self { Self { filter_table, filter_table_size, max_sample_luminance, pixel_bounds: *pixel_bounds, filter_radius: *filter_radius, inv_filter_radius: Vector2f::new(1.0 / filter_radius.x, 1.0 / filter_radius.y), pixels: vec![FilmTilePixel::default(); 
std::cmp::max(0, pixel_bounds.area() as usize)] } } pub fn add_sample(&mut self, pfilm: &Point2f, mut L: Spectrum, sample_weight: Float) { // TODO: ProfilePhase if L.y() > self.max_sample_luminance { L *= Spectrum::new(self.max_sample_luminance / L.y()); } // Compute sample's raster bounds; let pfilm_discrete = *pfilm - Vector2f::new(0.5, 0.5); let p0f = (pfilm_discrete - self.filter_radius).ceil(); let p1f = (pfilm_discrete + self.filter_radius).floor(); let mut p0 = Point2i::new(p0f.x as isize, p0f.y as isize); let mut p1 = Point2i::new(p1f.x as isize, p1f.y as isize) + Point2i::new(1, 1); p0 = p0.max(&self.pixel_bounds.p_min); p1 = p1.min(&self.pixel_bounds.p_max); // Loop over filter support and add sample to pixel arrays; let mut ifx: SmallVec<[usize; 16]> = SmallVec::with_capacity(p1.x as usize - p0.x as usize); let mut ify: SmallVec<[usize; 16]> = SmallVec::with_capacity(p1.y as usize - p0.y as usize); for x in p0.x..p1.x { let fx = ((x as Float - pfilm_discrete.x) * self.inv_filter_radius.x * self.filter_table_size as Float).abs(); ifx.push(std::cmp::min(fx.floor() as usize, self.filter_table_size - 1)); } for y in p0.y..p1.y { let fy = ((y as Float - pfilm_discrete.y) * self.inv_filter_radius.y * self.filter_table_size as Float).abs(); ify.push(std::cmp::min(fy.floor() as usize, self.filter_table_size - 1)); } for y in p0.y..p1.y { for x in p0.x..p1.x { // Evaluate filter value at (x, y) pixel let offset = ify[(y - p0.y) as usize] * self.filter_table_size + ifx[(x - p0.x) as usize]; let filter_weight = self.filter_table[offset]; // Update pixel values with filtered sample contribution let pixel = self.get_pixel(&Point2i::new(x, y)); pixel.contrib_sum += L * Spectrum::new(sample_weight) * Spectrum::new(filter_weight); pixel.filter_weight_sum += filter_weight; } } } fn get_pixel(&mut self, p: &Point2i) -> &mut FilmTilePixel { assert!(self.pixel_bounds.inside_exclusive(p)); let width = self.pixel_bounds.p_max.x - self.pixel_bounds.p_min.x; let offset = (p.x - 
self.pixel_bounds.p_min.x) + (p.y - self.pixel_bounds.p_min.y) * width; &mut self.pixels[offset as usize] } fn get_pixel_bounds(&self) -> Bounds2i { self.pixel_bounds } } pub fn create_film(params: &ParamSet, filter: Filters, opts: &Options) -> Film { let filename = if!opts.image_file.as_os_str().is_empty() { let params_filename = params.find_one_string("filename", "".to_owned()); if!params_filename.is_empty() { warn!("Output filename supplied on command line. \"{}\" is overriding \ filename provided in scene description file, \"{}\"", opts.image_file.display(), params_filename); } opts.image_file.clone() } else { let f = params.find_one_string("filename", "pbrt.exr".to_owned()); PathBuf::from(f) }; let mut xres = params.find_one_int("xresolution", 1280); let mut yres = params.find_one_int("yresolution", 720); if opts.quick_render { xres = std::cmp::max(1, xres / 4); yres = std::cmp::max(1, yres / 4); } let mut crop = Bounds2f::default(); let mut cwi = 0; let cr_some = params.find_float("cropwindow", &mut cwi); if cr_some.is_some() && cwi == 4 { let cr = cr_some.unwrap(); crop.p_min.x = clamp(cr[0].min(cr[1]), 0.0, 1.0); crop.p_max.x = clamp(cr[0].max(cr[1]), 0.0, 1.0); crop.p_min.y = clamp(cr[2].min(cr[3]), 0.0, 1.0); crop.p_max.y = clamp(cr[2].max(cr[3]), 0.0, 1.0); } else if cr_some.is_some() { error!("{} values supplised fir \"cropwindow\". Expected 4.", cwi); } else { crop = Bounds2f::new( &Point2f::new( clamp(opts.crop_window[0][0], 0.0, 1.0), clamp(opts.crop_window[1][0], 0.0, 1.0)), &Point2f::new( clamp(opts.crop_window[0][1], 0.0, 1.0), clamp(opts.crop_window[1][1], 0.0, 1.0) ) ); } let scale = params.find_one_float("scale", 1.0); let diagonal = params.find_one_float("diagonal", 35.0); let max_sample_luminance = params.find_one_float("maxsampleluminance", INFINITY); Film::new( &Point2i::new(xres, yres), &crop, filter, diagonal, filename, scale, max_sample_luminance) }
let mut offset = 0; let mut filter_table = [0.0; FILTER_TABLE_WIDTH * FILTER_TABLE_WIDTH]; for y in 0..FILTER_TABLE_WIDTH { for x in 0..FILTER_TABLE_WIDTH {
random_line_split