lib.rs
use std::{
    alloc,
    alloc::Layout,
    fmt,
    fmt::Debug,
    iter::FromIterator,
    mem,
    ops::{Index, IndexMut},
    ptr,
    ptr::NonNull,
};

#[cfg(test)]
pub mod test_box;
#[cfg(test)]
pub mod test_i32;
#[cfg(test)]
pub mod test_zst;

pub mod iterator;
use iterator::{BorrowedVectorIterator, BorrowedVectorIteratorMut, VectorIterator};

const GROWTH_RATE: f64 = 1.25;

/// A resizable contiguous array of `T`. Does not allocate upon creation.
pub struct Vector<T> {
    pub(crate) data: Option<NonNull<T>>,
    pub(crate) size: usize,
    pub(crate) capacity: usize,
}

impl<T> Default for Vector<T> {
    fn default() -> Self {
        Self::new()
    }
}

impl<T: Debug> Debug for Vector<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        if self.is_empty() {
            return write!(f, "[]");
        }
        write!(f, "[")?;
        for i in 0..(self.size - 1) {
            write!(f, "{:?}, ", self[i])?;
        }
        write!(
            f,
            "{:?}]",
            self.get(self.size - 1).expect("length already checked?")
        )
    }
}

impl<T> Index<usize> for Vector<T> {
    type Output = T;

    fn index(&self, index: usize) -> &Self::Output {
        self.get(index).expect("Index was out of bounds")
    }
}

impl<T> IndexMut<usize> for Vector<T> {
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        self.get_mut(index).expect("Index was out of bounds")
    }
}

impl<T> IntoIterator for Vector<T> {
    type Item = T;
    type IntoIter = VectorIterator<T>;

    fn into_iter(mut self) -> Self::IntoIter {
        let Vector {
            data,
            capacity,
            size,
        } = self;
        // Moves the pointer out of the vector so that the allocation
        // won't be freed at the end of this block.
        self.data = None;
        self.size = 0;
        VectorIterator {
            data,
            capacity,
            index: -1isize as usize,
            index_back: size,
        }
    }
}

impl<'a, T> IntoIterator for &'a Vector<T> {
    type Item = &'a T;
    type IntoIter = BorrowedVectorIterator<'a, T>;

    fn into_iter(self) -> Self::IntoIter {
        BorrowedVectorIterator {
            vector: &self,
            index: -1isize as usize,
            index_back: self.size,
        }
    }
}

impl<'a, T> IntoIterator for &'a mut Vector<T> {
    type Item = &'a mut T;
    type IntoIter = BorrowedVectorIteratorMut<'a, T>;

    fn into_iter(self) -> Self::IntoIter {
        let size = self.size;
        BorrowedVectorIteratorMut {
            vector: self,
            index: -1isize as usize,
            index_back: size,
        }
    }
}

impl<T> FromIterator<T> for Vector<T> {
    fn from_iter<A: IntoIterator<Item = T>>(iter: A) -> Self {
        let iter = iter.into_iter();
        let (min, _) = iter.size_hint();
        let mut vec = Vector::with_capacity(min);
        for item in iter {
            vec.push(item);
        }
        vec
    }
}

impl<T> Drop for Vector<T> {
    fn drop(&mut self) {
        // Drop the elements first; doing this outside the `if let` below also
        // handles zero sized types, which never have an allocation to free.
        self.clear();
        if let Some(ptr) = self.data {
            let ptr = ptr.as_ptr();
            let layout = Layout::array::<T>(self.capacity)
                .expect("Cannot recreate layout. Has capacity been changed?");
            // Safety: Capacity is only changed on reallocation, the pointer is
            // trusted and iterators return to vectors for deallocation.
            unsafe { alloc::dealloc(ptr as *mut u8, layout) }
        }
    }
}

impl<T> Vector<T> {
    /// Creates a new vector. Does not allocate till it's needed.
    pub fn new() -> Self {
        let capacity = if mem::size_of::<T>() == 0 {
            usize::MAX
        } else {
            0
        };
        Vector {
            data: None,
            size: 0,
            capacity,
        }
    }

    /// Creates a new vector with a preallocated buffer with space for `cap` elements.
    pub fn with_capacity(cap: usize) -> Self {
        let mut vec = Vector::new();
        if mem::size_of::<T>() != 0 {
            vec.reserve(cap);
        }
        vec
    }

    /// Checks if the vector has no elements in it. Does not check whether there
    /// is an allocated buffer or not.
    pub fn is_empty(&self) -> bool {
        self.size == 0
    }

    /// Returns the number of elements stored in the vector.
    pub fn len(&self) -> usize {
        self.size
    }

    /// Allocates a new buffer of the specified size for the vector.
    ///
    /// Panics if `new_cap` is smaller than the current size or overflows a
    /// `usize`. Has O(n) complexity.
    fn reserve(&mut self, new_cap: usize) {
        assert_ne!(
            mem::size_of::<T>(),
            0,
            "Vector currently doesn't support storing 0 sized types"
        );
        // `alloc::alloc` is undefined for zero-sized layouts, and a zero
        // capacity needs no buffer anyway.
        if new_cap == 0 {
            return;
        }
        assert!(
            new_cap >= self.size,
            "New capacity can't contain current vector"
        );
        let layout = Layout::array::<T>(new_cap).expect("Overflow");
        // Safety: Layout is type and capacity checked, and non zero-sized.
        let new_ptr = unsafe { alloc::alloc(layout) as *mut T };
        assert!(!new_ptr.is_null());
        let new_data = NonNull::new(new_ptr);
        if let Some(old_ptr) = self.data {
            unsafe {
                // Safety: The new allocation is a separate allocation, so the
                // copy is guaranteed not to overlap.
                ptr::copy_nonoverlapping(old_ptr.as_ptr(), new_ptr, self.size);
                // Safety: The pointer is only changed here in allocation.
                alloc::dealloc(
                    old_ptr.as_ptr() as *mut u8,
                    Layout::array::<T>(self.capacity)
                        .expect("Cannot recreate layout? Has capacity been edited?"),
                );
            }
        }
        self.data = new_data;
        self.capacity = new_cap;
    }

    /// Allocates a new buffer for the vector that is larger by `additional`
    /// elements.
    ///
    /// Panics if `additional` causes the capacity to overflow a `usize`.
    /// Has O(n) complexity.
    pub fn reserve_additional(&mut self, additional: usize) {
        if mem::size_of::<T>() == 0 {
            return;
        }
        let new_cap = self
            .capacity
            .checked_add(additional)
            .expect("New size overflowed usize");
        // Only an overflow check; the result is recomputed by `Layout::array`.
        new_cap
            .checked_mul(mem::size_of::<T>())
            .expect("New size overflowed usize");
        self.reserve(new_cap);
    }

    /// Inserts an element at the back of the vector.
    ///
    /// Panics if the length of the vector is equal to `usize::MAX`.
    /// Has amortized O(1) complexity.
    pub fn push(&mut self, elem: T) {
        if self.data.is_none() && mem::size_of::<T>() != 0 {
            self.reserve(2);
        } else if self.size == self.capacity {
            if self.capacity == usize::MAX {
                panic!("Overflow");
            }
            self.reserve(
                (self.capacity as f64 * GROWTH_RATE)
                    .ceil()
                    .min(usize::MAX as f64) as usize,
            );
        }
        assert!(self.size < self.capacity);
        assert!(self.data.is_some() || (mem::size_of::<T>() == 0));
        // Safety: Length is checked. If the allocation was already full it is
        // reallocated above.
        unsafe {
            self.as_ptr_mut()
                .expect("Above assertion failed?")
                .add(self.size)
                .write(elem)
        };
        self.size += 1;
    }

    /// Gets a reference to the element at the given index.
    ///
    /// Returns `None` if the index is greater than or equal to the length of
    /// the vector. Has complexity O(1).
    pub fn get(&self, idx: usize) -> Option<&T> {
        if idx >= self.size {
            return None;
        }
        // Safety: Index is already checked.
        unsafe { self.as_ptr()?.add(idx).as_ref() }
    }

    /// Gets a mutable reference to the element at the given index.
    ///
    /// Returns `None` if the index is greater than or equal to the length of
    /// the vector. Has complexity O(1).
    pub fn get_mut(&mut self, idx: usize) -> Option<&mut T> {
        if idx >= self.size {
            return None;
        }
        // Safety: Index is already checked.
        unsafe { self.as_ptr_mut()?.add(idx).as_mut() }
    }

    /// Inserts an element in the vector at `idx`, moving everything after it
    /// to the right. Will reallocate if length equals capacity.
    ///
    /// Panics if the vector's length would overflow `usize::MAX`.
    /// Has O(n) complexity.
    pub fn insert(&mut self, idx: usize, elem: T) {
        if idx == self.size {
            return self.push(elem);
        }
        if self.size == self.capacity {
            if self.capacity == usize::MAX {
                panic!("Overflow");
            }
            self.reserve(
                (self.capacity as f64 * GROWTH_RATE)
                    .ceil()
                    .min(usize::MAX as f64) as usize,
            );
        } else if self.data.is_none() && mem::size_of::<T>() != 0 {
            self.reserve(2);
        }
        assert!(self.size < self.capacity);
        assert!(self.data.is_some() || mem::size_of::<T>() == 0);
        let data_ptr = self
            .as_ptr_mut()
            .expect("Vector's data pointer is null despite being just checked?");
        for i in (idx..self.size).rev() {
            // Safety: Copies element by element within the vector's allocation;
            // the highest write is to index `self.size`, which is in capacity.
            unsafe { data_ptr.add(i + 1).write(data_ptr.add(i).read()) };
        }
        // Safety: The element that was here has been moved, so this is
        // guaranteed in bounds.
        unsafe { data_ptr.add(idx).write(elem) };
        self.size += 1;
    }

    /// Removes the last element in the vector.
    ///
    /// Returns `None` if the vector is empty. Has O(1) complexity.
    pub fn pop(&mut self) -> Option<T> {
        if self.size == 0 {
            return None;
        }
        self.size -= 1;
        let data_ptr = self.as_ptr_mut()?;
        // Safety: Existing pointer is trusted.
        Some(unsafe { data_ptr.add(self.size).read() })
    }

    /// Removes the item at `idx`, moving everything after it one step to the
    /// left. If you're removing several elements, consider using `retain` for
    /// O(n) complexity instead of O(n²).
    ///
    /// Panics if `idx` is greater than or equal to the vector's length.
    /// Has O(n) complexity.
    pub fn remove(&mut self, idx: usize) -> T {
        if idx >= self.size {
            panic!("Index was out of bounds!");
        }
        // Removing the last element needs no shifting; `pop` handles it.
        if idx == self.size - 1 {
            return self.pop().expect("Vector is empty");
        }
        if self.size == 0 || (self.data.is_none() && mem::size_of::<T>() != 0) {
            panic!("Vector is empty");
        }
        let data_ptr = self.as_ptr_mut().expect("Check above was incorrect?");
        // Safety: Index is checked and pointer is trusted.
        let ret = unsafe { data_ptr.add(idx).read() };
        for i in idx..(self.size - 1) {
            // Safety: Copies element by element within the vector's allocation.
            // `self.size - 1 + 1` keeps this within `self.size`.
            unsafe { data_ptr.add(i).write(data_ptr.add(i + 1).read()) };
        }
        self.size -= 1;
        ret
    }

    /// Removes every element in the vector.
    ///
    /// Has O(n) complexity.
    pub fn clear(&mut self) {
        while !self.is_empty() {
            self.pop();
        }
    }

    /// Borrows the vector's allocation as an immutable slice.
    ///
    /// Has complexity O(1).
    pub fn as_slice(&self) -> &[T] {
        if self.data.is_some() || mem::size_of::<T>() == 0 {
            // Safety: The existing pointer and size are trusted as they can't
            // (safely) be set from outside.
            unsafe {
                ptr::slice_from_raw_parts(
                    self.as_ptr().expect("Cannot get pointer to create slice"),
                    self.size,
                )
                .as_ref()
                .expect("Vector's internal NonNull pointer was null?")
            }
        } else {
            assert!(self.size == 0);
            &[]
        }
    }

    /// Borrows the vector's allocation as a mutable slice.
    ///
    /// Has complexity O(1).
    pub fn as_slice_mut(&mut self) -> &mut [T] {
        if self.data.is_some() || mem::size_of::<T>() == 0 {
            // Safety: The existing pointer and size are trusted as they can't
            // (safely) be set from outside.
            unsafe {
                ptr::slice_from_raw_parts_mut(
                    self.as_ptr_mut()
                        .expect("Cannot get pointer to create slice"),
                    self.size,
                )
                .as_mut()
                .expect("Vector's internal NonNull pointer was null?")
            }
        } else {
            assert!(self.size == 0);
            &mut []
        }
    }

    /// Sets the length of the vector, within the existing capacity.
    ///
    /// Has complexity O(1).
    /// # Safety
    /// Panics if `len` is greater than the vector's capacity.
    /// Exposes potentially uninitialised memory if `len` is greater than the
    /// vector's length.
    pub unsafe fn set_len(&mut self, len: usize) {
        if len > self.capacity {
            panic!();
        }
        self.size = len;
    }

    /// Returns an iterator over borrowed elements of the vector.
    ///
    /// Has complexity O(1).
    pub fn iter(&self) -> BorrowedVectorIterator<'_, T> {
        (&self).into_iter()
    }

    /// Returns an iterator over mutably borrowed elements of the vector.
    ///
    /// Has complexity O(1).
    pub fn iter_mut(&mut self) -> BorrowedVectorIteratorMut<'_, T> {
        (self).into_iter()
    }

    /// Returns the pointer to the allocation of the vector, or `None` if
    /// nothing has been allocated yet.
    ///
    /// Has complexity O(1).
    pub fn as_ptr(&self) -> Option<*const T> {
        if mem::size_of::<T>() == 0 {
            Some(self as *const Vector<T> as *const T)
        } else {
            self.data.map(|p| p.as_ptr() as *const _)
        }
    }

    /// Returns the mutable pointer to the allocation of the vector, or `None`
    /// if nothing has been allocated yet.
    ///
    /// Has complexity O(1).
    pub fn as_ptr_mut(&mut self) -> Option<*mut T> {
        if mem::size_of::<T>() == 0 {
            Some(self as *mut Vector<T> as *mut T)
        } else {
            self.data.map(|p| p.as_ptr())
        }
    }

    /// Removes every element which does not fulfil the predicate passed.
    /// It is recommended to use this over `remove` in a loop due to time
    /// complexity and fewer moves.
    ///
    /// Has complexity O(n).
    pub fn retain(&mut self, f: fn(&T) -> bool) {
        if mem::size_of::<T>() == 0 {
            for i in (0..self.size).rev() {
                // Even if there is no data and the function can't actually
                // depend on the value of the element, the function might not be
                // pure, hence looping instead of one check and do nothing/clear
                // all. Elements failing the predicate are removed.
                if !f(&self[i]) {
                    self.pop();
                }
            }
            return;
        }
        if self.data.is_none() {
            return;
        }
        let ptr = self.data.expect("Above check failed?").as_ptr();
        let mut back = 0;
        for front in 0..self.size {
            let ok = f(&self[front]);
            if ok {
                if back != front {
                    // Safety: Element is moved within the allocated space
                    // (front is always greater than back and front is bound by
                    // size) without extra copies or clones, which would
                    // otherwise be required as you can't move out of a vector.
                    // The element which was overwritten has already been moved
                    // or dropped.
                    unsafe { ptr.add(back).write(ptr.add(front).read()) };
                }
                // The write index advances for every kept element, even when
                // the element did not need to be moved.
                back += 1;
            } else {
                // Make sure drop is run and the element is not just left to be
                // overwritten.
                let _ = unsafe { ptr.add(front).read() };
            }
        }
        self.size = back;
    }
}
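// ---------------------------------------------------------------------------
// An added usage sketch (not part of the original file): it exercises the
// public API defined above, with assertions matching the documented
// behaviour. The module and test names are invented for illustration.
// ---------------------------------------------------------------------------
#[cfg(test)]
mod usage_example {
    use super::Vector;

    #[test]
    fn push_pop_index() {
        let mut v = Vector::new();
        v.push(1);
        v.push(2);
        v.insert(1, 10); // [1, 10, 2]
        assert_eq!(v.len(), 3);
        assert_eq!(v[1], 10);
        assert_eq!(v.remove(1), 10); // back to [1, 2]
        assert_eq!(v.pop(), Some(2));
        assert_eq!(v.as_slice(), &[1]);
    }

    #[test]
    fn collect_and_retain() {
        // `FromIterator` sizes the initial buffer from the iterator's size
        // hint, then `push` grows it by GROWTH_RATE (1.25x) as needed.
        let mut v: Vector<i32> = (0..10).collect();
        // A non-capturing closure coerces to the `fn(&T) -> bool` that
        // `retain` expects.
        v.retain(|x| x % 2 == 0);
        assert_eq!(v.as_slice(), &[0, 2, 4, 6, 8]);
    }
}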
all_phases.rs
use crate::ast2ir;
use crate::emit;
use crate::err::NiceError;
use crate::ir2ast;
use crate::opt;
use crate::opt_ast;
use crate::parse;
use crate::swc_globals;

macro_rules! case {
    ( $name:ident, $string:expr, @ $expected:literal ) => {
        #[test]
        fn $name() -> Result<(), NiceError> {
            swc_globals::with(|g| {
                let (ast, files) = parse::parse(g, $string)?;
                let ir = ast2ir::convert(g, ast);
                let ir = opt::run_passes(g, ir);
                let ast = ir2ast::convert(
                    g,
                    ir,
                    ir2ast::Opt {
                        inline: true,
                        minify: false,
                    },
                );
                let ast = opt_ast::run(g, ast, opt_ast::Opt { minify: false });
                let js = emit::emit(g, ast, files, emit::Opt { minify: false })?;
                insta::assert_snapshot!(js, @ $expected);
                Ok(())
            })
        }
    };
}

macro_rules! extern_case {
    ( $name:ident, $file:expr ) => {
        #[test]
        fn $name() -> Result<(), NiceError> {
            swc_globals::with(|g| {
                let (ast, files) = parse::parse(g, include_str!($file))?;
                let ir = ast2ir::convert(g, ast);
                let ir = opt::run_passes(g, ir);
                let ast = ir2ast::convert(
                    g,
                    ir,
                    ir2ast::Opt {
                        inline: true,
                        minify: false,
                    },
                );
                let ast = opt_ast::run(g, ast, opt_ast::Opt { minify: false });
                let js = emit::emit(g, ast, files, emit::Opt { minify: false })?;
                insta::assert_snapshot!(stringify!($name), js);
                Ok(())
            })
        }
    };
}

extern_case!(snudown_js, "js/snudown.js");

case!(
    basic,
    r#" function f(x) { while (true); x = y.bar; z.foo = x ? true : 'hi'; return +[1 || x, { x }, f + 1, ++g]; } f(1), true; "#,
    @r###" (function f() { for(;;); var _val = y.bar; var _obj = z; var _val$1; _val$1 = _val ? true : "hi"; _obj.foo = _val$1; var _wri = g + 1; g = _wri; return +[ 1, { x: _val }, f + 1, _wri]; })(1); "###
);

case!(
    assign_to_expr,
    r#" e |= 0; foo().x |= 1; "#,
    @r###" e = e | 0; var _obj = foo(); _obj.x = _obj.x | 1; "###
);

case!(
    labels,
    r#" outer: for (;;) { inner: for (;;) { if (foo) continue inner; if (bar) break outer; } } "#,
    @r###" outer: for(;;)inner: for(;;){ if (foo) continue inner; if (bar) break outer; } "###
);

case!(
    nested_no_side_effects,
    r#" let x = 1; if (foo) { g = just_read_global_state; } log(x); let y = 1; if (foo) { function maybe_change_y() { if (bar) y = 10; } maybe_change_y(); } log(y); "#,
    @r###" if (foo) g = just_read_global_state; log(1); var y = 1; if (foo) { if (bar) y = 10; } log(y); "###
);

case!(
    snudown_js_like,
    r#" var r; g = something; r || (r = {}); var s = {}; var o; for (o in r) s[o] = r[o]; r.x = 1; for (o in s) r[o] = s[o]; var stuff = (function(r_inner) { return { xy: r_inner.x * 2 }; })(r); var xy = stuff.xy; window.foo = function foo(z) { return z + xy; }; "#,
    @r###" g = something; window.foo = function(z) { return z + 2; }; "###
);

case!(
    snudown_js_like2,
    r#" var o, c = {}, s = {}; for (o in c) c.hasOwnProperty(o) && (s[o] = c[o]); var u = console.log.bind(console), b = console.warn.bind(console); for (o in s) s.hasOwnProperty(o) && (c[o] = s[o]); s = null; var k, v, d, h = 0, w = !1; k = c.buffer ? c.buffer : new ArrayBuffer(16777216), c.HEAP8 = v = new Int8Array(k), c.HEAP32 = s = new Int32Array(k), c.HEAPU8 = d = new Uint8Array(k), s[2340] = 5252272; var m = [], _ = [], p = [], y = []; c.preloadedImages = {}, c.preloadedAudios = {}, s = null, s = '\0\0\0\0\0'; var g = c._default_renderer = k._default_renderer, A = c._free = k._free; c._i64Add = k._i64Add, c._i64Subtract = k._i64Subtract; var C = c._wiki_renderer = k._wiki_renderer; c.establishStackSpace = k.establishStackSpace; var S, x = c.stackAlloc = k.stackAlloc, E = c.stackRestore = k.stackRestore, I = c.stackSave = k.stackSave; c.dynCall_iii = k.dynCall_iii, c.dynCall_iiii = k.dynCall_iiii, c.asm = k; s && (function (r) { var e, i = r.length; for (e = 0; e < i; ++e) d[8 + e] = r.charCodeAt(e) })(s); "#,
    @r###" console.log.bind(console); console.warn.bind(console); var _alt = new ArrayBuffer(16777216); new Int8Array(_alt); var _val = new Int32Array(_alt); var _val$1 = new Uint8Array(_alt); _val[2340] = 5252272; _alt._default_renderer; _alt._free; _alt._i64Add; _alt._i64Subtract; _alt._wiki_renderer; _alt.establishStackSpace; _alt.stackAlloc; _alt.stackRestore; _alt.stackSave; _alt.dynCall_iii; _alt.dynCall_iiii; var e = 0; for(; e < 5;){ var _prp = 8 + e; _val$1[_prp] = "\0\0\0\0\0".charCodeAt(e); e = e + 1; } "###
);

case!(
    fn_scopes_do_not_deter_ssa_inlining,
    r#" let x = foo(); function f() { something(); } g = x; f(); f(); "#,
    @r###" var _fun = function() { something(); }; g = foo(); _fun(); _fun(); "###
);

case!(
    inline_into_if_but_not_past_effects,
    r#" let x = g; if (foo) { log(x); } let y = h; if (bar()) { log(y); } i = function() { return x = y = 1; } "#,
    @r###" if (foo) log(g); var y = h; if (bar()) log(y); i = function() { y = 1; return 1; }; "###
);

case!(
    dont_inline_into_loop,
    r#" let x = g; do { log(x); g = 1; } while (foo); "#,
    @r###" var x = g; for(;;){ log(x); g = 1; if (foo) ; else break; } "###
);

case!(
    completely_redundant_var,
    r#" var x = 0; x += 1; var n = x; if (foo) { x += 1; log(x); } else { log(n); } "#,
    @r###" if (foo) log(2); else log(1); "###
);

case!(
    deconflict_nan,
    r#" g1 = 0 / 0; { let NaN = 1; if (foo) { NaN = 2; } g3 = NaN; } "#,
    @r###" g1 = NaN; var NaN$1 = 1; if (foo) NaN$1 = 2; g3 = NaN$1; "###
);

case!(
    referencing_outer_scope_moved_later,
    r#" var x; // converted to ssa, moved down to x = 0
    g = function() { x(); }; x = foo; "#,
    @r###" g = function() { x(); }; var x = foo; "###
);

case!(
    referencing_outer_scope_moved_later2,
    r#" var x; // stays mutable, moved down to x = 0
    g = function() { x(); }; x = foo; g2 = function() { x = 1; }; "#,
    @r###" g = function() { x(); }; var x = foo; g2 = function() { x = 1; }; "###
);

case!(
    mutually_recursive_fns,
    r#" function a() { b(); } function b() { c(); } function c() { a(); } g1 = a; g2 = b; g3 = c; "#,
    @r###" var _fun = function() { _fun$1(); }; var _fun$1 = function() { _fun$2(); }; var _fun$2 = function() { _fun(); }; g1 = _fun; g2 = _fun$1; g3 = _fun$2; "###
);

case!(
    fn_hoisting_toplevel,
    r#" foo(); function foo() { foo_(); } (function() { bar(); function bar() { bar_(); } })(); "#,
    @r###" foo_(); bar_(); "###
);

case!(
    fn_hoisting_blocks,
    r#" if (x) { foo(); function foo() { foo_(); } } foo(); "#,
    @r###" var foo; if (x) { void 0(); foo = function() { foo_(); }; } foo(); "###
);

case!(
    fn_hoisting_labelled,
    r#" foo(); label: function foo() { foo_(); } "#,
    @r###" var foo; label: foo = function() { foo_(); }; foo(); "###
);

case!(
    switch,
    r#" switch (x) { case 1: one(); break; case "foo": case bar: two(); default: def(); } "#,
    @r###" var _tst = bar; switch(x){ case 1: one(); break; case "foo": case _tst: two(); default: def(); } "###
);

case!(
    switch_scoping_forwards,
    r#" switch (x) { case 1: var v = 2; let l = 3; default: g1 = v; g2 = l; } "#,
    @r###" var v; switch(x){ case 1: v = 2; var l = 3; default: g1 = v; g2 = l; } "###
);

case!(
    switch_scoping_forwards_safe,
    r#" switch (x) { case 1: var v = 2; let l = 3; g1 = v; g2 = l; default: def(); } "#,
    @r###" switch(x){ case 1: g1 = 2; g2 = 3; default: def(); } "###
);

case!(
    switch_scoping_backwards,
    r#" switch (x) { case 1: g1 = v; g2 = l; break; default: var v = 2; let l = 3; } "#,
    @r###" var v; switch(x){ case 1: g1 = v; g2 = l; break; default: v = 2; var l = 3; } "###
);

case!(
    switch_dont_forward_past_cases,
    r#" switch (x) { case 1: let y = foo(); default: g = y; } "#,
    @r###" switch(x){ case 1: var y = foo(); default: g = y; } "###
);

case!(
    preserves_prop_calls,
    r#" console.log.bind(console); "#,
    @"console.log.bind(console); "
);

case!(
    inserts_parens_where_necessary,
    r#" g = (x + 1) * 2; (function f() { f(); })(); "#,
    @r###" g = (x + 1) * 2; (function f() { f(); })(); "###
);

case!(
    unreferenced_params_before_referenced,
    r#" g = function(a, b, c) { h = c; }; "#,
    @r###" g = function(_, _$1, c) { h = c; }; "###
);

case!(
    arg_shadow_fn_name_decl,
    r#" function f(f, a) { f(a); } g = f; "#,
    @r###" g = function(f, a) { f(a); }; "###
);

case!(
    arg_shadow_fn_name_expr,
    r#" g = function f(f, a) { f(a); }; "#,
    @r###" g = function(f, a) { f(a); }; "###
);

case!(
    switch_case_side_effects,
    r#" g = function(x) { var r = 10; switch (x) { default: def(); break; case r = 1337: leet(); break; case 123: abc(); break; } return r; }; "#,
    @r###" g = function(x) { switch(x){ default: def(); break; case 1337: leet(); break; case 123: abc(); break; } return 1337; }; "###
);
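// An added sketch (not from the original test file) of how a new test case
// would be written with the `case!` macro above; the case name and input JS
// are hypothetical. With the `insta` crate, the inline snapshot can start out
// empty and be filled in from the actual optimizer output, assuming the
// companion `cargo-insta` tool is installed (`cargo insta test`, then
// `cargo insta review` to accept the generated snapshot):
//
// case!(
//     hypothetical_constant_fold,
//     r#" g = 1 + 2; "#,
//     @""
// );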
return z + 2;
random_line_split
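The `case!` invocations above read as inline-snapshot tests: a JS source string goes in, and the expected optimizer output is pinned with an `@r###"..."###` literal, which is the syntax the `insta` crate uses for inline snapshots. The macro definition itself is not part of this excerpt, so the sketch below is a hypothetical reconstruction; `compile` stands in for whatever source-to-source entry point the real crate exposes.

// Hypothetical reconstruction of the `case!` macro seen above; not the
// original definition. `compile` is a placeholder for the crate's actual
// optimizer entry point.
macro_rules! case {
    ($name:ident, $src:expr, @$expected:literal) => {
        #[test]
        fn $name() {
            let output = compile($src); // run the optimizer under test
            // Pin the result as an inline snapshot (insta syntax).
            insta::assert_snapshot!(output, @$expected);
        }
    };
}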
storage.rs
hash_size: None, chunk_pages: None, log_bps: dfl_params.log_bps, mem_avail_err_max: 0.1, mem_avail_inner_retries: 2, mem_avail_outer_retries: 2, first_try: true, mem_usage: 0, mem_probe_at: 0, prev_mem_avail: 0, } } } pub struct StorageBench {} impl Bench for StorageBench { fn desc(&self) -> BenchDesc { BenchDesc::new("storage", "Benchmark storage device with rd-hashd") .takes_run_props() .crit_mem_prot_only() } fn parse(&self, spec: &JobSpec, _prev_data: Option<&JobData>) -> Result<Box<dyn Job>> { Ok(Box::new(StorageJob::parse(spec)?)) } fn doc<'a>(&self, out: &mut Box<dyn Write + 'a>) -> Result<()> { const DOC: &[u8] = include_bytes!("../../doc/storage.md"); write!(out, "{}", String::from_utf8_lossy(DOC))?; Ok(()) } } #[derive(Clone, Debug, Serialize, Deserialize)] pub struct StorageRecord { pub period: (u64, u64), pub final_mem_probe_periods: Vec<(u64, u64)>, pub base_hashd_knobs: HashdKnobs, pub mem: MemInfo, pub mem_usages: Vec<f64>, pub mem_sizes: Vec<f64>, } #[derive(Clone, Serialize, Deserialize)] pub struct StorageResult { pub mem_offload_factor: f64, pub mem_usage: usize, pub mem_usage_stdev: usize, pub mem_size: usize, pub mem_size_stdev: usize, pub all_rstat: ResourceStat, pub final_rstat: ResourceStat, pub iolat: [BTreeMap<String, BTreeMap<String, f64>>; 2], pub nr_reports: (u64, u64), } impl StorageJob { pub fn parse(spec: &JobSpec) -> Result<StorageJob> { let mut job = StorageJob::default(); for (k, v) in spec.props[0].iter() { match k.as_str() { "apply" => job.apply = v.len() == 0 || v.parse::<bool>()?, "commit" => job.commit = v.len() == 0 || v.parse::<bool>()?, "loops" => job.loops = v.parse::<u32>()?, "rps-max" => job.rps_max = Some(v.parse::<u32>()?), "hash-size" => job.hash_size = Some(parse_size(v)? as usize), "chunk-pages" => job.chunk_pages = Some(v.parse::<usize>()?), "log-bps" => job.log_bps = parse_size(v)?, "mem-avail-err-max" => job.mem_avail_err_max = v.parse::<f64>()?, "mem-avail-inner-retries" => job.mem_avail_inner_retries = v.parse::<u32>()?, "mem-avail-outer-retries" => job.mem_avail_outer_retries = v.parse::<u32>()?, k => bail!("unknown property key {:?}", k), } } if job.commit { job.apply = true; } Ok(job) } fn hashd_mem_usage_rep(rep: &rd_agent_intf::Report) -> usize { match rep.usages.get(HASHD_BENCH_SVC_NAME) { Some(usage) => usage.mem_bytes as usize, None => 0, } } fn measure_supportable_memory_size( &mut self, rctx: &mut RunCtx, fake_cpu_bench: &HashdFakeCpuBench, ) -> Result<(usize, f64)> { fake_cpu_bench.start(rctx)?; const NR_MEM_USAGES: usize = 10; let mut mem_usages = VecDeque::<usize>::new(); let mut mem_avail_err: f64 = 0.0; rctx.wait_cond( |af, progress| { let cmd = &af.cmd.data; let bench = &af.bench.data; let rep = &af.report.data; // Use period max to avoid confusions from temporary drops // caused by e.g. bench completion. mem_usages.push_front(Self::hashd_mem_usage_rep(rep)); mem_usages.truncate(NR_MEM_USAGES); self.mem_usage = mem_usages.iter().fold(0, |max, u| max.max(*u)); self.mem_probe_at = rep.bench_hashd.mem_probe_at.timestamp() as u64; if !rctx.test { let mem = rctx.mem_info(); mem_avail_err = (self.mem_usage as f64 - mem.target as f64) / mem.target as f64; } // Abort early iff we go over. Memory usage may keep rising // through refine stages, so we'll check for going under // after run completion.
if mem_avail_err > self.mem_avail_err_max && rep.bench_hashd.phase > rd_hashd_intf::Phase::BenchMemBisect { return true; } progress.set_status(&format!( "[{}] mem: {:>5}/{:>5}({:+5.1}%) rw:{:>5}/{:>5} p50/90/99: {:>5}/{:>5}/{:>5}", rep.bench_hashd.phase.name(), format_size(rep.bench_hashd.mem_probe_size), format_size(self.mem_usage), mem_avail_err * 100.0, format_size_dashed(rep.usages[ROOT_SLICE].io_rbps), format_size_dashed(rep.usages[ROOT_SLICE].io_wbps), format_duration_dashed(rep.iolat.map["read"]["50"]), format_duration_dashed(rep.iolat.map["read"]["90"]), format_duration_dashed(rep.iolat.map["read"]["99"]), )); bench.hashd_seq >= cmd.bench_hashd_seq }, None, Some(BenchProgress::new().monitor_systemd_unit(HASHD_BENCH_SVC_NAME)), )?; rctx.stop_hashd_bench()?; if mem_avail_err > self.mem_avail_err_max { return Ok((0, mem_avail_err)); } let mem_size = rctx.access_agent_files(|af| { af.bench.data.hashd.mem_size as f64 * af.bench.data.hashd.mem_frac }) as usize; Ok((mem_size, mem_avail_err)) } fn process_retry(&mut self, rctx: &mut RunCtx) -> Result<bool> { let mem = rctx.mem_info(); let cur_mem_avail = mem.avail + self.mem_usage - mem.target; let consistent = (cur_mem_avail as f64 - self.prev_mem_avail as f64).abs() < self.mem_avail_err_max * cur_mem_avail as f64; let retry_outer = match (self.first_try, consistent, self.mem_avail_inner_retries > 0) { (true, _, _) => { warn!( "storage: Starting over with new mem_avail {}", format_size(cur_mem_avail) ); true } (false, true, _) => { warn!( "storage: mem_avail consistent with the last, \ starting over with new mem_avail {}", format_size(cur_mem_avail) ); true } (false, false, false) => { warn!("storage: Ran out of inner tries, starting over"); true } (false, false, true) => { warn!( "storage: Retrying without updating mem_avail {} (prev {}, cur {})", format_size(mem.avail), format_size(self.prev_mem_avail), format_size(cur_mem_avail) ); self.mem_avail_inner_retries -= 1; false } }; if retry_outer { rctx.update_mem_avail(cur_mem_avail)?; if self.mem_avail_outer_retries == 0 { bail!("available memory keeps fluctuating, keep the system idle"); } self.mem_avail_outer_retries -= 1; } self.prev_mem_avail = cur_mem_avail; self.first_try = false; Ok(retry_outer) } pub fn format_header<'a>( &self, out: &mut Box<dyn Write + 'a>, rec: &StorageRecord, _res: &StorageResult, include_loops: bool, ) { write!( out, "Params: hash_size={} rps_max={} log_bps={}", format_size(rec.base_hashd_knobs.hash_size), self.rps_max.unwrap_or(rec.base_hashd_knobs.rps_max), format_size(self.log_bps) ) .unwrap(); if include_loops { writeln!(out, " loops={}", self.loops).unwrap(); } else { writeln!(out, "").unwrap(); } } fn format_rstat<'a>( &self, out: &mut Box<dyn Write + 'a>, _rec: &StorageRecord, res: &StorageResult, opts: &FormatOpts, ) { if opts.full { writeln!(out, "Resource stat:\n").unwrap(); res.all_rstat.format(out, "ALL", opts); writeln!(out, "").unwrap(); res.final_rstat.format(out, "FINAL", opts); writeln!(out, "").unwrap(); } writeln!( out, "IO BPS: read_final={} write_final={} read_all={} write_all={}", format_size(res.final_rstat.io_bps.0["mean"]), format_size(res.final_rstat.io_bps.1["mean"]), format_size(res.all_rstat.io_bps.0["mean"]), format_size(res.all_rstat.io_bps.1["mean"]) ) .unwrap(); } fn
<'a>( &self, out: &mut Box<dyn Write + 'a>, rec: &StorageRecord, res: &StorageResult, ) { write!( out, "Memory offloading: factor={:.3}@{} ", res.mem_offload_factor, rec.mem.profile ) .unwrap(); if self.loops > 1 { writeln!( out, "usage/stdev={}/{} size/stdev={}/{} missing={}%", format_size(res.mem_usage), format_size(res.mem_usage_stdev), format_size(res.mem_size), format_size(res.mem_size_stdev), format_pct(Studies::reports_missing(res.nr_reports)), ) .unwrap(); } else { writeln!( out, "usage={} size={} missing={}%", format_size(res.mem_usage), format_size(res.mem_size), format_pct(Studies::reports_missing(res.nr_reports)), ) .unwrap(); } } pub fn format_result<'a>( &self, out: &mut Box<dyn Write + 'a>, rec: &StorageRecord, res: &StorageResult, header: bool, opts: &FormatOpts, ) { if header { self.format_header(out, rec, res, true); writeln!(out, "").unwrap(); } StudyIoLatPcts::format_rw(out, &res.iolat, opts, None); writeln!(out, "").unwrap(); self.format_rstat(out, rec, res, opts); writeln!(out, "").unwrap(); self.format_mem_summary(out, rec, res); } } impl Job for StorageJob { fn sysreqs(&self) -> BTreeSet<SysReq> { HASHD_SYSREQS.clone() } fn run(&mut self, rctx: &mut RunCtx) -> Result<serde_json::Value> { rctx.set_prep_testfiles() .disable_zswap() .start_agent(vec![])?; // Depending on mem-profile, we might be using a large balloon which // can push down available memory below workload's memory.low, // cratering memory reclaim. Make sure memory protection is off. We // aren't testing memory protection. rctx.access_agent_files(|af| { af.slices.data.disable_seqs.mem = af.report.data.seq; af.slices.save().unwrap(); }); let saved_mem_avail_inner_retries = self.mem_avail_inner_retries; let mut started_at; let mut final_mem_probe_periods = vec![]; let mut mem_usages = vec![]; let mut mem_sizes = vec![]; let mut fake_cpu_bench; 'outer: loop { final_mem_probe_periods.clear(); mem_usages.clear(); mem_sizes.clear(); self.mem_avail_inner_retries = saved_mem_avail_inner_retries; started_at = unix_now(); let base = HashdFakeCpuBench::base(rctx); fake_cpu_bench = HashdFakeCpuBench { rps_max: self.rps_max.unwrap_or(base.rps_max), hash_size: self.hash_size.unwrap_or(base.hash_size), chunk_pages: self.chunk_pages.unwrap_or(base.chunk_pages), log_bps: self.log_bps, ..base }; // We now know all the parameters. Let's run the actual benchmark. 'inner: loop { info!( "storage: Measuring supportable memory footprint and IO latencies ({}/{})", mem_sizes.len() + 1, self.loops ); let (mem_size, mem_avail_err) = self.measure_supportable_memory_size(rctx, &fake_cpu_bench)?; // check for both going over and under, see the above function if mem_avail_err.abs() > self.mem_avail_err_max && !rctx.test { warn!( "storage: mem_avail error |{:.2}|% > {:.2}%, please keep system idle", mem_avail_err * 100.0, self.mem_avail_err_max * 100.0 ); if self.process_retry(rctx)? { continue 'outer; } else { continue 'inner; } } else { self.prev_mem_avail = 0; self.first_try = false; } final_mem_probe_periods.push((self.mem_probe_at, unix_now())); mem_usages.push(self.mem_usage as f64); mem_sizes.push(mem_size as f64); info!( "storage: Supportable memory footprint {}", format_size(mem_size) ); if mem_sizes.len() >= self.loops as usize { break 'outer; } } } Ok(serde_json::to_value(&StorageRecord { period: (started_at, unix_now()), final_mem_probe_periods, base_hashd_knobs: rctx.access_agent_files(|af| af.bench.data.hashd.clone()), mem: rctx.mem_info().clone(), mem_usages, mem_sizes, })?)
} fn study(&self, rctx: &mut RunCtx, rec_json: serde_json::Value) -> Result<serde_json::Value> { let rec: StorageRecord = parse_json_value_or_dump(rec_json)?; // Study and record the results. let all_rstat_study_ctx = ResourceStatStudyCtx::new(); let mut all_rstat_study = ResourceStatStudy::new(ROOT_SLICE, &all_rstat_study_ctx); let mut study_read_lat_pcts = StudyIoLatPcts::new("read", None); let mut study_write_lat_pcts = StudyIoLatPcts::new("write", None); let mut studies = Studies::new() .add_multiple(&mut all_rstat_study.studies()) .add_multiple(&mut study_read_lat_pcts.studies()) .add_multiple(&mut study_write_lat_pcts.studies()); let nr_reports = studies.run(rctx, rec.period)?; let final_rstat_study_ctx = ResourceStatStudyCtx::new(); let mut final_rstat_study = ResourceStatStudy::new(ROOT_SLICE, &final_rstat_study_ctx); let mut studies = Studies::new().add_multiple(&mut final_rstat_study.studies()); for (start, end) in rec.final_mem_probe_periods.iter() { studies.run(rctx, (*start, *end))?; } let mem_usage = statistical::mean(&rec.mem_usages); let mem_usage_stdev = if rec.mem_usages.len() > 1 { statistical::standard_deviation(&rec.mem_usages, None)
format_mem_summary
identifier_name
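The `study` method above is cut off mid-expression by the excerpt, right after it starts computing the mean and standard deviation of `mem_usages`. Judging from the `StorageResult` fields it must fill (mean and stdev for usage and size, plus `mem_offload_factor`), the remaining aggregation plausibly looks like the sketch below; the offload-factor formula is an assumption inferred from the field names, not visible code.

// A sketch, not the original: aggregate the per-loop samples collected by
// `run` using the `statistical` crate already referenced above. Treating
// mem_offload_factor as supportable size over actual usage is an assumption
// based on the field names in `StorageResult`.
fn summarize(mem_usages: &[f64], mem_sizes: &[f64]) -> (f64, f64, f64) {
    let mem_usage = statistical::mean(mem_usages);
    let mem_size = statistical::mean(mem_sizes);
    let mem_offload_factor = mem_size / mem_usage;
    (mem_usage, mem_size, mem_offload_factor)
}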
categorical.rs
//! # One-hot Encoding For [RealNumber](../../math/num/trait.RealNumber.html) Matrices //! Transform a data [Matrix](../../linalg/trait.BaseMatrix.html) by replacing all categorical variables with their one-hot equivalents //! //! Internally OneHotEncoder treats every categorical column as a series and transforms it using [CategoryMapper](../series_encoder/struct.CategoryMapper.html) //! //! ### Usage Example //! ``` //! use smartcore::linalg::naive::dense_matrix::DenseMatrix; //! use smartcore::preprocessing::categorical::{OneHotEncoder, OneHotEncoderParams}; //! let data = DenseMatrix::from_2d_array(&[ //! &[1.5, 1.0, 1.5, 3.0], //! &[1.5, 2.0, 1.5, 4.0], //! &[1.5, 1.0, 1.5, 5.0], //! &[1.5, 2.0, 1.5, 6.0], //! ]); //! let encoder_params = OneHotEncoderParams::from_cat_idx(&[1, 3]); //! // Infer number of categories from data and return a reusable encoder //! let encoder = OneHotEncoder::fit(&data, encoder_params).unwrap(); //! // Transform categorical to one-hot encoded (can transform similar) //! let oh_data = encoder.transform(&data).unwrap(); //! // Produces the following: //! // &[1.5, 1.0, 0.0, 1.5, 1.0, 0.0, 0.0, 0.0] //! // &[1.5, 0.0, 1.0, 1.5, 0.0, 1.0, 0.0, 0.0] //! // &[1.5, 1.0, 0.0, 1.5, 0.0, 0.0, 1.0, 0.0] //! // &[1.5, 0.0, 1.0, 1.5, 0.0, 0.0, 0.0, 1.0] //! ``` use std::iter; use crate::error::Failed; use crate::linalg::Matrix; use crate::preprocessing::data_traits::{CategoricalFloat, Categorizable}; use crate::preprocessing::series_encoder::CategoryMapper; /// OneHotEncoder Parameters #[derive(Debug, Clone)] pub struct OneHotEncoderParams { /// Column numbers that contain categorical variables pub col_idx_categorical: Option<Vec<usize>>, /// (Currently not implemented) Try and infer which of the matrix columns are categorical variables infer_categorical: bool, } impl OneHotEncoderParams { /// Generate parameters from categorical variable column numbers pub fn from_cat_idx(categorical_params: &[usize]) -> Self { Self { col_idx_categorical: Some(categorical_params.to_vec()), infer_categorical: false, } } } /// Calculate the offset to parameters due to the introduction of one-hot encoding fn find_new_idxs(num_params: usize, cat_sizes: &[usize], cat_idxs: &[usize]) -> Vec<usize> { // This function uses iterators and returns a vector. // In case we get a huge number of parameters this might be a problem // todo: Change this such that it will return an iterator let cat_idx = cat_idxs.iter().copied().chain((num_params..).take(1)); // The offset is constant between two categorical values; here we calculate the number of steps // for which it remains constant let repeats = cat_idx.scan(0, |a, v| { let im = v + 1 - *a; *a = v; Some(im) }); // Calculate the offset to parameter idx due to newly introduced one-hot vectors let offset_ = cat_sizes.iter().scan(0, |a, &v| { *a = *a + v - 1; Some(*a) }); let offset = (0..1).chain(offset_); let new_param_idxs: Vec<usize> = (0..num_params) .zip( repeats .zip(offset) .map(|(r, o)| iter::repeat(o).take(r)) .flatten(), ) .map(|(idx, ofst)| idx + ofst) .collect(); new_param_idxs } fn validate_col_is_categorical<T: Categorizable>(data: &[T]) -> bool { for v in data { if !v.is_valid() { return false; } } true } /// Encode categorical variables of a data matrix to one-hot #[derive(Debug, Clone)] pub struct
{ category_mappers: Vec<CategoryMapper<CategoricalFloat>>, col_idx_categorical: Vec<usize>, } impl OneHotEncoder { /// Create an encoder instance with categories inferred from the data matrix pub fn fit<T, M>(data: &M, params: OneHotEncoderParams) -> Result<OneHotEncoder, Failed> where T: Categorizable, M: Matrix<T>, { match (params.col_idx_categorical, params.infer_categorical) { (None, false) => Err(Failed::fit( "Must pass categorical series ids or infer flag", )), (Some(_idxs), true) => Err(Failed::fit( "Ambiguous parameters, got both infer and category ids", )), (Some(mut idxs), false) => { // make sure categories have same order as data columns idxs.sort_unstable(); let (nrows, _) = data.shape(); // col buffer to avoid allocations let mut col_buf: Vec<T> = iter::repeat(T::zero()).take(nrows).collect(); let mut res: Vec<CategoryMapper<CategoricalFloat>> = Vec::with_capacity(idxs.len()); for &idx in &idxs { data.copy_col_as_vec(idx, &mut col_buf); if !validate_col_is_categorical(&col_buf) { let msg = format!( "Column {} of data matrix contains non-categorizable (integer) values", idx ); return Err(Failed::fit(&msg[..])); } let hashable_col = col_buf.iter().map(|v| v.to_category()); res.push(CategoryMapper::fit_to_iter(hashable_col)); } Ok(Self { category_mappers: res, col_idx_categorical: idxs, }) } (None, true) => { todo!("Auto-Inference for Categorical Variables not yet implemented") } } } /// Transform categorical variables to one-hot encoded and return a new matrix pub fn transform<T, M>(&self, x: &M) -> Result<M, Failed> where T: Categorizable, M: Matrix<T>, { let (nrows, p) = x.shape(); let additional_params: Vec<usize> = self .category_mappers .iter() .map(|enc| enc.num_categories()) .collect(); // Each category of size v adds v-1 params let expandws_p: usize = p + additional_params.iter().fold(0, |cs, &v| cs + v - 1); let new_col_idx = find_new_idxs(p, &additional_params[..], &self.col_idx_categorical[..]); let mut res = M::zeros(nrows, expandws_p); for (pidx, &old_cidx) in self.col_idx_categorical.iter().enumerate() { let cidx = new_col_idx[old_cidx]; let col_iter = (0..nrows).map(|r| x.get(r, old_cidx).to_category()); let sencoder = &self.category_mappers[pidx]; let oh_series = col_iter.map(|c| sencoder.get_one_hot::<T, Vec<T>>(&c)); for (row, oh_vec) in oh_series.enumerate() { match oh_vec { None => { // Since we support T types, a bad value in a series causes it to be invalid let msg = format!("At least one value in column {} doesn't conform to category definition", old_cidx); return Err(Failed::transform(&msg[..])); } Some(v) => { // copy one-hot vectors to their place in the data matrix for (col_ofst, &val) in v.iter().enumerate() { res.set(row, cidx + col_ofst, val); } } } } } // copy old data in x to their new location while skipping categorical vars (already treated) let mut skip_idx_iter = self.col_idx_categorical.iter(); let mut cur_skip = skip_idx_iter.next(); for (old_p, &new_p) in new_col_idx.iter().enumerate() { // if we hit a treated variable, skip it if let Some(&v) = cur_skip { if v == old_p { cur_skip = skip_idx_iter.next(); continue; } } for r in 0..nrows { let val = x.get(r, old_p); res.set(r, new_p, val); } } Ok(res) } } #[cfg(test)] mod tests { use super::*; use crate::linalg::naive::dense_matrix::DenseMatrix; use crate::preprocessing::series_encoder::CategoryMapper; #[test] fn adjust_idxs() { assert_eq!(find_new_idxs(0, &[], &[]), Vec::<usize>::new()); // [0,1,2] -> [0, 1, 1, 1, 2] assert_eq!(find_new_idxs(3, &[3], &[1]), vec![0, 1, 4]); } fn
build_cat_first_and_last() -> (DenseMatrix<f64>, DenseMatrix<f64>) { let orig = DenseMatrix::from_2d_array(&[ &[1.0, 1.5, 3.0], &[2.0, 1.5, 4.0], &[1.0, 1.5, 5.0], &[2.0, 1.5, 6.0], ]); let oh_enc = DenseMatrix::from_2d_array(&[ &[1.0, 0.0, 1.5, 1.0, 0.0, 0.0, 0.0], &[0.0, 1.0, 1.5, 0.0, 1.0, 0.0, 0.0], &[1.0, 0.0, 1.5, 0.0, 0.0, 1.0, 0.0], &[0.0, 1.0, 1.5, 0.0, 0.0, 0.0, 1.0], ]); (orig, oh_enc) } fn build_fake_matrix() -> (DenseMatrix<f64>, DenseMatrix<f64>) { // Categorical columns at indices 1 and 3 let orig = DenseMatrix::from_2d_array(&[ &[1.5, 1.0, 1.5, 3.0], &[1.5, 2.0, 1.5, 4.0], &[1.5, 1.0, 1.5, 5.0], &[1.5, 2.0, 1.5, 6.0], ]); let oh_enc = DenseMatrix::from_2d_array(&[ &[1.5, 1.0, 0.0, 1.5, 1.0, 0.0, 0.0, 0.0], &[1.5, 0.0, 1.0, 1.5, 0.0, 1.0, 0.0, 0.0], &[1.5, 1.0, 0.0, 1.5, 0.0, 0.0, 1.0, 0.0], &[1.5, 0.0, 1.0, 1.5, 0.0, 0.0, 0.0, 1.0], ]); (orig, oh_enc) } #[test] fn hash_encode_f64_series() { let series = vec![3.0, 1.0, 2.0, 1.0]; let hashable_series: Vec<CategoricalFloat> = series.iter().map(|v| v.to_category()).collect(); let enc = CategoryMapper::from_positional_category_vec(hashable_series); let inv = enc.invert_one_hot(vec![0.0, 0.0, 1.0]); let orig_val: f64 = inv.unwrap().into(); assert_eq!(orig_val, 2.0); } #[test] fn test_fit() { let (x, _) = build_fake_matrix(); let params = OneHotEncoderParams::from_cat_idx(&[1, 3]); let oh_enc = OneHotEncoder::fit(&x, params).unwrap(); assert_eq!(oh_enc.category_mappers.len(), 2); let num_cat: Vec<usize> = oh_enc .category_mappers .iter() .map(|a| a.num_categories()) .collect(); assert_eq!(num_cat, vec![2, 4]); } #[test] fn matrix_transform_test() { let (x, expected_x) = build_fake_matrix(); let params = OneHotEncoderParams::from_cat_idx(&[1, 3]); let oh_enc = OneHotEncoder::fit(&x, params).unwrap(); let nm = oh_enc.transform(&x).unwrap(); assert_eq!(nm, expected_x); let (x, expected_x) = build_cat_first_and_last(); let params = OneHotEncoderParams::from_cat_idx(&[0, 2]); let oh_enc = OneHotEncoder::fit(&x, params).unwrap(); let nm = oh_enc.transform(&x).unwrap(); assert_eq!(nm, expected_x); } #[test] fn fail_on_bad_category() { let m = DenseMatrix::from_2d_array(&[ &[1.0, 1.5, 3.0], &[2.0, 1.5, 4.0], &[1.0, 1.5, 5.0], &[2.0, 1.5, 6.0], ]); let params = OneHotEncoderParams::from_cat_idx(&[1]); assert!(OneHotEncoder::fit(&m, params).is_err()); } }
OneHotEncoder
identifier_name
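To make the offset arithmetic in `find_new_idxs` concrete, here is a worked example derived from the module docs above: four columns where columns 1 and 3 are categorical with 2 and 4 categories respectively. Column 1 expands from one to two columns, shifting old column 2 right by one, and old column 3 lands at index 4, for 8 encoded columns in total.

// Worked example for `find_new_idxs`; the inputs mirror the 4-column doc
// example retained above (categorical columns 1 and 3 with 2 and 4
// categories), so the expected mapping is derived, not new data.
#[test]
fn worked_example() {
    assert_eq!(find_new_idxs(4, &[2, 4], &[1, 3]), vec![0, 1, 3, 4]);
}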
categorical.rs
//! # One-hot Encoding For [RealNumber](../../math/num/trait.RealNumber.html) Matricies //! Transform a data [Matrix](../../linalg/trait.BaseMatrix.html) by replacing all categorical variables with their one-hot equivalents //! //! Internally OneHotEncoder treats every categorical column as a series and transforms it using [CategoryMapper](../series_encoder/struct.CategoryMapper.html) //! //! ### Usage Example //! ``` //! use smartcore::linalg::naive::dense_matrix::DenseMatrix; //! use smartcore::preprocessing::categorical::{OneHotEncoder, OneHotEncoderParams}; //! let data = DenseMatrix::from_2d_array(&[ //! &[1.5, 1.0, 1.5, 3.0], //! &[1.5, 2.0, 1.5, 4.0], //! &[1.5, 1.0, 1.5, 5.0], //! &[1.5, 2.0, 1.5, 6.0], //! ]); //! let encoder_params = OneHotEncoderParams::from_cat_idx(&[1, 3]); //! // Infer number of categories from data and return a reusable encoder //! let encoder = OneHotEncoder::fit(&data, encoder_params).unwrap(); //! // Transform categorical to one-hot encoded (can transform similar) //! let oh_data = encoder.transform(&data).unwrap(); //! // Produces the following: //! // &[1.5, 1.0, 0.0, 1.5, 1.0, 0.0, 0.0, 0.0] //! // &[1.5, 0.0, 1.0, 1.5, 0.0, 1.0, 0.0, 0.0] //! // &[1.5, 1.0, 0.0, 1.5, 0.0, 0.0, 1.0, 0.0] //! // &[1.5, 0.0, 1.0, 1.5, 0.0, 0.0, 0.0, 1.0] //! ``` use std::iter; use crate::error::Failed; use crate::linalg::Matrix; use crate::preprocessing::data_traits::{CategoricalFloat, Categorizable}; use crate::preprocessing::series_encoder::CategoryMapper; /// OneHotEncoder Parameters #[derive(Debug, Clone)] pub struct OneHotEncoderParams { /// Column number that contain categorical variable pub col_idx_categorical: Option<Vec<usize>>, /// (Currently not implemented) Try and infer which of the matrix columns are categorical variables infer_categorical: bool, } impl OneHotEncoderParams { /// Generate parameters from categorical variable column numbers pub fn from_cat_idx(categorical_params: &[usize]) -> Self { Self { col_idx_categorical: Some(categorical_params.to_vec()), infer_categorical: false, } } } /// Calculate the offset to parameters to due introduction of one-hot encoding fn find_new_idxs(num_params: usize, cat_sizes: &[usize], cat_idxs: &[usize]) -> Vec<usize> { // This functions uses iterators and returns a vector. // In case we get a huge amount of paramenters this might be a problem // todo: Change this such that it will return an iterator
let repeats = cat_idx.scan(0, |a, v| { let im = v + 1 - *a; *a = v; Some(im) }); // Calculate the offset to parameter idx due to newly introduced one-hot vectors let offset_ = cat_sizes.iter().scan(0, |a, &v| { *a = *a + v - 1; Some(*a) }); let offset = (0..1).chain(offset_); let new_param_idxs: Vec<usize> = (0..num_params) .zip( repeats .zip(offset) .map(|(r, o)| iter::repeat(o).take(r)) .flatten(), ) .map(|(idx, ofst)| idx + ofst) .collect(); new_param_idxs } fn validate_col_is_categorical<T: Categorizable>(data: &[T]) -> bool { for v in data { if !v.is_valid() { return false; } } true } /// Encode Categorical variables of data matrix to one-hot #[derive(Debug, Clone)] pub struct OneHotEncoder { category_mappers: Vec<CategoryMapper<CategoricalFloat>>, col_idx_categorical: Vec<usize>, } impl OneHotEncoder { /// Create an encoder instance with categories inferred from data matrix pub fn fit<T, M>(data: &M, params: OneHotEncoderParams) -> Result<OneHotEncoder, Failed> where T: Categorizable, M: Matrix<T>, { match (params.col_idx_categorical, params.infer_categorical) { (None, false) => Err(Failed::fit( "Must pass categorical series ids or infer flag", )), (Some(_idxs), true) => Err(Failed::fit( "Ambiguous parameters, got both infer and category ids", )), (Some(mut idxs), false) => { // make sure categories have same order as data columns idxs.sort_unstable(); let (nrows, _) = data.shape(); // col buffer to avoid allocations let mut col_buf: Vec<T> = iter::repeat(T::zero()).take(nrows).collect(); let mut res: Vec<CategoryMapper<CategoricalFloat>> = Vec::with_capacity(idxs.len()); for &idx in &idxs { data.copy_col_as_vec(idx, &mut col_buf); if !validate_col_is_categorical(&col_buf) { let msg = format!( "Column {} of data matrix contains non-categorizable (non-integer) values", idx ); return Err(Failed::fit(&msg[..])); } let hashable_col = col_buf.iter().map(|v| v.to_category()); res.push(CategoryMapper::fit_to_iter(hashable_col)); } Ok(Self { category_mappers: res, col_idx_categorical: idxs, }) } (None, true) => { todo!("Auto-Inference for Categorical Variables not yet implemented") } } } /// Transform categorical variables to one-hot encoded and return a new matrix pub fn transform<T, M>(&self, x: &M) -> Result<M, Failed> where T: Categorizable, M: Matrix<T>, { let (nrows, p) = x.shape(); let additional_params: Vec<usize> = self .category_mappers .iter() .map(|enc| enc.num_categories()) .collect(); // Each category of size v adds v-1 params let expandws_p: usize = p + additional_params.iter().fold(0, |cs, &v| cs + v - 1); let new_col_idx = find_new_idxs(p, &additional_params[..], &self.col_idx_categorical[..]); let mut res = M::zeros(nrows, expandws_p); for (pidx, &old_cidx) in self.col_idx_categorical.iter().enumerate() { let cidx = new_col_idx[old_cidx]; let col_iter = (0..nrows).map(|r| x.get(r, old_cidx).to_category()); let sencoder = &self.category_mappers[pidx]; let oh_series = col_iter.map(|c| sencoder.get_one_hot::<T, Vec<T>>(&c)); for (row, oh_vec) in oh_series.enumerate() { match oh_vec { None => { // Since we support generic T types, a bad value in a series causes it to be invalid let msg = format!("At least one value in column {} doesn't conform to category definition", old_cidx); return Err(Failed::transform(&msg[..])); } Some(v) => { // copy one-hot vectors to their place in the data matrix for (col_ofst, &val) in v.iter().enumerate() { res.set(row, cidx + col_ofst, val); } } } } } // copy old data in x to their new location while skipping categorical vars (already treated) let mut
skip_idx_iter = self.col_idx_categorical.iter(); let mut cur_skip = skip_idx_iter.next(); for (old_p, &new_p) in new_col_idx.iter().enumerate() { // if we found a treated variable, skip it if let Some(&v) = cur_skip { if v == old_p { cur_skip = skip_idx_iter.next(); continue; } } for r in 0..nrows { let val = x.get(r, old_p); res.set(r, new_p, val); } } Ok(res) } } #[cfg(test)] mod tests { use super::*; use crate::linalg::naive::dense_matrix::DenseMatrix; use crate::preprocessing::series_encoder::CategoryMapper; #[test] fn adjust_idxs() { assert_eq!(find_new_idxs(0, &[], &[]), Vec::<usize>::new()); // [0,1,2] -> [0, 1, 1, 1, 2] assert_eq!(find_new_idxs(3, &[3], &[1]), vec![0, 1, 4]); } fn build_cat_first_and_last() -> (DenseMatrix<f64>, DenseMatrix<f64>) { let orig = DenseMatrix::from_2d_array(&[ &[1.0, 1.5, 3.0], &[2.0, 1.5, 4.0], &[1.0, 1.5, 5.0], &[2.0, 1.5, 6.0], ]); let oh_enc = DenseMatrix::from_2d_array(&[ &[1.0, 0.0, 1.5, 1.0, 0.0, 0.0, 0.0], &[0.0, 1.0, 1.5, 0.0, 1.0, 0.0, 0.0], &[1.0, 0.0, 1.5, 0.0, 0.0, 1.0, 0.0], &[0.0, 1.0, 1.5, 0.0, 0.0, 0.0, 1.0], ]); (orig, oh_enc) } fn build_fake_matrix() -> (DenseMatrix<f64>, DenseMatrix<f64>) { // Categorical first and last let orig = DenseMatrix::from_2d_array(&[ &[1.5, 1.0, 1.5, 3.0], &[1.5, 2.0, 1.5, 4.0], &[1.5, 1.0, 1.5, 5.0], &[1.5, 2.0, 1.5, 6.0], ]); let oh_enc = DenseMatrix::from_2d_array(&[ &[1.5, 1.0, 0.0, 1.5, 1.0, 0.0, 0.0, 0.0], &[1.5, 0.0, 1.0, 1.5, 0.0, 1.0, 0.0, 0.0], &[1.5, 1.0, 0.0, 1.5, 0.0, 0.0, 1.0, 0.0], &[1.5, 0.0, 1.0, 1.5, 0.0, 0.0, 0.0, 1.0], ]); (orig, oh_enc) } #[test] fn hash_encode_f64_series() { let series = vec![3.0, 1.0, 2.0, 1.0]; let hashable_series: Vec<CategoricalFloat> = series.iter().map(|v| v.to_category()).collect(); let enc = CategoryMapper::from_positional_category_vec(hashable_series); let inv = enc.invert_one_hot(vec![0.0, 0.0, 1.0]); let orig_val: f64 = inv.unwrap().into(); assert_eq!(orig_val, 2.0); } #[test] fn test_fit() { let (x, _) = build_fake_matrix(); let params = OneHotEncoderParams::from_cat_idx(&[1, 3]); let oh_enc = OneHotEncoder::fit(&x, params).unwrap(); assert_eq!(oh_enc.category_mappers.len(), 2); let num_cat: Vec<usize> = oh_enc .category_mappers .iter() .map(|a| a.num_categories()) .collect(); assert_eq!(num_cat, vec![2, 4]); } #[test] fn matrix_transform_test() { let (x, expected_x) = build_fake_matrix(); let params = OneHotEncoderParams::from_cat_idx(&[1, 3]); let oh_enc = OneHotEncoder::fit(&x, params).unwrap(); let nm = oh_enc.transform(&x).unwrap(); assert_eq!(nm, expected_x); let (x, expected_x) = build_cat_first_and_last(); let params = OneHotEncoderParams::from_cat_idx(&[0, 2]); let oh_enc = OneHotEncoder::fit(&x, params).unwrap(); let nm = oh_enc.transform(&x).unwrap(); assert_eq!(nm, expected_x); } #[test] fn fail_on_bad_category() { let m = DenseMatrix::from_2d_array(&[ &[1.0, 1.5, 3.0], &[2.0, 1.5, 4.0], &[1.0, 1.5, 5.0], &[2.0, 1.5, 6.0], ]); let params = OneHotEncoderParams::from_cat_idx(&[1]); match OneHotEncoder::fit(&m, params) { Err(_) => { assert!(true); } _ => assert!(false), } } }
let cat_idx = cat_idxs.iter().copied().chain((num_params..).take(1)); // Offset is constant between two categorical values; here we calculate the number of steps // that remain constant
random_line_split
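The scan-based index arithmetic in `find_new_idxs` above is compact but dense; the following is a minimal standalone sketch (a hypothetical naive re-derivation, not part of smartcore) of the same mapping with explicit loops: every original column index shifts right by the total `size - 1` expansion of all categorical columns strictly before it.

/// Hypothetical naive equivalent of `find_new_idxs`, for illustration only.
fn new_idxs_naive(num_params: usize, cat_sizes: &[usize], cat_idxs: &[usize]) -> Vec<usize> {
    (0..num_params)
        .map(|idx| {
            // Each categorical column strictly before `idx` grew from 1 column
            // to `size` one-hot columns, pushing `idx` right by `size - 1`.
            let offset: usize = cat_idxs
                .iter()
                .zip(cat_sizes.iter())
                .filter(|&(&c, _)| c < idx)
                .map(|(_, &s)| s - 1)
                .sum();
            idx + offset
        })
        .collect()
}

fn main() {
    // Mirrors the `adjust_idxs` test above: [0, 1, 2] -> [0, 1, 4] when the
    // middle column expands into 3 one-hot columns.
    assert_eq!(new_idxs_naive(3, &[3], &[1]), vec![0, 1, 4]);
}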
categorical.rs
//! # One-hot Encoding For [RealNumber](../../math/num/trait.RealNumber.html) Matrices //! Transform a data [Matrix](../../linalg/trait.BaseMatrix.html) by replacing all categorical variables with their one-hot equivalents //! //! Internally OneHotEncoder treats every categorical column as a series and transforms it using [CategoryMapper](../series_encoder/struct.CategoryMapper.html) //! //! ### Usage Example //! ``` //! use smartcore::linalg::naive::dense_matrix::DenseMatrix; //! use smartcore::preprocessing::categorical::{OneHotEncoder, OneHotEncoderParams}; //! let data = DenseMatrix::from_2d_array(&[ //! &[1.5, 1.0, 1.5, 3.0], //! &[1.5, 2.0, 1.5, 4.0], //! &[1.5, 1.0, 1.5, 5.0], //! &[1.5, 2.0, 1.5, 6.0], //! ]); //! let encoder_params = OneHotEncoderParams::from_cat_idx(&[1, 3]); //! // Infer number of categories from data and return a reusable encoder //! let encoder = OneHotEncoder::fit(&data, encoder_params).unwrap(); //! // Transform categorical to one-hot encoded (can transform similar) //! let oh_data = encoder.transform(&data).unwrap(); //! // Produces the following: //! // &[1.5, 1.0, 0.0, 1.5, 1.0, 0.0, 0.0, 0.0] //! // &[1.5, 0.0, 1.0, 1.5, 0.0, 1.0, 0.0, 0.0] //! // &[1.5, 1.0, 0.0, 1.5, 0.0, 0.0, 1.0, 0.0] //! // &[1.5, 0.0, 1.0, 1.5, 0.0, 0.0, 0.0, 1.0] //! ``` use std::iter; use crate::error::Failed; use crate::linalg::Matrix; use crate::preprocessing::data_traits::{CategoricalFloat, Categorizable}; use crate::preprocessing::series_encoder::CategoryMapper; /// OneHotEncoder Parameters #[derive(Debug, Clone)] pub struct OneHotEncoderParams { /// Column numbers that contain categorical variables pub col_idx_categorical: Option<Vec<usize>>, /// (Currently not implemented) Try and infer which of the matrix columns are categorical variables infer_categorical: bool, } impl OneHotEncoderParams { /// Generate parameters from categorical variable column numbers pub fn from_cat_idx(categorical_params: &[usize]) -> Self
} /// Calculate the offset to parameters due to the introduction of one-hot encoding fn find_new_idxs(num_params: usize, cat_sizes: &[usize], cat_idxs: &[usize]) -> Vec<usize> { // This function uses iterators and returns a vector. // In case we get a huge amount of parameters this might be a problem // todo: Change this such that it will return an iterator let cat_idx = cat_idxs.iter().copied().chain((num_params..).take(1)); // Offset is constant between two categorical values; here we calculate the number of steps // that remain constant let repeats = cat_idx.scan(0, |a, v| { let im = v + 1 - *a; *a = v; Some(im) }); // Calculate the offset to parameter idx due to newly introduced one-hot vectors let offset_ = cat_sizes.iter().scan(0, |a, &v| { *a = *a + v - 1; Some(*a) }); let offset = (0..1).chain(offset_); let new_param_idxs: Vec<usize> = (0..num_params) .zip( repeats .zip(offset) .map(|(r, o)| iter::repeat(o).take(r)) .flatten(), ) .map(|(idx, ofst)| idx + ofst) .collect(); new_param_idxs } fn validate_col_is_categorical<T: Categorizable>(data: &[T]) -> bool { for v in data { if !v.is_valid() { return false; } } true } /// Encode Categorical variables of data matrix to one-hot #[derive(Debug, Clone)] pub struct OneHotEncoder { category_mappers: Vec<CategoryMapper<CategoricalFloat>>, col_idx_categorical: Vec<usize>, } impl OneHotEncoder { /// Create an encoder instance with categories inferred from data matrix pub fn fit<T, M>(data: &M, params: OneHotEncoderParams) -> Result<OneHotEncoder, Failed> where T: Categorizable, M: Matrix<T>, { match (params.col_idx_categorical, params.infer_categorical) { (None, false) => Err(Failed::fit( "Must pass categorical series ids or infer flag", )), (Some(_idxs), true) => Err(Failed::fit( "Ambiguous parameters, got both infer and category ids", )), (Some(mut idxs), false) => { // make sure categories have same order as data columns idxs.sort_unstable(); let (nrows, _) = data.shape(); // col buffer to avoid allocations let mut col_buf: Vec<T> = iter::repeat(T::zero()).take(nrows).collect(); let mut res: Vec<CategoryMapper<CategoricalFloat>> = Vec::with_capacity(idxs.len()); for &idx in &idxs { data.copy_col_as_vec(idx, &mut col_buf); if !validate_col_is_categorical(&col_buf) { let msg = format!( "Column {} of data matrix contains non-categorizable (non-integer) values", idx ); return Err(Failed::fit(&msg[..])); } let hashable_col = col_buf.iter().map(|v| v.to_category()); res.push(CategoryMapper::fit_to_iter(hashable_col)); } Ok(Self { category_mappers: res, col_idx_categorical: idxs, }) } (None, true) => { todo!("Auto-Inference for Categorical Variables not yet implemented") } } } /// Transform categorical variables to one-hot encoded and return a new matrix pub fn transform<T, M>(&self, x: &M) -> Result<M, Failed> where T: Categorizable, M: Matrix<T>, { let (nrows, p) = x.shape(); let additional_params: Vec<usize> = self .category_mappers .iter() .map(|enc| enc.num_categories()) .collect(); // Each category of size v adds v-1 params let expandws_p: usize = p + additional_params.iter().fold(0, |cs, &v| cs + v - 1); let new_col_idx = find_new_idxs(p, &additional_params[..], &self.col_idx_categorical[..]); let mut res = M::zeros(nrows, expandws_p); for (pidx, &old_cidx) in self.col_idx_categorical.iter().enumerate() { let cidx = new_col_idx[old_cidx]; let col_iter = (0..nrows).map(|r| x.get(r, old_cidx).to_category()); let sencoder = &self.category_mappers[pidx]; let oh_series = col_iter.map(|c| sencoder.get_one_hot::<T, Vec<T>>(&c)); for (row, oh_vec) in
oh_series.enumerate() { match oh_vec { None => { // Since we support generic T types, a bad value in a series causes it to be invalid let msg = format!("At least one value in column {} doesn't conform to category definition", old_cidx); return Err(Failed::transform(&msg[..])); } Some(v) => { // copy one-hot vectors to their place in the data matrix for (col_ofst, &val) in v.iter().enumerate() { res.set(row, cidx + col_ofst, val); } } } } } // copy old data in x to their new location while skipping categorical vars (already treated) let mut skip_idx_iter = self.col_idx_categorical.iter(); let mut cur_skip = skip_idx_iter.next(); for (old_p, &new_p) in new_col_idx.iter().enumerate() { // if we found a treated variable, skip it if let Some(&v) = cur_skip { if v == old_p { cur_skip = skip_idx_iter.next(); continue; } } for r in 0..nrows { let val = x.get(r, old_p); res.set(r, new_p, val); } } Ok(res) } } #[cfg(test)] mod tests { use super::*; use crate::linalg::naive::dense_matrix::DenseMatrix; use crate::preprocessing::series_encoder::CategoryMapper; #[test] fn adjust_idxs() { assert_eq!(find_new_idxs(0, &[], &[]), Vec::<usize>::new()); // [0,1,2] -> [0, 1, 1, 1, 2] assert_eq!(find_new_idxs(3, &[3], &[1]), vec![0, 1, 4]); } fn build_cat_first_and_last() -> (DenseMatrix<f64>, DenseMatrix<f64>) { let orig = DenseMatrix::from_2d_array(&[ &[1.0, 1.5, 3.0], &[2.0, 1.5, 4.0], &[1.0, 1.5, 5.0], &[2.0, 1.5, 6.0], ]); let oh_enc = DenseMatrix::from_2d_array(&[ &[1.0, 0.0, 1.5, 1.0, 0.0, 0.0, 0.0], &[0.0, 1.0, 1.5, 0.0, 1.0, 0.0, 0.0], &[1.0, 0.0, 1.5, 0.0, 0.0, 1.0, 0.0], &[0.0, 1.0, 1.5, 0.0, 0.0, 0.0, 1.0], ]); (orig, oh_enc) } fn build_fake_matrix() -> (DenseMatrix<f64>, DenseMatrix<f64>) { // Categorical first and last let orig = DenseMatrix::from_2d_array(&[ &[1.5, 1.0, 1.5, 3.0], &[1.5, 2.0, 1.5, 4.0], &[1.5, 1.0, 1.5, 5.0], &[1.5, 2.0, 1.5, 6.0], ]); let oh_enc = DenseMatrix::from_2d_array(&[ &[1.5, 1.0, 0.0, 1.5, 1.0, 0.0, 0.0, 0.0], &[1.5, 0.0, 1.0, 1.5, 0.0, 1.0, 0.0, 0.0], &[1.5, 1.0, 0.0, 1.5, 0.0, 0.0, 1.0, 0.0], &[1.5, 0.0, 1.0, 1.5, 0.0, 0.0, 0.0, 1.0], ]); (orig, oh_enc) } #[test] fn hash_encode_f64_series() { let series = vec![3.0, 1.0, 2.0, 1.0]; let hashable_series: Vec<CategoricalFloat> = series.iter().map(|v| v.to_category()).collect(); let enc = CategoryMapper::from_positional_category_vec(hashable_series); let inv = enc.invert_one_hot(vec![0.0, 0.0, 1.0]); let orig_val: f64 = inv.unwrap().into(); assert_eq!(orig_val, 2.0); } #[test] fn test_fit() { let (x, _) = build_fake_matrix(); let params = OneHotEncoderParams::from_cat_idx(&[1, 3]); let oh_enc = OneHotEncoder::fit(&x, params).unwrap(); assert_eq!(oh_enc.category_mappers.len(), 2); let num_cat: Vec<usize> = oh_enc .category_mappers .iter() .map(|a| a.num_categories()) .collect(); assert_eq!(num_cat, vec![2, 4]); } #[test] fn matrix_transform_test() { let (x, expected_x) = build_fake_matrix(); let params = OneHotEncoderParams::from_cat_idx(&[1, 3]); let oh_enc = OneHotEncoder::fit(&x, params).unwrap(); let nm = oh_enc.transform(&x).unwrap(); assert_eq!(nm, expected_x); let (x, expected_x) = build_cat_first_and_last(); let params = OneHotEncoderParams::from_cat_idx(&[0, 2]); let oh_enc = OneHotEncoder::fit(&x, params).unwrap(); let nm = oh_enc.transform(&x).unwrap(); assert_eq!(nm, expected_x); } #[test] fn fail_on_bad_category() { let m = DenseMatrix::from_2d_array(&[ &[1.0, 1.5, 3.0], &[2.0, 1.5, 4.0], &[1.0, 1.5, 5.0], &[2.0, 1.5, 6.0], ]); let params = OneHotEncoderParams::from_cat_idx(&[1]); match OneHotEncoder::fit(&m, params) {
Err(_) => { assert!(true); } _ => assert!(false), } } }
{ Self { col_idx_categorical: Some(categorical_params.to_vec()), infer_categorical: false, } }
identifier_body
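As a sanity check on the width arithmetic in `transform` (each categorical column with v categories contributes v - 1 extra columns), here is a tiny hedged sketch with an assumed helper name that reproduces the shapes used by the tests: `build_fake_matrix` has 4 columns with category sizes 2 and 4, giving 4 + (2 - 1) + (4 - 1) = 8 encoded columns.

/// Illustrative helper mirroring the `expandws_p` computation in `transform`.
fn expanded_width(p: usize, cat_sizes: &[usize]) -> usize {
    p + cat_sizes.iter().map(|&v| v - 1).sum::<usize>()
}

fn main() {
    // `build_fake_matrix`: 4 columns, categorical sizes 2 and 4 -> 8 columns.
    assert_eq!(expanded_width(4, &[2, 4]), 8);
    // `build_cat_first_and_last`: 3 columns, categorical sizes 2 and 4 -> 7.
    assert_eq!(expanded_width(3, &[2, 4]), 7);
}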
view.rs
// Copyright 2018 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use crate::{app::App, geometry::Size}; use failure::Error; use fidl::endpoints::{create_endpoints, create_proxy, ServerEnd}; use fidl_fuchsia_ui_gfx as gfx; use fidl_fuchsia_ui_input; use fidl_fuchsia_ui_scenic::{SessionListenerMarker, SessionListenerRequest}; use fidl_fuchsia_ui_viewsv1::{ViewListenerMarker, ViewListenerRequest}; use fuchsia_async as fasync; use fuchsia_scenic::{ImportNode, Session, SessionPtr}; use fuchsia_zircon as zx; use futures::{TryFutureExt, TryStreamExt}; use std::any::Any; /// enum that defines all messages sent with `App::send_message` that /// the view struct will understand and process. pub enum ViewMessages { /// Message that requests that a view redraw itself. Update, } /// parameter struct passed to setup and update trait methods. #[allow(missing_docs)] pub struct ViewAssistantContext<'a> { pub view_container: &'a mut fidl_fuchsia_ui_viewsv1::ViewContainerProxy, pub import_node: &'a ImportNode, pub session: &'a SessionPtr, pub key: ViewKey, pub logical_size: Size, pub size: Size, pub metrics: Size, pub messages: Vec<Box<dyn Any>>, } impl<'a> ViewAssistantContext<'a> { /// Queue up a message for delivery pub fn queue_message<A: Any>(&mut self, message: A) { self.messages.push(Box::new(message)); } } /// Trait that allows mod developers to customize the behavior of view controllers. pub trait ViewAssistant { /// This method is called once when a view is created. It is a good point to create scenic /// commands that apply throughout the lifetime of the view. fn setup(&mut self, context: &ViewAssistantContext) -> Result<(), Error>; /// This method is called when a view controller has been asked to update the view. fn update(&mut self, context: &ViewAssistantContext) -> Result<(), Error>; /// This method is called when input events come from scenic to this view. fn handle_input_event( &mut self, _context: &mut ViewAssistantContext, _event: &fidl_fuchsia_ui_input::InputEvent, ) -> Result<(), Error> { Ok(()) } /// This method is called when `App::send_message` is called with the associated /// view controller's `ViewKey` and the view controller does not handle the message. fn handle_message(&mut self, _message: &Any) {} } /// Reference to an app assistant. _This type is likely to change in the future so /// using this type alias might make for easier forward migration._ pub type ViewAssistantPtr = Box<dyn ViewAssistant>; /// Key identifying a view. pub type ViewKey = u64; /// This struct takes care of all the boilerplate needed for implementing a Fuchsia /// view, forwarding the interesting implementation points to a struct implementing /// the `ViewAssistant` trait. 
pub struct ViewController { #[allow(unused)] view: fidl_fuchsia_ui_viewsv1::ViewProxy, view_container: fidl_fuchsia_ui_viewsv1::ViewContainerProxy, session: SessionPtr, import_node: ImportNode, #[allow(unused)] key: ViewKey, assistant: ViewAssistantPtr, metrics: Size, physical_size: Size, logical_size: Size, } impl ViewController { pub(crate) fn new( app: &mut App, view_token: gfx::ExportToken, key: ViewKey, ) -> Result<ViewController, Error> { let (view, view_server_end) = create_proxy()?; let (view_listener, view_listener_request) = create_endpoints()?; let (mine, theirs) = zx::EventPair::create()?; app.view_manager.create_view2( view_server_end, view_token.value, view_listener, theirs, None, )?; let (session_listener, session_listener_request) = create_endpoints()?; let (session_proxy, session_request) = create_proxy()?; app.scenic.create_session(session_request, Some(session_listener))?; let session = Session::new(session_proxy); let mut view_assistant = app.create_view_assistant(&session)?; let mut import_node = ImportNode::new(session.clone(), mine); let (mut view_container, view_container_request) = create_proxy()?; view.get_container(view_container_request)?; let context = ViewAssistantContext { view_container: &mut view_container, import_node: &mut import_node, session: &session, key, logical_size: Size::zero(), size: Size::zero(), metrics: Size::zero(), messages: Vec::new(), }; view_assistant.setup(&context)?; let view_controller = ViewController { view, view_container: view_container, session, import_node, metrics: Size::zero(), physical_size: Size::zero(), logical_size: Size::zero(), key, assistant: view_assistant, }; Self::setup_session_listener(key, session_listener_request)?; Self::setup_view_listener(key, view_listener_request)?; Ok(view_controller) } fn setup_session_listener( key: ViewKey, session_listener_request: ServerEnd<SessionListenerMarker>, ) -> Result<(), Error> { fasync::spawn_local( session_listener_request .into_stream()? .map_ok(move |request| match request { SessionListenerRequest::OnScenicEvent { events, .. } => App::with(|app| { app.with_view(key, |view| { view.handle_session_events(events); }) }), _ => (), }) .try_collect::<()>() .unwrap_or_else(|e| eprintln!("session listener error: {:?}", e)), ); Ok(()) } fn setup_view_listener( key: ViewKey, view_listener_request: ServerEnd<ViewListenerMarker>, ) -> Result<(), Error> { fasync::spawn_local( view_listener_request .into_stream()?
.try_for_each( move |ViewListenerRequest::OnPropertiesChanged { properties, responder }| { App::with(|app| { app.with_view(key, |view| { view.handle_properties_changed(&properties); }); }); futures::future::ready(responder.send()) }, ) .unwrap_or_else(|e| eprintln!("view listener error: {:?}", e)), ); Ok(()) } fn update(&mut self) { let context = ViewAssistantContext { view_container: &mut self.view_container, import_node: &mut self.import_node, session: &self.session, key: self.key, logical_size: self.logical_size, size: self.physical_size, metrics: self.metrics, messages: Vec::new(), }; self.assistant.update(&context).unwrap_or_else(|e| panic!("Update error: {:?}", e)); self.present(); } fn handle_session_events(&mut self, events: Vec<fidl_fuchsia_ui_scenic::Event>) { events.iter().for_each(|event| match event { fidl_fuchsia_ui_scenic::Event::Gfx(gfx::Event::Metrics(event)) => { self.metrics = Size::new(event.metrics.scale_x, event.metrics.scale_y); self.logical_size = Size::new( self.physical_size.width * self.metrics.width, self.physical_size.height * self.metrics.height, ); self.update(); } fidl_fuchsia_ui_scenic::Event::Input(event) => { let mut context = ViewAssistantContext { view_container: &mut self.view_container, import_node: &mut self.import_node, session: &self.session, key: self.key, logical_size: self.logical_size, size: self.physical_size, metrics: self.metrics, messages: Vec::new(), }; self.assistant .handle_input_event(&mut context, &event) .unwrap_or_else(|e| eprintln!("handle_event: {:?}", e)); for msg in context.messages { self.send_message(&msg); } self.update(); } _ => (), }); } fn present(&self) { fasync::spawn_local( self.session .lock() .present(0) .map_ok(|_| ()) .unwrap_or_else(|e| panic!("present error: {:?}", e)), ); } fn handle_properties_changed(&mut self, properties: &fidl_fuchsia_ui_viewsv1::ViewProperties) { if let Some(ref view_properties) = properties.view_layout { self.physical_size = Size::new(view_properties.size.width, view_properties.size.height); self.logical_size = Size::new( self.physical_size.width * self.metrics.width, self.physical_size.height * self.metrics.height, ); self.update(); } } /// This method sends an arbitrary message to this view. If it is not /// handled directly by `ViewController::send_message` it will be forwarded /// to the view assistant. pub fn send_message(&mut self, msg: &Any) { if let Some(view_msg) = msg.downcast_ref::<ViewMessages>()
else { self.assistant.handle_message(msg); } } }
{ match view_msg { ViewMessages::Update => { self.update(); } } }
conditional_block
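To make the callback protocol described by the `ViewAssistant` docs concrete, here is a minimal sketch of an implementor; the struct name is hypothetical, and only `setup` and `update` are required since the other trait methods have default bodies.

use failure::Error;

/// Hypothetical do-nothing assistant: it satisfies the two required trait
/// methods so the `ViewController` boilerplate can drive it.
struct NoopViewAssistant;

impl ViewAssistant for NoopViewAssistant {
    fn setup(&mut self, _context: &ViewAssistantContext) -> Result<(), Error> {
        // One-time scenic setup (e.g. populating the import node) goes here.
        Ok(())
    }

    fn update(&mut self, _context: &ViewAssistantContext) -> Result<(), Error> {
        // Per-frame scene mutation goes here; called after metrics or
        // property changes and on `ViewMessages::Update`.
        Ok(())
    }
}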
view.rs
// Copyright 2018 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use crate::{app::App, geometry::Size}; use failure::Error; use fidl::endpoints::{create_endpoints, create_proxy, ServerEnd}; use fidl_fuchsia_ui_gfx as gfx; use fidl_fuchsia_ui_input; use fidl_fuchsia_ui_scenic::{SessionListenerMarker, SessionListenerRequest}; use fidl_fuchsia_ui_viewsv1::{ViewListenerMarker, ViewListenerRequest}; use fuchsia_async as fasync; use fuchsia_scenic::{ImportNode, Session, SessionPtr}; use fuchsia_zircon as zx; use futures::{TryFutureExt, TryStreamExt}; use std::any::Any; /// enum that defines all messages sent with `App::send_message` that /// the view struct will understand and process. pub enum ViewMessages { /// Message that requests that a view redraw itself. Update, } /// parameter struct passed to setup and update trait methods. #[allow(missing_docs)] pub struct ViewAssistantContext<'a> { pub view_container: &'a mut fidl_fuchsia_ui_viewsv1::ViewContainerProxy, pub import_node: &'a ImportNode, pub session: &'a SessionPtr, pub key: ViewKey, pub logical_size: Size, pub size: Size, pub metrics: Size, pub messages: Vec<Box<dyn Any>>, } impl<'a> ViewAssistantContext<'a> { /// Queue up a message for delivery pub fn queue_message<A: Any>(&mut self, message: A) { self.messages.push(Box::new(message)); } } /// Trait that allows mod developers to customize the behavior of view controllers. pub trait ViewAssistant { /// This method is called once when a view is created. It is a good point to create scenic /// commands that apply throughout the lifetime of the view. fn setup(&mut self, context: &ViewAssistantContext) -> Result<(), Error>; /// This method is called when a view controller has been asked to update the view. fn update(&mut self, context: &ViewAssistantContext) -> Result<(), Error>; /// This method is called when input events come from scenic to this view. fn handle_input_event( &mut self, _context: &mut ViewAssistantContext, _event: &fidl_fuchsia_ui_input::InputEvent, ) -> Result<(), Error> { Ok(()) } /// This method is called when `App::send_message` is called with the associated /// view controller's `ViewKey` and the view controller does not handle the message. fn handle_message(&mut self, _message: &Any) {} } /// Reference to an app assistant. _This type is likely to change in the future so /// using this type alias might make for easier forward migration._ pub type ViewAssistantPtr = Box<dyn ViewAssistant>; /// Key identifying a view. pub type ViewKey = u64; /// This struct takes care of all the boilerplate needed for implementing a Fuchsia /// view, forwarding the interesting implementation points to a struct implementing /// the `ViewAssistant` trait. 
pub struct ViewController { #[allow(unused)] view: fidl_fuchsia_ui_viewsv1::ViewProxy, view_container: fidl_fuchsia_ui_viewsv1::ViewContainerProxy, session: SessionPtr, import_node: ImportNode, #[allow(unused)] key: ViewKey, assistant: ViewAssistantPtr, metrics: Size, physical_size: Size, logical_size: Size, } impl ViewController { pub(crate) fn new( app: &mut App, view_token: gfx::ExportToken, key: ViewKey, ) -> Result<ViewController, Error> { let (view, view_server_end) = create_proxy()?; let (view_listener, view_listener_request) = create_endpoints()?; let (mine, theirs) = zx::EventPair::create()?; app.view_manager.create_view2( view_server_end, view_token.value, view_listener, theirs, None, )?; let (session_listener, session_listener_request) = create_endpoints()?; let (session_proxy, session_request) = create_proxy()?; app.scenic.create_session(session_request, Some(session_listener))?; let session = Session::new(session_proxy); let mut view_assistant = app.create_view_assistant(&session)?; let mut import_node = ImportNode::new(session.clone(), mine); let (mut view_container, view_container_request) = create_proxy()?; view.get_container(view_container_request)?; let context = ViewAssistantContext { view_container: &mut view_container, import_node: &mut import_node, session: &session, key, logical_size: Size::zero(), size: Size::zero(), metrics: Size::zero(), messages: Vec::new(), }; view_assistant.setup(&context)?; let view_controller = ViewController { view, view_container: view_container, session, import_node, metrics: Size::zero(), physical_size: Size::zero(), logical_size: Size::zero(), key, assistant: view_assistant, }; Self::setup_session_listener(key, session_listener_request)?; Self::setup_view_listener(key, view_listener_request)?; Ok(view_controller) } fn setup_session_listener( key: ViewKey, session_listener_request: ServerEnd<SessionListenerMarker>, ) -> Result<(), Error> { fasync::spawn_local( session_listener_request .into_stream()? .map_ok(move |request| match request { SessionListenerRequest::OnScenicEvent { events, .. } => App::with(|app| { app.with_view(key, |view| { view.handle_session_events(events); }) }), _ => (), }) .try_collect::<()>() .unwrap_or_else(|e| eprintln!("session listener error: {:?}", e)), ); Ok(()) } fn setup_view_listener( key: ViewKey, view_listener_request: ServerEnd<ViewListenerMarker>, ) -> Result<(), Error> { fasync::spawn_local( view_listener_request .into_stream()? .try_for_each( move |ViewListenerRequest::OnPropertiesChanged { properties, responder }| { App::with(|app| { app.with_view(key, |view| { view.handle_properties_changed(&properties); }); }); futures::future::ready(responder.send()) }, ) .unwrap_or_else(|e| eprintln!("view listener error: {:?}", e)), ); Ok(()) } fn update(&mut self) { let context = ViewAssistantContext { view_container: &mut self.view_container, import_node: &mut self.import_node, session: &self.session, key: self.key, logical_size: self.logical_size, size: self.physical_size, metrics: self.metrics, messages: Vec::new(), }; self.assistant.update(&context).unwrap_or_else(|e| panic!("Update error: {:?}", e)); self.present(); } fn handle_session_events(&mut self, events: Vec<fidl_fuchsia_ui_scenic::Event>)
}; self.assistant .handle_input_event(&mut context, &event) .unwrap_or_else(|e| eprintln!("handle_event: {:?}", e)); for msg in context.messages { self.send_message(&msg); } self.update(); } _ => (), }); } fn present(&self) { fasync::spawn_local( self.session .lock() .present(0) .map_ok(|_| ()) .unwrap_or_else(|e| panic!("present error: {:?}", e)), ); } fn handle_properties_changed(&mut self, properties: &fidl_fuchsia_ui_viewsv1::ViewProperties) { if let Some(ref view_properties) = properties.view_layout { self.physical_size = Size::new(view_properties.size.width, view_properties.size.height); self.logical_size = Size::new( self.physical_size.width * self.metrics.width, self.physical_size.height * self.metrics.height, ); self.update(); } } /// This method sends an arbitrary message to this view. If it is not /// handled directly by `ViewController::send_message` it will be forwarded /// to the view assistant. pub fn send_message(&mut self, msg: &Any) { if let Some(view_msg) = msg.downcast_ref::<ViewMessages>() { match view_msg { ViewMessages::Update => { self.update(); } } } else { self.assistant.handle_message(msg); } } }
{ events.iter().for_each(|event| match event { fidl_fuchsia_ui_scenic::Event::Gfx(gfx::Event::Metrics(event)) => { self.metrics = Size::new(event.metrics.scale_x, event.metrics.scale_y); self.logical_size = Size::new( self.physical_size.width * self.metrics.width, self.physical_size.height * self.metrics.height, ); self.update(); } fidl_fuchsia_ui_scenic::Event::Input(event) => { let mut context = ViewAssistantContext { view_container: &mut self.view_container, import_node: &mut self.import_node, session: &self.session, key: self.key, logical_size: self.logical_size, size: self.physical_size, metrics: self.metrics, messages: Vec::new(),
identifier_body
view.rs
// Copyright 2018 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use crate::{app::App, geometry::Size}; use failure::Error; use fidl::endpoints::{create_endpoints, create_proxy, ServerEnd}; use fidl_fuchsia_ui_gfx as gfx; use fidl_fuchsia_ui_input; use fidl_fuchsia_ui_scenic::{SessionListenerMarker, SessionListenerRequest}; use fidl_fuchsia_ui_viewsv1::{ViewListenerMarker, ViewListenerRequest}; use fuchsia_async as fasync; use fuchsia_scenic::{ImportNode, Session, SessionPtr}; use fuchsia_zircon as zx; use futures::{TryFutureExt, TryStreamExt}; use std::any::Any; /// enum that defines all messages sent with `App::send_message` that /// the view struct will understand and process. pub enum ViewMessages { /// Message that requests that a view redraw itself. Update, } /// parameter struct passed to setup and update trait methods. #[allow(missing_docs)] pub struct ViewAssistantContext<'a> { pub view_container: &'a mut fidl_fuchsia_ui_viewsv1::ViewContainerProxy, pub import_node: &'a ImportNode, pub session: &'a SessionPtr, pub key: ViewKey, pub logical_size: Size, pub size: Size, pub metrics: Size, pub messages: Vec<Box<dyn Any>>, } impl<'a> ViewAssistantContext<'a> { /// Queue up a message for delivery pub fn queue_message<A: Any>(&mut self, message: A) { self.messages.push(Box::new(message)); } } /// Trait that allows mod developers to customize the behavior of view controllers. pub trait ViewAssistant { /// This method is called once when a view is created. It is a good point to create scenic /// commands that apply throughout the lifetime of the view. fn setup(&mut self, context: &ViewAssistantContext) -> Result<(), Error>; /// This method is called when a view controller has been asked to update the view. fn update(&mut self, context: &ViewAssistantContext) -> Result<(), Error>; /// This method is called when input events come from scenic to this view. fn handle_input_event( &mut self, _context: &mut ViewAssistantContext, _event: &fidl_fuchsia_ui_input::InputEvent, ) -> Result<(), Error> { Ok(()) } /// This method is called when `App::send_message` is called with the associated /// view controller's `ViewKey` and the view controller does not handle the message. fn handle_message(&mut self, _message: &Any) {} } /// Reference to an app assistant. _This type is likely to change in the future so /// using this type alias might make for easier forward migration._ pub type ViewAssistantPtr = Box<dyn ViewAssistant>; /// Key identifying a view. pub type ViewKey = u64; /// This struct takes care of all the boilerplate needed for implementing a Fuchsia /// view, forwarding the interesting implementation points to a struct implementing /// the `ViewAssistant` trait. 
pub struct ViewController { #[allow(unused)] view: fidl_fuchsia_ui_viewsv1::ViewProxy, view_container: fidl_fuchsia_ui_viewsv1::ViewContainerProxy, session: SessionPtr, import_node: ImportNode, #[allow(unused)] key: ViewKey, assistant: ViewAssistantPtr, metrics: Size, physical_size: Size, logical_size: Size, } impl ViewController { pub(crate) fn new( app: &mut App, view_token: gfx::ExportToken, key: ViewKey, ) -> Result<ViewController, Error> { let (view, view_server_end) = create_proxy()?; let (view_listener, view_listener_request) = create_endpoints()?; let (mine, theirs) = zx::EventPair::create()?; app.view_manager.create_view2( view_server_end, view_token.value, view_listener, theirs, None, )?; let (session_listener, session_listener_request) = create_endpoints()?; let (session_proxy, session_request) = create_proxy()?; app.scenic.create_session(session_request, Some(session_listener))?; let session = Session::new(session_proxy); let mut view_assistant = app.create_view_assistant(&session)?; let mut import_node = ImportNode::new(session.clone(), mine); let (mut view_container, view_container_request) = create_proxy()?; view.get_container(view_container_request)?; let context = ViewAssistantContext { view_container: &mut view_container, import_node: &mut import_node, session: &session, key, logical_size: Size::zero(), size: Size::zero(), metrics: Size::zero(), messages: Vec::new(), }; view_assistant.setup(&context)?; let view_controller = ViewController { view, view_container: view_container, session, import_node, metrics: Size::zero(), physical_size: Size::zero(), logical_size: Size::zero(), key, assistant: view_assistant, }; Self::setup_session_listener(key, session_listener_request)?; Self::setup_view_listener(key, view_listener_request)?; Ok(view_controller) } fn
( key: ViewKey, session_listener_request: ServerEnd<SessionListenerMarker>, ) -> Result<(), Error> { fasync::spawn_local( session_listener_request .into_stream()? .map_ok(move |request| match request { SessionListenerRequest::OnScenicEvent { events, .. } => App::with(|app| { app.with_view(key, |view| { view.handle_session_events(events); }) }), _ => (), }) .try_collect::<()>() .unwrap_or_else(|e| eprintln!("session listener error: {:?}", e)), ); Ok(()) } fn setup_view_listener( key: ViewKey, view_listener_request: ServerEnd<ViewListenerMarker>, ) -> Result<(), Error> { fasync::spawn_local( view_listener_request .into_stream()? .try_for_each( move |ViewListenerRequest::OnPropertiesChanged { properties, responder }| { App::with(|app| { app.with_view(key, |view| { view.handle_properties_changed(&properties); }); }); futures::future::ready(responder.send()) }, ) .unwrap_or_else(|e| eprintln!("view listener error: {:?}", e)), ); Ok(()) } fn update(&mut self) { let context = ViewAssistantContext { view_container: &mut self.view_container, import_node: &mut self.import_node, session: &self.session, key: self.key, logical_size: self.logical_size, size: self.physical_size, metrics: self.metrics, messages: Vec::new(), }; self.assistant.update(&context).unwrap_or_else(|e| panic!("Update error: {:?}", e)); self.present(); } fn handle_session_events(&mut self, events: Vec<fidl_fuchsia_ui_scenic::Event>) { events.iter().for_each(|event| match event { fidl_fuchsia_ui_scenic::Event::Gfx(gfx::Event::Metrics(event)) => { self.metrics = Size::new(event.metrics.scale_x, event.metrics.scale_y); self.logical_size = Size::new( self.physical_size.width * self.metrics.width, self.physical_size.height * self.metrics.height, ); self.update(); } fidl_fuchsia_ui_scenic::Event::Input(event) => { let mut context = ViewAssistantContext { view_container: &mut self.view_container, import_node: &mut self.import_node, session: &self.session, key: self.key, logical_size: self.logical_size, size: self.physical_size, metrics: self.metrics, messages: Vec::new(), }; self.assistant .handle_input_event(&mut context, &event) .unwrap_or_else(|e| eprintln!("handle_event: {:?}", e)); for msg in context.messages { self.send_message(&msg); } self.update(); } _ => (), }); } fn present(&self) { fasync::spawn_local( self.session .lock() .present(0) .map_ok(|_| ()) .unwrap_or_else(|e| panic!("present error: {:?}", e)), ); } fn handle_properties_changed(&mut self, properties: &fidl_fuchsia_ui_viewsv1::ViewProperties) { if let Some(ref view_properties) = properties.view_layout { self.physical_size = Size::new(view_properties.size.width, view_properties.size.height); self.logical_size = Size::new( self.physical_size.width * self.metrics.width, self.physical_size.height * self.metrics.height, ); self.update(); } } /// This method sends an arbitrary message to this view. If it is not /// handled directly by `ViewController::send_message` it will be forwarded /// to the view assistant. pub fn send_message(&mut self, msg: &Any) { if let Some(view_msg) = msg.downcast_ref::<ViewMessages>() { match view_msg { ViewMessages::Update => { self.update(); } } } else { self.assistant.handle_message(msg); } } }
setup_session_listener
identifier_name
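Both `handle_session_events` and `handle_properties_changed` recompute the logical size with the same per-axis multiplication of the physical size by the scenic metrics factors; the sketch below restates that arithmetic standalone, with plain tuples standing in for `Size` as an assumption for illustration.

/// Logical size as this file computes it: physical * metrics, per axis.
fn logical_size(physical: (f32, f32), metrics: (f32, f32)) -> (f32, f32) {
    (physical.0 * metrics.0, physical.1 * metrics.1)
}

fn main() {
    // An 800x600 physical view with 2x metrics is 1600x1200 logical units.
    assert_eq!(logical_size((800.0, 600.0), (2.0, 2.0)), (1600.0, 1200.0));
}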
view.rs
// Copyright 2018 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use crate::{app::App, geometry::Size}; use failure::Error; use fidl::endpoints::{create_endpoints, create_proxy, ServerEnd}; use fidl_fuchsia_ui_gfx as gfx; use fidl_fuchsia_ui_input; use fidl_fuchsia_ui_scenic::{SessionListenerMarker, SessionListenerRequest}; use fidl_fuchsia_ui_viewsv1::{ViewListenerMarker, ViewListenerRequest}; use fuchsia_async as fasync; use fuchsia_scenic::{ImportNode, Session, SessionPtr}; use fuchsia_zircon as zx; use futures::{TryFutureExt, TryStreamExt}; use std::any::Any; /// enum that defines all messages sent with `App::send_message` that /// the view struct will understand and process. pub enum ViewMessages { /// Message that requests that a view redraw itself. Update, } /// parameter struct passed to setup and update trait methods. #[allow(missing_docs)] pub struct ViewAssistantContext<'a> { pub view_container: &'a mut fidl_fuchsia_ui_viewsv1::ViewContainerProxy, pub import_node: &'a ImportNode, pub session: &'a SessionPtr, pub key: ViewKey, pub logical_size: Size, pub size: Size, pub metrics: Size, pub messages: Vec<Box<dyn Any>>, } impl<'a> ViewAssistantContext<'a> { /// Queue up a message for delivery pub fn queue_message<A: Any>(&mut self, message: A) { self.messages.push(Box::new(message)); } } /// Trait that allows mod developers to customize the behavior of view controllers. pub trait ViewAssistant { /// This method is called once when a view is created. It is a good point to create scenic /// commands that apply throughout the lifetime of the view. fn setup(&mut self, context: &ViewAssistantContext) -> Result<(), Error>; /// This method is called when a view controller has been asked to update the view. fn update(&mut self, context: &ViewAssistantContext) -> Result<(), Error>; /// This method is called when input events come from scenic to this view. fn handle_input_event( &mut self, _context: &mut ViewAssistantContext, _event: &fidl_fuchsia_ui_input::InputEvent, ) -> Result<(), Error> { Ok(()) } /// This method is called when `App::send_message` is called with the associated /// view controller's `ViewKey` and the view controller does not handle the message. fn handle_message(&mut self, _message: &Any) {} } /// Reference to an app assistant. _This type is likely to change in the future so /// using this type alias might make for easier forward migration._ pub type ViewAssistantPtr = Box<dyn ViewAssistant>; /// Key identifying a view. pub type ViewKey = u64; /// This struct takes care of all the boilerplate needed for implementing a Fuchsia /// view, forwarding the interesting implementation points to a struct implementing /// the `ViewAssistant` trait. pub struct ViewController { #[allow(unused)] view: fidl_fuchsia_ui_viewsv1::ViewProxy, view_container: fidl_fuchsia_ui_viewsv1::ViewContainerProxy, session: SessionPtr, import_node: ImportNode, #[allow(unused)] key: ViewKey, assistant: ViewAssistantPtr, metrics: Size, physical_size: Size, logical_size: Size, } impl ViewController { pub(crate) fn new( app: &mut App, view_token: gfx::ExportToken, key: ViewKey, ) -> Result<ViewController, Error> { let (view, view_server_end) = create_proxy()?; let (view_listener, view_listener_request) = create_endpoints()?; let (mine, theirs) = zx::EventPair::create()?; app.view_manager.create_view2( view_server_end,
view_listener, theirs, None, )?; let (session_listener, session_listener_request) = create_endpoints()?; let (session_proxy, session_request) = create_proxy()?; app.scenic.create_session(session_request, Some(session_listener))?; let session = Session::new(session_proxy); let mut view_assistant = app.create_view_assistant(&session)?; let mut import_node = ImportNode::new(session.clone(), mine); let (mut view_container, view_container_request) = create_proxy()?; view.get_container(view_container_request)?; let context = ViewAssistantContext { view_container: &mut view_container, import_node: &mut import_node, session: &session, key, logical_size: Size::zero(), size: Size::zero(), metrics: Size::zero(), messages: Vec::new(), }; view_assistant.setup(&context)?; let view_controller = ViewController { view, view_container: view_container, session, import_node, metrics: Size::zero(), physical_size: Size::zero(), logical_size: Size::zero(), key, assistant: view_assistant, }; Self::setup_session_listener(key, session_listener_request)?; Self::setup_view_listener(key, view_listener_request)?; Ok(view_controller) } fn setup_session_listener( key: ViewKey, session_listener_request: ServerEnd<SessionListenerMarker>, ) -> Result<(), Error> { fasync::spawn_local( session_listener_request .into_stream()? .map_ok(move |request| match request { SessionListenerRequest::OnScenicEvent { events, .. } => App::with(|app| { app.with_view(key, |view| { view.handle_session_events(events); }) }), _ => (), }) .try_collect::<()>() .unwrap_or_else(|e| eprintln!("session listener error: {:?}", e)), ); Ok(()) } fn setup_view_listener( key: ViewKey, view_listener_request: ServerEnd<ViewListenerMarker>, ) -> Result<(), Error> { fasync::spawn_local( view_listener_request .into_stream()?
.try_for_each( move |ViewListenerRequest::OnPropertiesChanged { properties, responder }| { App::with(|app| { app.with_view(key, |view| { view.handle_properties_changed(&properties); }); }); futures::future::ready(responder.send()) }, ) .unwrap_or_else(|e| eprintln!("view listener error: {:?}", e)), ); Ok(()) } fn update(&mut self) { let context = ViewAssistantContext { view_container: &mut self.view_container, import_node: &mut self.import_node, session: &self.session, key: self.key, logical_size: self.logical_size, size: self.physical_size, metrics: self.metrics, messages: Vec::new(), }; self.assistant.update(&context).unwrap_or_else(|e| panic!("Update error: {:?}", e)); self.present(); } fn handle_session_events(&mut self, events: Vec<fidl_fuchsia_ui_scenic::Event>) { events.iter().for_each(|event| match event { fidl_fuchsia_ui_scenic::Event::Gfx(gfx::Event::Metrics(event)) => { self.metrics = Size::new(event.metrics.scale_x, event.metrics.scale_y); self.logical_size = Size::new( self.physical_size.width * self.metrics.width, self.physical_size.height * self.metrics.height, ); self.update(); } fidl_fuchsia_ui_scenic::Event::Input(event) => { let mut context = ViewAssistantContext { view_container: &mut self.view_container, import_node: &mut self.import_node, session: &self.session, key: self.key, logical_size: self.logical_size, size: self.physical_size, metrics: self.metrics, messages: Vec::new(), }; self.assistant .handle_input_event(&mut context, &event) .unwrap_or_else(|e| eprintln!("handle_event: {:?}", e)); for msg in context.messages { self.send_message(&msg); } self.update(); } _ => (), }); } fn present(&self) { fasync::spawn_local( self.session .lock() .present(0) .map_ok(|_| ()) .unwrap_or_else(|e| panic!("present error: {:?}", e)), ); } fn handle_properties_changed(&mut self, properties: &fidl_fuchsia_ui_viewsv1::ViewProperties) { if let Some(ref view_properties) = properties.view_layout { self.physical_size = Size::new(view_properties.size.width, view_properties.size.height); self.logical_size = Size::new( self.physical_size.width * self.metrics.width, self.physical_size.height * self.metrics.height, ); self.update(); } } /// This method sends an arbitrary message to this view. If it is not /// handled directly by `ViewController::send_message` it will be forwarded /// to the view assistant. pub fn send_message(&mut self, msg: &Any) { if let Some(view_msg) = msg.downcast_ref::<ViewMessages>() { match view_msg { ViewMessages::Update => { self.update(); } } } else { self.assistant.handle_message(msg); } } }
view_token.value,
random_line_split
main.rs
#[macro_use] extern crate log; use std::sync::Arc; use amethyst::{ assets::{AssetStorage, Loader, PrefabLoader, PrefabLoaderSystem, Processor, RonFormat}, core::{ bundle::SystemBundle, math::Vector3, transform::{Transform, TransformBundle}, Float, }, ecs::{ Dispatcher, DispatcherBuilder, Entity, Read, ReadExpect, Resources, System, SystemData, WriteStorage, }, input::{InputBundle, InputHandler, StringBindings}, prelude::*, renderer::{ formats::texture::ImageFormat, pass::{DrawDebugLinesDesc, DrawFlat2DDesc}, rendy::{ factory::Factory, graph::{ render::{RenderGroupDesc, SubpassBuilder}, GraphBuilder, }, hal::{format::Format, image}, mesh::{Normal, Position, TexCoord}, }, sprite::{SpriteRender, SpriteSheet, SpriteSheetFormat, SpriteSheetHandle}, types::DefaultBackend, GraphCreator, RenderingSystem, Texture, }, ui::UiBundle, utils::{application_root_dir, scene::BasicScenePrefab}, window::{ScreenDimensions, Window, WindowBundle}, }; use amethyst_physics::PhysicsBundle; use specs_physics::{ bodies::BodyStatus, colliders::Shape, PhysicsBody, PhysicsBodyBuilder, PhysicsColliderBuilder, }; pub type GamePrefabData = BasicScenePrefab<(Vec<Position>, Vec<Normal>, Vec<TexCoord>)>; /// The Player `Resources` contains player relevant data and holds a reference /// to the `Entity` that defines the player. #[derive(Debug)] pub struct Player { /// The player `Entity`. pub player: Entity, } #[derive(Default)] struct GameState<'a, 'b> { /// `State` specific dispatcher. dispatcher: Option<Dispatcher<'a, 'b>>, } impl<'a, 'b> SimpleState for GameState<'a, 'b> { fn on_start(&mut self, data: StateData<GameData>) { info!("GameState.on_start"); let world = data.world; // load scene handle let scene_handle = world.exec(|loader: PrefabLoader<'_, GamePrefabData>| { loader.load("prefab/scene.ron", RonFormat, ()) }); // load sprite sheets let character_handle = self.load_sprite_sheet("texture/character.png", "texture/character.ron", world); let objects_handle = self.load_sprite_sheet("texture/objects.png", "texture/objects.ron", world); // create dispatcher self.create_dispatcher(world); // initialise scene world.create_entity().with(scene_handle.clone()).build(); // create player Entity let player = world .create_entity() .with(SpriteRender { sprite_sheet: character_handle.clone(), sprite_number: 0, }) .with(PhysicsBodyBuilder::<Float>::from(BodyStatus::Dynamic).build()) .with( PhysicsColliderBuilder::<Float>::from(Shape::Rectangle( 15.0.into(), 22.0.into(), 1.0.into(), )) .build(), ) .with(Transform::from(Vector3::new(25.0, 50.0, 0.0))) .build(); // create the player Resource world.add_resource(Player { player }); // create obstacle Entity world .create_entity() .with(SpriteRender { sprite_sheet: objects_handle.clone(), sprite_number: 0, }) .with(PhysicsBodyBuilder::<Float>::from(BodyStatus::Static).build()) .with( PhysicsColliderBuilder::<Float>::from(Shape::Rectangle( 15.0.into(), 16.0.into(), 1.0.into(), )) .build(), ) .with(Transform::from(Vector3::new(75.0, 50.0, 0.0))) .build(); } fn fixed_update(&mut self, data: StateData<GameData>) -> SimpleTrans { if let Some(dispatcher) = self.dispatcher.as_mut() { dispatcher.dispatch(&data.world.res); } Trans::None } } impl<'a, 'b> GameState<'a, 'b> { fn
( &mut self, texture_path: &str, ron_path: &str, world: &mut World, ) -> SpriteSheetHandle { // Load the sprite sheet necessary to render the graphics. // The texture is the pixel data // `sprite_sheet` is the layout of the sprites on the image // `texture_handle` is a cloneable reference to the texture let texture_handle = { let loader = world.read_resource::<Loader>(); let texture_storage = world.read_resource::<AssetStorage<Texture>>(); loader.load(texture_path, ImageFormat::default(), (), &texture_storage) }; let loader = world.read_resource::<Loader>(); let sprite_sheet_store = world.read_resource::<AssetStorage<SpriteSheet>>(); loader.load( ron_path, // Here we load the associated ron file SpriteSheetFormat(texture_handle), (), &sprite_sheet_store, ) } /// Creates the `State` specific `Dispatcher`. fn create_dispatcher(&mut self, world: &mut World) { if self.dispatcher.is_none() { let mut dispatcher_builder = DispatcherBuilder::new(); PhysicsBundle::default() .with_debug_lines() .build(&mut dispatcher_builder) .expect("Failed to register PhysicsBundle"); let mut dispatcher = dispatcher_builder.build(); dispatcher.setup(&mut world.res); self.dispatcher = Some(dispatcher); } } } #[derive(Default)] struct PlayerMovementSystem; impl<'s> System<'s> for PlayerMovementSystem { type SystemData = ( Read<'s, InputHandler<StringBindings>>, ReadExpect<'s, Player>, WriteStorage<'s, PhysicsBody<Float>>, ); fn run(&mut self, data: Self::SystemData) { let (input, player, mut physics_bodies) = data; if let Some(physics_body) = physics_bodies.get_mut(player.player) { // handle movement on X axis if let Some(movement) = input.axis_value("leftright") { physics_body.velocity.x = movement.into(); } // handle movement on Y axis if let Some(movement) = input.axis_value("updown") { physics_body.velocity.y = movement.into(); } } } } fn main() -> amethyst::Result<()> { //amethyst::start_logger(Default::default()); amethyst::Logger::from_config(Default::default()) .level_for("gfx_backend_vulkan", amethyst::LogLevelFilter::Warn) .level_for("rendy_factory::factory", amethyst::LogLevelFilter::Warn) .level_for( "rendy_memory::allocator::dynamic", amethyst::LogLevelFilter::Warn, ) .level_for( "rendy_graph::node::render::pass", amethyst::LogLevelFilter::Warn, ) .level_for("rendy_graph::node::present", amethyst::LogLevelFilter::Warn) .level_for("rendy_graph::graph", amethyst::LogLevelFilter::Warn) .level_for( "rendy_memory::allocator::linear", amethyst::LogLevelFilter::Warn, ) .level_for("rendy_wsi", amethyst::LogLevelFilter::Warn) .start(); let app_root = application_root_dir()?; // display configuration let display_config_path = app_root.join("examples/resources/display_config.ron"); // key bindings let key_bindings_path = app_root.join("examples/resources/input.ron"); let game_data = GameDataBuilder::default() .with_bundle(WindowBundle::from_config_path(display_config_path))? .with_bundle(TransformBundle::new())? .with_bundle( InputBundle::<StringBindings>::new().with_bindings_from_file(key_bindings_path)?, )? .with_bundle(UiBundle::<DefaultBackend, StringBindings>::new())? //.with_bundle(PhysicsBundle::default().with_debug_lines())? 
.with( Processor::<SpriteSheet>::new(), "sprite_sheet_processor", &[], ) .with(PrefabLoaderSystem::<GamePrefabData>::default(), "", &[]) .with( PlayerMovementSystem::default(), "player_movement_system", &[], ) .with_thread_local(RenderingSystem::<DefaultBackend, _>::new( ExampleGraph::default(), )); let mut game = Application::build(app_root.join("examples/assets"), GameState::default())? .build(game_data)?; game.run(); Ok(()) } // This graph structure is used for creating a proper `RenderGraph` for // rendering. A RenderGraph can be thought of as the stages during a render // pass. In our case, we are only executing one subpass (DrawFlat2D, or the // sprite pass). This graph also needs to be rebuilt whenever the window is // resized, so the boilerplate code for that operation is also here. #[derive(Default)] struct ExampleGraph { dimensions: Option<ScreenDimensions>, surface_format: Option<Format>, dirty: bool, } impl GraphCreator<DefaultBackend> for ExampleGraph { // This trait method reports to the renderer if the graph must be rebuilt, // usually because the window has been resized. This implementation checks // the screen size and returns true if it has changed. fn rebuild(&mut self, res: &Resources) -> bool { // Rebuild when dimensions change, but wait until at least two frames have the // same dimensions. let new_dimensions = res.try_fetch::<ScreenDimensions>(); use std::ops::Deref; if self.dimensions.as_ref() != new_dimensions.as_ref().map(|d| d.deref()) { self.dirty = true; self.dimensions = new_dimensions.map(|d| d.clone()); return false; } return self.dirty; } // This is the core of a RenderGraph, which is building the actual graph with // subpasses and target images. fn builder( &mut self, factory: &mut Factory<DefaultBackend>, res: &Resources, ) -> GraphBuilder<DefaultBackend, Resources> { use amethyst::renderer::rendy::{ graph::present::PresentNode, hal::command::{ClearDepthStencil, ClearValue}, }; self.dirty = false; // Retrieve a reference to the target window, which is created by the // WindowBundle let window = <ReadExpect<'_, Arc<Window>>>::fetch(res); // Create a new drawing surface in our window let surface = factory.create_surface(&window); // cache surface format to speed things up let surface_format = *self .surface_format .get_or_insert_with(|| factory.get_surface_format(&surface)); let dimensions = self.dimensions.as_ref().unwrap(); let window_kind = image::Kind::D2( dbg!(dimensions.width()) as u32, dimensions.height() as u32, 1, 1, ); // Begin building our RenderGraph let mut graph_builder = GraphBuilder::new(); let color = graph_builder.create_image( window_kind, 1, surface_format, Some(ClearValue::Color([0.0, 0.0, 0.0, 1.0].into())), ); let depth = graph_builder.create_image( window_kind, 1, Format::D32Sfloat, Some(ClearValue::DepthStencil(ClearDepthStencil(1.0, 0))), ); // Create our single `Subpass`, which is the DrawFlat2D pass. // We pass the subpass builder a description of our pass for construction let sprite = graph_builder.add_node( SubpassBuilder::new() .with_group(DrawDebugLinesDesc::new().builder()) .with_group(DrawFlat2DDesc::new().builder()) .with_color(color) .with_depth_stencil(depth) .into_pass(), ); // Finally, add the pass to the graph let _present = graph_builder .add_node(PresentNode::builder(factory, surface, color).with_dependency(sprite)); graph_builder } }
load_sprite_sheet
identifier_name
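The `ExampleGraph::rebuild` method in the record above debounces window resizes: on the frame where the screen dimensions change it records them and returns `false`, and it only reports `true` once they have held steady for a full frame, after which `builder` clears the flag. A minimal standalone sketch of that pattern, using a hypothetical `Dims` type in place of amethyst's `ScreenDimensions`:

```rust
// Sketch of the dirty-flag debounce used by ExampleGraph::rebuild above.
// `Dims` is a hypothetical stand-in for amethyst's ScreenDimensions.
#[derive(Clone, PartialEq)]
struct Dims {
    width: u32,
    height: u32,
}

#[derive(Default)]
struct ResizeTracker {
    dimensions: Option<Dims>,
    dirty: bool,
}

impl ResizeTracker {
    // Mirrors GraphCreator::rebuild: defer while dimensions are still changing.
    fn rebuild(&mut self, new_dimensions: Option<Dims>) -> bool {
        if self.dimensions != new_dimensions {
            self.dirty = true;
            self.dimensions = new_dimensions;
            return false; // changed this frame; wait for it to settle
        }
        self.dirty // unchanged for a frame; report any pending rebuild
    }

    // Mirrors GraphCreator::builder, which resets the flag when it runs.
    fn builder(&mut self) {
        self.dirty = false;
    }
}

fn main() {
    let mut tracker = ResizeTracker::default();
    let dims = Dims { width: 800, height: 600 };
    assert!(!tracker.rebuild(Some(dims.clone()))); // change observed, defer
    assert!(tracker.rebuild(Some(dims.clone())));  // settled, rebuild now
    tracker.builder();
    assert!(!tracker.rebuild(Some(dims)));         // clean again
}
```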
main.rs
#[macro_use] extern crate log; use std::sync::Arc; use amethyst::{ assets::{AssetStorage, Loader, PrefabLoader, PrefabLoaderSystem, Processor, RonFormat}, core::{ bundle::SystemBundle, math::Vector3, transform::{Transform, TransformBundle}, Float, }, ecs::{ Dispatcher, DispatcherBuilder, Entity, Read, ReadExpect, Resources, System, SystemData, WriteStorage, }, input::{InputBundle, InputHandler, StringBindings}, prelude::*, renderer::{ formats::texture::ImageFormat, pass::{DrawDebugLinesDesc, DrawFlat2DDesc}, rendy::{ factory::Factory, graph::{ render::{RenderGroupDesc, SubpassBuilder}, GraphBuilder, }, hal::{format::Format, image}, mesh::{Normal, Position, TexCoord}, }, sprite::{SpriteRender, SpriteSheet, SpriteSheetFormat, SpriteSheetHandle}, types::DefaultBackend, GraphCreator, RenderingSystem, Texture, }, ui::UiBundle, utils::{application_root_dir, scene::BasicScenePrefab}, window::{ScreenDimensions, Window, WindowBundle}, }; use amethyst_physics::PhysicsBundle; use specs_physics::{ bodies::BodyStatus, colliders::Shape, PhysicsBody, PhysicsBodyBuilder, PhysicsColliderBuilder, }; pub type GamePrefabData = BasicScenePrefab<(Vec<Position>, Vec<Normal>, Vec<TexCoord>)>; /// The Player `Resources` contains player relevant data and holds a reference /// to the `Entity` that defines the player. #[derive(Debug)] pub struct Player { /// The player `Entity`. pub player: Entity, } #[derive(Default)] struct GameState<'a, 'b> { /// `State` specific dispatcher. dispatcher: Option<Dispatcher<'a, 'b>>, } impl<'a, 'b> SimpleState for GameState<'a, 'b> { fn on_start(&mut self, data: StateData<GameData>) { info!("GameState.on_start"); let world = data.world; // load scene handle let scene_handle = world.exec(|loader: PrefabLoader<'_, GamePrefabData>| { loader.load("prefab/scene.ron", RonFormat, ()) }); // load sprite sheets let character_handle = self.load_sprite_sheet("texture/character.png", "texture/character.ron", world); let objects_handle = self.load_sprite_sheet("texture/objects.png", "texture/objects.ron", world); // create dispatcher self.create_dispatcher(world); // initialise scene world.create_entity().with(scene_handle.clone()).build(); // create player Entity let player = world .create_entity() .with(SpriteRender { sprite_sheet: character_handle.clone(), sprite_number: 0, }) .with(PhysicsBodyBuilder::<Float>::from(BodyStatus::Dynamic).build()) .with( PhysicsColliderBuilder::<Float>::from(Shape::Rectangle( 15.0.into(), 22.0.into(), 1.0.into(), )) .build(), ) .with(Transform::from(Vector3::new(25.0, 50.0, 0.0))) .build(); // create the player Resource world.add_resource(Player { player }); // create obstacle Entity world .create_entity() .with(SpriteRender { sprite_sheet: objects_handle.clone(), sprite_number: 0, }) .with(PhysicsBodyBuilder::<Float>::from(BodyStatus::Static).build()) .with( PhysicsColliderBuilder::<Float>::from(Shape::Rectangle( 15.0.into(), 16.0.into(), 1.0.into(), )) .build(), ) .with(Transform::from(Vector3::new(75.0, 50.0, 0.0))) .build(); } fn fixed_update(&mut self, data: StateData<GameData>) -> SimpleTrans { if let Some(dispatcher) = self.dispatcher.as_mut() { dispatcher.dispatch(&data.world.res); } Trans::None } } impl<'a, 'b> GameState<'a, 'b> { fn load_sprite_sheet( &mut self, texture_path: &str, ron_path: &str, world: &mut World, ) -> SpriteSheetHandle { // Load the sprite sheet necessary to render the graphics. 
// The texture is the pixel data // `sprite_sheet` is the layout of the sprites on the image // `texture_handle` is a cloneable reference to the texture let texture_handle = { let loader = world.read_resource::<Loader>(); let texture_storage = world.read_resource::<AssetStorage<Texture>>(); loader.load(texture_path, ImageFormat::default(), (), &texture_storage) }; let loader = world.read_resource::<Loader>(); let sprite_sheet_store = world.read_resource::<AssetStorage<SpriteSheet>>(); loader.load( ron_path, // Here we load the associated ron file SpriteSheetFormat(texture_handle), (), &sprite_sheet_store, ) } /// Creates the `State` specific `Dispatcher`. fn create_dispatcher(&mut self, world: &mut World) { if self.dispatcher.is_none() { let mut dispatcher_builder = DispatcherBuilder::new(); PhysicsBundle::default() .with_debug_lines() .build(&mut dispatcher_builder) .expect("Failed to register PhysicsBundle"); let mut dispatcher = dispatcher_builder.build(); dispatcher.setup(&mut world.res); self.dispatcher = Some(dispatcher); } } } #[derive(Default)] struct PlayerMovementSystem; impl<'s> System<'s> for PlayerMovementSystem { type SystemData = ( Read<'s, InputHandler<StringBindings>>, ReadExpect<'s, Player>, WriteStorage<'s, PhysicsBody<Float>>, ); fn run(&mut self, data: Self::SystemData) { let (input, player, mut physics_bodies) = data; if let Some(physics_body) = physics_bodies.get_mut(player.player) { // handle movement on X axis if let Some(movement) = input.axis_value("leftright") { physics_body.velocity.x = movement.into(); } // handle movement on Y axis if let Some(movement) = input.axis_value("updown") { physics_body.velocity.y = movement.into(); } } } } fn main() -> amethyst::Result<()>
.start(); let app_root = application_root_dir()?; // display configuration let display_config_path = app_root.join("examples/resources/display_config.ron"); // key bindings let key_bindings_path = app_root.join("examples/resources/input.ron"); let game_data = GameDataBuilder::default() .with_bundle(WindowBundle::from_config_path(display_config_path))? .with_bundle(TransformBundle::new())? .with_bundle( InputBundle::<StringBindings>::new().with_bindings_from_file(key_bindings_path)?, )? .with_bundle(UiBundle::<DefaultBackend, StringBindings>::new())? //.with_bundle(PhysicsBundle::default().with_debug_lines())? .with( Processor::<SpriteSheet>::new(), "sprite_sheet_processor", &[], ) .with(PrefabLoaderSystem::<GamePrefabData>::default(), "", &[]) .with( PlayerMovementSystem::default(), "player_movement_system", &[], ) .with_thread_local(RenderingSystem::<DefaultBackend, _>::new( ExampleGraph::default(), )); let mut game = Application::build(app_root.join("examples/assets"), GameState::default())? .build(game_data)?; game.run(); Ok(()) } // This graph structure is used for creating a proper `RenderGraph` for // rendering. A `RenderGraph` can be thought of as the stages during a render // pass. In our case, we are only executing one subpass (DrawFlat2D, or the // sprite pass). This graph also needs to be rebuilt whenever the window is // resized, so the boilerplate code for that operation is also here. #[derive(Default)] struct ExampleGraph { dimensions: Option<ScreenDimensions>, surface_format: Option<Format>, dirty: bool, } impl GraphCreator<DefaultBackend> for ExampleGraph { // This trait method reports to the renderer if the graph must be rebuilt, // usually because the window has been resized. This implementation checks // the screen size and returns true if it has changed. fn rebuild(&mut self, res: &Resources) -> bool { // Rebuild when dimensions change, but wait until at least two frames have the // same dimensions. let new_dimensions = res.try_fetch::<ScreenDimensions>(); use std::ops::Deref; if self.dimensions.as_ref() != new_dimensions.as_ref().map(|d| d.deref()) { self.dirty = true; self.dimensions = new_dimensions.map(|d| d.clone()); return false; } return self.dirty; } // This is the core of a RenderGraph, which is building the actual graph with // subpasses and target images. fn builder( &mut self, factory: &mut Factory<DefaultBackend>, res: &Resources, ) -> GraphBuilder<DefaultBackend, Resources> { use amethyst::renderer::rendy::{ graph::present::PresentNode, hal::command::{ClearDepthStencil, ClearValue}, }; self.dirty = false; // Retrieve a reference to the target window, which is created by the // WindowBundle let window = <ReadExpect<'_, Arc<Window>>>::fetch(res); // Create a new drawing surface in our window let surface = factory.create_surface(&window); // cache surface format to speed things up let surface_format = *self .surface_format .get_or_insert_with(|| factory.get_surface_format(&surface)); let dimensions = self.dimensions.as_ref().unwrap(); let window_kind = image::Kind::D2( dbg!(dimensions.width()) as u32, dimensions.height() as u32, 1, 1, ); // Begin building our RenderGraph let mut graph_builder = GraphBuilder::new(); let color = graph_builder.create_image( window_kind, 1, surface_format, Some(ClearValue::Color([0.0, 0.0, 0.0, 1.0].into())), ); let depth = graph_builder.create_image( window_kind, 1, Format::D32Sfloat, Some(ClearValue::DepthStencil(ClearDepthStencil(1.0, 0))), ); // Create our single `Subpass`, which is the DrawFlat2D pass. 
// We pass the subpass builder a description of our pass for construction let sprite = graph_builder.add_node( SubpassBuilder::new() .with_group(DrawDebugLinesDesc::new().builder()) .with_group(DrawFlat2DDesc::new().builder()) .with_color(color) .with_depth_stencil(depth) .into_pass(), ); // Finally, add the pass to the graph let _present = graph_builder .add_node(PresentNode::builder(factory, surface, color).with_dependency(sprite)); graph_builder } }
{ //amethyst::start_logger(Default::default()); amethyst::Logger::from_config(Default::default()) .level_for("gfx_backend_vulkan", amethyst::LogLevelFilter::Warn) .level_for("rendy_factory::factory", amethyst::LogLevelFilter::Warn) .level_for( "rendy_memory::allocator::dynamic", amethyst::LogLevelFilter::Warn, ) .level_for( "rendy_graph::node::render::pass", amethyst::LogLevelFilter::Warn, ) .level_for("rendy_graph::node::present", amethyst::LogLevelFilter::Warn) .level_for("rendy_graph::graph", amethyst::LogLevelFilter::Warn) .level_for( "rendy_memory::allocator::linear", amethyst::LogLevelFilter::Warn, ) .level_for("rendy_wsi", amethyst::LogLevelFilter::Warn)
identifier_body
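All of the `.with(system, name, dependencies)` registrations in `main` above pass an empty dependency list (`&[]`), so the relative execution order of those systems is unconstrained. A small sketch of how named dependencies force ordering, assuming the same specs-style dispatcher API this file already uses (the `Input` and `Movement` systems are illustrative only):

```rust
use amethyst::ecs::{DispatcherBuilder, System, World};

struct Input;
impl<'s> System<'s> for Input {
    type SystemData = ();
    fn run(&mut self, _: Self::SystemData) {
        println!("input");
    }
}

struct Movement;
impl<'s> System<'s> for Movement {
    type SystemData = ();
    fn run(&mut self, _: Self::SystemData) {
        println!("movement");
    }
}

fn main() {
    let mut dispatcher = DispatcherBuilder::new()
        .with(Input, "input", &[])
        // "movement" is only dispatched after "input" has finished:
        .with(Movement, "movement", &["input"])
        .build();
    let mut world = World::new();
    dispatcher.setup(&mut world.res);
    dispatcher.dispatch(&world.res);
}
```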
main.rs
#[macro_use] extern crate log; use std::sync::Arc; use amethyst::{ assets::{AssetStorage, Loader, PrefabLoader, PrefabLoaderSystem, Processor, RonFormat}, core::{ bundle::SystemBundle, math::Vector3, transform::{Transform, TransformBundle}, Float, }, ecs::{ Dispatcher, DispatcherBuilder, Entity, Read, ReadExpect, Resources,
prelude::*, renderer::{ formats::texture::ImageFormat, pass::{DrawDebugLinesDesc, DrawFlat2DDesc}, rendy::{ factory::Factory, graph::{ render::{RenderGroupDesc, SubpassBuilder}, GraphBuilder, }, hal::{format::Format, image}, mesh::{Normal, Position, TexCoord}, }, sprite::{SpriteRender, SpriteSheet, SpriteSheetFormat, SpriteSheetHandle}, types::DefaultBackend, GraphCreator, RenderingSystem, Texture, }, ui::UiBundle, utils::{application_root_dir, scene::BasicScenePrefab}, window::{ScreenDimensions, Window, WindowBundle}, }; use amethyst_physics::PhysicsBundle; use specs_physics::{ bodies::BodyStatus, colliders::Shape, PhysicsBody, PhysicsBodyBuilder, PhysicsColliderBuilder, }; pub type GamePrefabData = BasicScenePrefab<(Vec<Position>, Vec<Normal>, Vec<TexCoord>)>; /// The Player `Resources` contains player relevant data and holds a reference /// to the `Entity` that defines the player. #[derive(Debug)] pub struct Player { /// The player `Entity`. pub player: Entity, } #[derive(Default)] struct GameState<'a, 'b> { /// `State` specific dispatcher. dispatcher: Option<Dispatcher<'a, 'b>>, } impl<'a, 'b> SimpleState for GameState<'a, 'b> { fn on_start(&mut self, data: StateData<GameData>) { info!("GameState.on_start"); let world = data.world; // load scene handle let scene_handle = world.exec(|loader: PrefabLoader<'_, GamePrefabData>| { loader.load("prefab/scene.ron", RonFormat, ()) }); // load sprite sheets let character_handle = self.load_sprite_sheet("texture/character.png", "texture/character.ron", world); let objects_handle = self.load_sprite_sheet("texture/objects.png", "texture/objects.ron", world); // create dispatcher self.create_dispatcher(world); // initialise scene world.create_entity().with(scene_handle.clone()).build(); // create player Entity let player = world .create_entity() .with(SpriteRender { sprite_sheet: character_handle.clone(), sprite_number: 0, }) .with(PhysicsBodyBuilder::<Float>::from(BodyStatus::Dynamic).build()) .with( PhysicsColliderBuilder::<Float>::from(Shape::Rectangle( 15.0.into(), 22.0.into(), 1.0.into(), )) .build(), ) .with(Transform::from(Vector3::new(25.0, 50.0, 0.0))) .build(); // create the player Resource world.add_resource(Player { player }); // create obstacle Entity world .create_entity() .with(SpriteRender { sprite_sheet: objects_handle.clone(), sprite_number: 0, }) .with(PhysicsBodyBuilder::<Float>::from(BodyStatus::Static).build()) .with( PhysicsColliderBuilder::<Float>::from(Shape::Rectangle( 15.0.into(), 16.0.into(), 1.0.into(), )) .build(), ) .with(Transform::from(Vector3::new(75.0, 50.0, 0.0))) .build(); } fn fixed_update(&mut self, data: StateData<GameData>) -> SimpleTrans { if let Some(dispatcher) = self.dispatcher.as_mut() { dispatcher.dispatch(&data.world.res); } Trans::None } } impl<'a, 'b> GameState<'a, 'b> { fn load_sprite_sheet( &mut self, texture_path: &str, ron_path: &str, world: &mut World, ) -> SpriteSheetHandle { // Load the sprite sheet necessary to render the graphics. 
// The texture is the pixel data // `sprite_sheet` is the layout of the sprites on the image // `texture_handle` is a cloneable reference to the texture let texture_handle = { let loader = world.read_resource::<Loader>(); let texture_storage = world.read_resource::<AssetStorage<Texture>>(); loader.load(texture_path, ImageFormat::default(), (), &texture_storage) }; let loader = world.read_resource::<Loader>(); let sprite_sheet_store = world.read_resource::<AssetStorage<SpriteSheet>>(); loader.load( ron_path, // Here we load the associated ron file SpriteSheetFormat(texture_handle), (), &sprite_sheet_store, ) } /// Creates the `State` specific `Dispatcher`. fn create_dispatcher(&mut self, world: &mut World) { if self.dispatcher.is_none() { let mut dispatcher_builder = DispatcherBuilder::new(); PhysicsBundle::default() .with_debug_lines() .build(&mut dispatcher_builder) .expect("Failed to register PhysicsBundle"); let mut dispatcher = dispatcher_builder.build(); dispatcher.setup(&mut world.res); self.dispatcher = Some(dispatcher); } } } #[derive(Default)] struct PlayerMovementSystem; impl<'s> System<'s> for PlayerMovementSystem { type SystemData = ( Read<'s, InputHandler<StringBindings>>, ReadExpect<'s, Player>, WriteStorage<'s, PhysicsBody<Float>>, ); fn run(&mut self, data: Self::SystemData) { let (input, player, mut physics_bodies) = data; if let Some(physics_body) = physics_bodies.get_mut(player.player) { // handle movement on X axis if let Some(movement) = input.axis_value("leftright") { physics_body.velocity.x = movement.into(); } // handle movement on Y axis if let Some(movement) = input.axis_value("updown") { physics_body.velocity.y = movement.into(); } } } } fn main() -> amethyst::Result<()> { //amethyst::start_logger(Default::default()); amethyst::Logger::from_config(Default::default()) .level_for("gfx_backend_vulkan", amethyst::LogLevelFilter::Warn) .level_for("rendy_factory::factory", amethyst::LogLevelFilter::Warn) .level_for( "rendy_memory::allocator::dynamic", amethyst::LogLevelFilter::Warn, ) .level_for( "rendy_graph::node::render::pass", amethyst::LogLevelFilter::Warn, ) .level_for("rendy_graph::node::present", amethyst::LogLevelFilter::Warn) .level_for("rendy_graph::graph", amethyst::LogLevelFilter::Warn) .level_for( "rendy_memory::allocator::linear", amethyst::LogLevelFilter::Warn, ) .level_for("rendy_wsi", amethyst::LogLevelFilter::Warn) .start(); let app_root = application_root_dir()?; // display configuration let display_config_path = app_root.join("examples/resources/display_config.ron"); // key bindings let key_bindings_path = app_root.join("examples/resources/input.ron"); let game_data = GameDataBuilder::default() .with_bundle(WindowBundle::from_config_path(display_config_path))? .with_bundle(TransformBundle::new())? .with_bundle( InputBundle::<StringBindings>::new().with_bindings_from_file(key_bindings_path)?, )? .with_bundle(UiBundle::<DefaultBackend, StringBindings>::new())? //.with_bundle(PhysicsBundle::default().with_debug_lines())? .with( Processor::<SpriteSheet>::new(), "sprite_sheet_processor", &[], ) .with(PrefabLoaderSystem::<GamePrefabData>::default(), "", &[]) .with( PlayerMovementSystem::default(), "player_movement_system", &[], ) .with_thread_local(RenderingSystem::<DefaultBackend, _>::new( ExampleGraph::default(), )); let mut game = Application::build(app_root.join("examples/assets"), GameState::default())? .build(game_data)?; game.run(); Ok(()) } // This graph structure is used for creating a proper `RenderGraph` for // rendering. 
A `RenderGraph` can be thought of as the stages during a render // pass. In our case, we are only executing one subpass (DrawFlat2D, or the // sprite pass). This graph also needs to be rebuilt whenever the window is // resized, so the boilerplate code for that operation is also here. #[derive(Default)] struct ExampleGraph { dimensions: Option<ScreenDimensions>, surface_format: Option<Format>, dirty: bool, } impl GraphCreator<DefaultBackend> for ExampleGraph { // This trait method reports to the renderer if the graph must be rebuilt, // usually because the window has been resized. This implementation checks // the screen size and returns true if it has changed. fn rebuild(&mut self, res: &Resources) -> bool { // Rebuild when dimensions change, but wait until at least two frames have the // same dimensions. let new_dimensions = res.try_fetch::<ScreenDimensions>(); use std::ops::Deref; if self.dimensions.as_ref() != new_dimensions.as_ref().map(|d| d.deref()) { self.dirty = true; self.dimensions = new_dimensions.map(|d| d.clone()); return false; } return self.dirty; } // This is the core of a RenderGraph, which is building the actual graph with // subpasses and target images. fn builder( &mut self, factory: &mut Factory<DefaultBackend>, res: &Resources, ) -> GraphBuilder<DefaultBackend, Resources> { use amethyst::renderer::rendy::{ graph::present::PresentNode, hal::command::{ClearDepthStencil, ClearValue}, }; self.dirty = false; // Retrieve a reference to the target window, which is created by the // WindowBundle let window = <ReadExpect<'_, Arc<Window>>>::fetch(res); // Create a new drawing surface in our window let surface = factory.create_surface(&window); // cache surface format to speed things up let surface_format = *self .surface_format .get_or_insert_with(|| factory.get_surface_format(&surface)); let dimensions = self.dimensions.as_ref().unwrap(); let window_kind = image::Kind::D2( dbg!(dimensions.width()) as u32, dimensions.height() as u32, 1, 1, ); // Begin building our RenderGraph let mut graph_builder = GraphBuilder::new(); let color = graph_builder.create_image( window_kind, 1, surface_format, Some(ClearValue::Color([0.0, 0.0, 0.0, 1.0].into())), ); let depth = graph_builder.create_image( window_kind, 1, Format::D32Sfloat, Some(ClearValue::DepthStencil(ClearDepthStencil(1.0, 0))), ); // Create our single `Subpass`, which is the DrawFlat2D pass. // We pass the subpass builder a description of our pass for construction let sprite = graph_builder.add_node( SubpassBuilder::new() .with_group(DrawDebugLinesDesc::new().builder()) .with_group(DrawFlat2DDesc::new().builder()) .with_color(color) .with_depth_stencil(depth) .into_pass(), ); // Finally, add the pass to the graph let _present = graph_builder .add_node(PresentNode::builder(factory, surface, color).with_dependency(sprite)); graph_builder } }
System, SystemData, WriteStorage, }, input::{InputBundle, InputHandler, StringBindings},
random_line_split
main.rs
#[macro_use] extern crate log; use std::sync::Arc; use amethyst::{ assets::{AssetStorage, Loader, PrefabLoader, PrefabLoaderSystem, Processor, RonFormat}, core::{ bundle::SystemBundle, math::Vector3, transform::{Transform, TransformBundle}, Float, }, ecs::{ Dispatcher, DispatcherBuilder, Entity, Read, ReadExpect, Resources, System, SystemData, WriteStorage, }, input::{InputBundle, InputHandler, StringBindings}, prelude::*, renderer::{ formats::texture::ImageFormat, pass::{DrawDebugLinesDesc, DrawFlat2DDesc}, rendy::{ factory::Factory, graph::{ render::{RenderGroupDesc, SubpassBuilder}, GraphBuilder, }, hal::{format::Format, image}, mesh::{Normal, Position, TexCoord}, }, sprite::{SpriteRender, SpriteSheet, SpriteSheetFormat, SpriteSheetHandle}, types::DefaultBackend, GraphCreator, RenderingSystem, Texture, }, ui::UiBundle, utils::{application_root_dir, scene::BasicScenePrefab}, window::{ScreenDimensions, Window, WindowBundle}, }; use amethyst_physics::PhysicsBundle; use specs_physics::{ bodies::BodyStatus, colliders::Shape, PhysicsBody, PhysicsBodyBuilder, PhysicsColliderBuilder, }; pub type GamePrefabData = BasicScenePrefab<(Vec<Position>, Vec<Normal>, Vec<TexCoord>)>; /// The Player `Resources` contains player relevant data and holds a reference /// to the `Entity` that defines the player. #[derive(Debug)] pub struct Player { /// The player `Entity`. pub player: Entity, } #[derive(Default)] struct GameState<'a, 'b> { /// `State` specific dispatcher. dispatcher: Option<Dispatcher<'a, 'b>>, } impl<'a, 'b> SimpleState for GameState<'a, 'b> { fn on_start(&mut self, data: StateData<GameData>) { info!("GameState.on_start"); let world = data.world; // load scene handle let scene_handle = world.exec(|loader: PrefabLoader<'_, GamePrefabData>| { loader.load("prefab/scene.ron", RonFormat, ()) }); // load sprite sheets let character_handle = self.load_sprite_sheet("texture/character.png", "texture/character.ron", world); let objects_handle = self.load_sprite_sheet("texture/objects.png", "texture/objects.ron", world); // create dispatcher self.create_dispatcher(world); // initialise scene world.create_entity().with(scene_handle.clone()).build(); // create player Entity let player = world .create_entity() .with(SpriteRender { sprite_sheet: character_handle.clone(), sprite_number: 0, }) .with(PhysicsBodyBuilder::<Float>::from(BodyStatus::Dynamic).build()) .with( PhysicsColliderBuilder::<Float>::from(Shape::Rectangle( 15.0.into(), 22.0.into(), 1.0.into(), )) .build(), ) .with(Transform::from(Vector3::new(25.0, 50.0, 0.0))) .build(); // create the player Resource world.add_resource(Player { player }); // create obstacle Entity world .create_entity() .with(SpriteRender { sprite_sheet: objects_handle.clone(), sprite_number: 0, }) .with(PhysicsBodyBuilder::<Float>::from(BodyStatus::Static).build()) .with( PhysicsColliderBuilder::<Float>::from(Shape::Rectangle( 15.0.into(), 16.0.into(), 1.0.into(), )) .build(), ) .with(Transform::from(Vector3::new(75.0, 50.0, 0.0))) .build(); } fn fixed_update(&mut self, data: StateData<GameData>) -> SimpleTrans { if let Some(dispatcher) = self.dispatcher.as_mut()
Trans::None } } impl<'a, 'b> GameState<'a, 'b> { fn load_sprite_sheet( &mut self, texture_path: &str, ron_path: &str, world: &mut World, ) -> SpriteSheetHandle { // Load the sprite sheet necessary to render the graphics. // The texture is the pixel data // `sprite_sheet` is the layout of the sprites on the image // `texture_handle` is a cloneable reference to the texture let texture_handle = { let loader = world.read_resource::<Loader>(); let texture_storage = world.read_resource::<AssetStorage<Texture>>(); loader.load(texture_path, ImageFormat::default(), (), &texture_storage) }; let loader = world.read_resource::<Loader>(); let sprite_sheet_store = world.read_resource::<AssetStorage<SpriteSheet>>(); loader.load( ron_path, // Here we load the associated ron file SpriteSheetFormat(texture_handle), (), &sprite_sheet_store, ) } /// Creates the `State` specific `Dispatcher`. fn create_dispatcher(&mut self, world: &mut World) { if self.dispatcher.is_none() { let mut dispatcher_builder = DispatcherBuilder::new(); PhysicsBundle::default() .with_debug_lines() .build(&mut dispatcher_builder) .expect("Failed to register PhysicsBundle"); let mut dispatcher = dispatcher_builder.build(); dispatcher.setup(&mut world.res); self.dispatcher = Some(dispatcher); } } } #[derive(Default)] struct PlayerMovementSystem; impl<'s> System<'s> for PlayerMovementSystem { type SystemData = ( Read<'s, InputHandler<StringBindings>>, ReadExpect<'s, Player>, WriteStorage<'s, PhysicsBody<Float>>, ); fn run(&mut self, data: Self::SystemData) { let (input, player, mut physics_bodies) = data; if let Some(physics_body) = physics_bodies.get_mut(player.player) { // handle movement on X axis if let Some(movement) = input.axis_value("leftright") { physics_body.velocity.x = movement.into(); } // handle movement on Y axis if let Some(movement) = input.axis_value("updown") { physics_body.velocity.y = movement.into(); } } } } fn main() -> amethyst::Result<()> { //amethyst::start_logger(Default::default()); amethyst::Logger::from_config(Default::default()) .level_for("gfx_backend_vulkan", amethyst::LogLevelFilter::Warn) .level_for("rendy_factory::factory", amethyst::LogLevelFilter::Warn) .level_for( "rendy_memory::allocator::dynamic", amethyst::LogLevelFilter::Warn, ) .level_for( "rendy_graph::node::render::pass", amethyst::LogLevelFilter::Warn, ) .level_for("rendy_graph::node::present", amethyst::LogLevelFilter::Warn) .level_for("rendy_graph::graph", amethyst::LogLevelFilter::Warn) .level_for( "rendy_memory::allocator::linear", amethyst::LogLevelFilter::Warn, ) .level_for("rendy_wsi", amethyst::LogLevelFilter::Warn) .start(); let app_root = application_root_dir()?; // display configuration let display_config_path = app_root.join("examples/resources/display_config.ron"); // key bindings let key_bindings_path = app_root.join("examples/resources/input.ron"); let game_data = GameDataBuilder::default() .with_bundle(WindowBundle::from_config_path(display_config_path))? .with_bundle(TransformBundle::new())? .with_bundle( InputBundle::<StringBindings>::new().with_bindings_from_file(key_bindings_path)?, )? .with_bundle(UiBundle::<DefaultBackend, StringBindings>::new())? //.with_bundle(PhysicsBundle::default().with_debug_lines())? 
.with( Processor::<SpriteSheet>::new(), "sprite_sheet_processor", &[], ) .with(PrefabLoaderSystem::<GamePrefabData>::default(), "", &[]) .with( PlayerMovementSystem::default(), "player_movement_system", &[], ) .with_thread_local(RenderingSystem::<DefaultBackend, _>::new( ExampleGraph::default(), )); let mut game = Application::build(app_root.join("examples/assets"), GameState::default())? .build(game_data)?; game.run(); Ok(()) } // This graph structure is used for creating a proper `RenderGraph` for // rendering. A `RenderGraph` can be thought of as the stages during a render // pass. In our case, we are only executing one subpass (DrawFlat2D, or the // sprite pass). This graph also needs to be rebuilt whenever the window is // resized, so the boilerplate code for that operation is also here. #[derive(Default)] struct ExampleGraph { dimensions: Option<ScreenDimensions>, surface_format: Option<Format>, dirty: bool, } impl GraphCreator<DefaultBackend> for ExampleGraph { // This trait method reports to the renderer if the graph must be rebuilt, // usually because the window has been resized. This implementation checks // the screen size and returns true if it has changed. fn rebuild(&mut self, res: &Resources) -> bool { // Rebuild when dimensions change, but wait until at least two frames have the // same dimensions. let new_dimensions = res.try_fetch::<ScreenDimensions>(); use std::ops::Deref; if self.dimensions.as_ref() != new_dimensions.as_ref().map(|d| d.deref()) { self.dirty = true; self.dimensions = new_dimensions.map(|d| d.clone()); return false; } return self.dirty; } // This is the core of a RenderGraph, which is building the actual graph with // subpasses and target images. fn builder( &mut self, factory: &mut Factory<DefaultBackend>, res: &Resources, ) -> GraphBuilder<DefaultBackend, Resources> { use amethyst::renderer::rendy::{ graph::present::PresentNode, hal::command::{ClearDepthStencil, ClearValue}, }; self.dirty = false; // Retrieve a reference to the target window, which is created by the // WindowBundle let window = <ReadExpect<'_, Arc<Window>>>::fetch(res); // Create a new drawing surface in our window let surface = factory.create_surface(&window); // cache surface format to speed things up let surface_format = *self .surface_format .get_or_insert_with(|| factory.get_surface_format(&surface)); let dimensions = self.dimensions.as_ref().unwrap(); let window_kind = image::Kind::D2( dbg!(dimensions.width()) as u32, dimensions.height() as u32, 1, 1, ); // Begin building our RenderGraph let mut graph_builder = GraphBuilder::new(); let color = graph_builder.create_image( window_kind, 1, surface_format, Some(ClearValue::Color([0.0, 0.0, 0.0, 1.0].into())), ); let depth = graph_builder.create_image( window_kind, 1, Format::D32Sfloat, Some(ClearValue::DepthStencil(ClearDepthStencil(1.0, 0))), ); // Create our single `Subpass`, which is the DrawFlat2D pass. // We pass the subpass builder a description of our pass for construction let sprite = graph_builder.add_node( SubpassBuilder::new() .with_group(DrawDebugLinesDesc::new().builder()) .with_group(DrawFlat2DDesc::new().builder()) .with_color(color) .with_depth_stencil(depth) .into_pass(), ); // Finally, add the pass to the graph let _present = graph_builder .add_node(PresentNode::builder(factory, surface, color).with_dependency(sprite)); graph_builder } }
{ dispatcher.dispatch(&data.world.res); }
conditional_block
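`PlayerMovementSystem::run` above copies the raw axis reading straight into the body's velocity, so even small stick noise moves the player. A hedged sketch of a deadzone-and-scale helper that could sit in front of that assignment; the `0.1` threshold and `speed` factor are illustrative assumptions, not values from the example:

```rust
// Map a raw input axis reading to a velocity component: ignore readings
// inside the deadzone, scale the rest. Threshold and scale are illustrative.
fn axis_to_velocity(axis: Option<f64>, speed: f64) -> f64 {
    match axis {
        Some(v) if v.abs() > 0.1 => v * speed,
        _ => 0.0,
    }
}

fn main() {
    assert_eq!(axis_to_velocity(Some(0.05), 50.0), 0.0); // inside deadzone
    assert_eq!(axis_to_velocity(Some(1.0), 50.0), 50.0); // full deflection
    assert_eq!(axis_to_velocity(None, 50.0), 0.0);       // axis not bound
}
```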
lib.rs
//! Ropey is a utf8 text rope for Rust. It is fast, robust, and can handle //! huge texts and memory-incoherent edits with ease. //! //! Ropey's atomic unit of text is Unicode scalar values (or `char`s in Rust) //! encoded as utf8. All of Ropey's editing and slicing operations are done //! in terms of char indices, which prevents accidental creation of invalid //! utf8 data. //! //! The library is made up of four main components: //! //! - [`Rope`]: the main rope type. //! - [`RopeSlice`]: an immutable view into part of a //! `Rope`. //! - [`iter`]: iterators over `Rope`/`RopeSlice` data. //! - [`RopeBuilder`]: an efficient incremental //! `Rope` builder. //! //! //! # A Basic Example //! //! Let's say we want to open up a text file, replace the 516th line (the //! writing was terrible!), and save it back to disk. It's contrived, but will //! give a good sampling of the APIs and how they work together. //! //! ```no_run //! # use std::io::Result; //! use std::fs::File; //! use std::io::{BufReader, BufWriter}; //! use ropey::Rope; //! //! # fn do_stuff() -> Result<()> { //! // Load a text file. //! let mut text = Rope::from_reader( //! BufReader::new(File::open("my_great_book.txt")?) //! )?; //! //! // Print the 516th line (zero-indexed) to see the terrible //! // writing. //! println!("{}", text.line(515)); //! //! // Get the start/end char indices of the line. //! let start_idx = text.line_to_char(515); //! let end_idx = text.line_to_char(516); //! //! // Remove the line... //! text.remove(start_idx..end_idx); //! //! // ...and replace it with something better. //! text.insert(start_idx, "The flowers are... so... dunno.\n"); //! //! // Print the changes, along with the previous few lines for context. //! let start_idx = text.line_to_char(511); //! let end_idx = text.line_to_char(516); //! println!("{}", text.slice(start_idx..end_idx)); //! //! // Write the file back out to disk. //! text.write_to( //! BufWriter::new(File::create("my_great_book.txt")?) //! )?; //! # Ok(()) //! # } //! # do_stuff().unwrap(); //! ``` //! //! More examples can be found in the `examples` directory of the git //! repository. Many of those examples demonstrate doing non-trivial things //! with Ropey such as grapheme handling, search-and-replace, and streaming //! loading of non-utf8 text files. //! //! //! # Low-level APIs //! //! Ropey also provides access to some of its low-level APIs, enabling client //! code to efficiently work with a `Rope`'s data and implement new //! functionality. The most important of those APIs are: //! //! - The [`chunk_at_*()`](Rope::chunk_at_byte) //! chunk-fetching methods of `Rope` and `RopeSlice`. //! - The [`Chunks`](iter::Chunks) iterator. //! - The functions in [`str_utils`] for operating on //! `&str` slices. //! //! Internally, each `Rope` stores text as a segmented collection of utf8 //! strings. The chunk-fetching methods and `Chunks` iterator provide direct //! access to those strings (or "chunks") as `&str` slices, allowing client //! code to work directly with the underlying utf8 data. //! //! The chunk-fetching methods and `str_utils` functions are the basic //! building blocks that Ropey itself uses to build much of its functionality. //! For example, the [`Rope::byte_to_char()`] //! method can be reimplemented as a free function like this: //! //! ```no_run //! use ropey::{ //! Rope, //! str_utils::byte_to_char_idx //! }; //! //! fn byte_to_char(rope: &Rope, byte_idx: usize) -> usize { //! let (chunk, b, c, _) = rope.chunk_at_byte(byte_idx); //! 
c + byte_to_char_idx(chunk, byte_idx - b) //! } //! ``` //! //! And this will be just as efficient as Ropey's implementation. //! //! The chunk-fetching methods in particular are among the fastest functions //! that Ropey provides, generally operating in the sub-hundred nanosecond //! range for medium-sized (~200kB) documents on recent-ish computer systems. //! //! //! # A Note About Line Breaks //! //! Some of Ropey's APIs use the concept of line breaks or lines of text. //! //! Ropey considers the start of the rope and positions immediately //! _after_ line breaks to be the start of new lines. And it treats //! line breaks as being a part of the lines they mark the end of. //! //! For example, the rope `"Hello"` has a single line: `"Hello"`. The //! rope `"Hello\nworld"` has two lines: `"Hello\n"` and `"world"`. And //! the rope `"Hello\nworld\n"` has three lines: `"Hello\n"`, //! `"world\n"`, and `""`. //! //! Ropey can be configured at build time via feature flags to recognize //! different line breaks. Ropey always recognizes: //! //! - `U+000A` &mdash; LF (Line Feed) //! - `U+000D` `U+000A` &mdash; CRLF (Carriage Return + Line Feed) //! //! With the `cr_lines` feature, the following are also recognized: //! //! - `U+000D` &mdash; CR (Carriage Return) //! //! With the `unicode_lines` feature, in addition to all of the //! above, the following are also recognized (bringing Ropey into //! conformance with //! [Unicode Annex #14](https://www.unicode.org/reports/tr14/#BK)): //! //! - `U+000B` &mdash; VT (Vertical Tab) //! - `U+000C` &mdash; FF (Form Feed) //! - `U+0085` &mdash; NEL (Next Line) //! - `U+2028` &mdash; Line Separator //! - `U+2029` &mdash; Paragraph Separator //! //! (Note: `unicode_lines` is enabled by default, and always implies //! `cr_lines`.) //! //! CRLF pairs are always treated as a single line break, and are never split //! across chunks. Note, however, that slicing can still split them. //! //! //! # A Note About SIMD Acceleration //! //! Ropey has a `simd` feature flag (enabled by default) that enables //! explicit SIMD on supported platforms to improve performance. //! //! There is a bit of a footgun here: if you disable default features to //! configure line break behavior (as per the section above) then SIMD //! will also get disabled, and performance will suffer. So be careful //! to explicitly re-enable the `simd` feature flag (if desired) when //! doing that. #![allow(clippy::collapsible_if)] #![allow(clippy::inline_always)] #![allow(clippy::needless_return)] #![allow(clippy::redundant_field_names)] #![allow(clippy::type_complexity)] extern crate smallvec; extern crate str_indices; mod crlf; mod rope; mod rope_builder; mod slice; mod tree; pub mod iter; pub mod str_utils; use std::ops::Bound; pub use crate::rope::Rope; pub use crate::rope_builder::RopeBuilder; pub use crate::slice::RopeSlice; //============================================================== // Error reporting types. /// Ropey's result type. pub type Result<T> = std::result::Result<T, Error>; /// Ropey's error type. #[derive(Clone, Copy)] #[non_exhaustive] pub enum Error { /// Indicates that the passed byte index was out of bounds. /// /// Contains the index attempted and the actual length of the /// `Rope`/`RopeSlice` in bytes, in that order. ByteIndexOutOfBounds(usize, usize), /// Indicates that the passed char index was out of bounds. /// /// Contains the index attempted and the actual length of the /// `Rope`/`RopeSlice` in chars, in that order. 
CharIndexOutOfBounds(usize, usize), /// Indicates that the passed line index was out of bounds. /// /// Contains the index attempted and the actual length of the /// `Rope`/`RopeSlice` in lines, in that order. LineIndexOutOfBounds(usize, usize), /// Indicates that the passed utf16 code-unit index was out of /// bounds. /// /// Contains the index attempted and the actual length of the /// `Rope`/`RopeSlice` in utf16 code units, in that order. Utf16IndexOutOfBounds(usize, usize), /// Indicates that the passed byte index was not a char boundary. /// /// Contains the passed byte index. ByteIndexNotCharBoundary(usize), /// Indicates that the passed byte range didn't line up with char /// boundaries. /// /// Contains the [start, end) byte indices of the range, in that order. /// When either the start or end are `None`, that indicates a half-open /// range. ByteRangeNotCharBoundary( Option<usize>, // Start. Option<usize>, // End. ), /// Indicates that a reversed byte-index range (end < start) was /// encountered. /// /// Contains the [start, end) byte indices of the range, in that order. ByteRangeInvalid( usize, // Start. usize, // End. ), /// Indicates that a reversed char-index range (end < start) was /// encountered. /// /// Contains the [start, end) char indices of the range, in that order. CharRangeInvalid( usize, // Start. usize, // End. ), /// Indicates that the passed byte-index range was partially or fully /// out of bounds. /// /// Contains the [start, end) byte indices of the range and the actual /// length of the `Rope`/`RopeSlice` in bytes, in that order. When /// either the start or end are `None`, that indicates a half-open range. ByteRangeOutOfBounds( Option<usize>, // Start. Option<usize>, // End. usize, // Rope byte length. ), /// Indicates that the passed char-index range was partially or fully /// out of bounds. /// /// Contains the [start, end) char indices of the range and the actual /// length of the `Rope`/`RopeSlice` in chars, in that order. When /// either the start or end are `None`, that indicates a half-open range. CharRangeOutOfBounds( Option<usize>, // Start. Option<usize>, // End. usize, // Rope char length. ), } impl std::error::Error for Error { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { None } // Deprecated in std. fn description(&self) -> &str { "" } // Deprecated in std. 
fn cause(&self) -> Option<&dyn std::error::Error> { None } } impl std::fmt::Debug for Error { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match *self { Error::ByteIndexOutOfBounds(index, len) => { write!( f, "Byte index out of bounds: byte index {}, Rope/RopeSlice byte length {}", index, len ) } Error::CharIndexOutOfBounds(index, len) => { write!( f, "Char index out of bounds: char index {}, Rope/RopeSlice char length {}", index, len ) } Error::LineIndexOutOfBounds(index, len) => { write!( f, "Line index out of bounds: line index {}, Rope/RopeSlice line count {}", index, len ) } Error::Utf16IndexOutOfBounds(index, len) => { write!(f, "Utf16 code-unit index out of bounds: utf16 index {}, Rope/RopeSlice utf16 length {}", index, len) } Error::ByteIndexNotCharBoundary(index) => { write!( f, "Byte index is not a valid char boundary: byte index {}", index ) } Error::ByteRangeNotCharBoundary(start_idx_opt, end_idx_opt) => { write!(f, "Byte range does not align with char boundaries: range ")?; write_range(f, start_idx_opt, end_idx_opt) } Error::ByteRangeInvalid(start_idx, end_idx) => { write!( f, "Invalid byte range {}..{}: start must be <= end", start_idx, end_idx ) } Error::CharRangeInvalid(start_idx, end_idx) => { write!( f, "Invalid char range {}..{}: start must be <= end", start_idx, end_idx ) } Error::ByteRangeOutOfBounds(start_idx_opt, end_idx_opt, len) => { write!(f, "Byte range out of bounds: byte range ")?; write_range(f, start_idx_opt, end_idx_opt)?; write!(f, ", Rope/RopeSlice byte length {}", len) } Error::CharRangeOutOfBounds(start_idx_opt, end_idx_opt, len) => { write!(f, "Char range out of bounds: char range ")?; write_range(f, start_idx_opt, end_idx_opt)?; write!(f, ", Rope/RopeSlice char length {}", len) } } } } impl std::fmt::Display for Error { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { // Just re-use the debug impl. std::fmt::Debug::fmt(self, f) } } fn
( f: &mut std::fmt::Formatter<'_>, start_idx: Option<usize>, end_idx: Option<usize>, ) -> std::fmt::Result { match (start_idx, end_idx) { (None, None) => { write!(f, "..") } (Some(start), None) => { write!(f, "{}..", start) } (None, Some(end)) => { write!(f, "..{}", end) } (Some(start), Some(end)) => { write!(f, "{}..{}", start, end) } } } //============================================================== // Range handling utilities. #[inline(always)] pub(crate) fn start_bound_to_num(b: Bound<&usize>) -> Option<usize> { match b { Bound::Included(n) => Some(*n), Bound::Excluded(n) => Some(*n + 1), Bound::Unbounded => None, } } #[inline(always)] pub(crate) fn end_bound_to_num(b: Bound<&usize>) -> Option<usize> { match b { Bound::Included(n) => Some(*n + 1), Bound::Excluded(n) => Some(*n), Bound::Unbounded => None, } }
write_range
identifier_name
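The "A Note About Line Breaks" section in the record above states that line breaks belong to the line they end, so a rope with a trailing `"\n"` has a final empty line. A quick check of that semantics, assuming `ropey` as a dependency:

```rust
use ropey::Rope;

fn main() {
    let rope = Rope::from_str("Hello\nworld\n");
    // The trailing "\n" ends the second line and starts an empty third one.
    assert_eq!(rope.len_lines(), 3);
    assert_eq!(rope.line(0).to_string(), "Hello\n");
    assert_eq!(rope.line(1).to_string(), "world\n");
    assert_eq!(rope.line(2).to_string(), "");
}
```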
lib.rs
//! Ropey is a utf8 text rope for Rust. It is fast, robust, and can handle //! huge texts and memory-incoherent edits with ease. //! //! Ropey's atomic unit of text is Unicode scalar values (or `char`s in Rust) //! encoded as utf8. All of Ropey's editing and slicing operations are done //! in terms of char indices, which prevents accidental creation of invalid //! utf8 data. //! //! The library is made up of four main components: //! //! - [`Rope`]: the main rope type. //! - [`RopeSlice`]: an immutable view into part of a //! `Rope`. //! - [`iter`]: iterators over `Rope`/`RopeSlice` data. //! - [`RopeBuilder`]: an efficient incremental //! `Rope` builder. //! //! //! # A Basic Example //! //! Let's say we want to open up a text file, replace the 516th line (the //! writing was terrible!), and save it back to disk. It's contrived, but will //! give a good sampling of the APIs and how they work together. //! //! ```no_run //! # use std::io::Result; //! use std::fs::File; //! use std::io::{BufReader, BufWriter}; //! use ropey::Rope; //! //! # fn do_stuff() -> Result<()> { //! // Load a text file. //! let mut text = Rope::from_reader( //! BufReader::new(File::open("my_great_book.txt")?) //! )?; //! //! // Print the 516th line (zero-indexed) to see the terrible //! // writing. //! println!("{}", text.line(515)); //! //! // Get the start/end char indices of the line. //! let start_idx = text.line_to_char(515); //! let end_idx = text.line_to_char(516); //! //! // Remove the line... //! text.remove(start_idx..end_idx); //! //! // ...and replace it with something better. //! text.insert(start_idx, "The flowers are... so... dunno.\n"); //! //! // Print the changes, along with the previous few lines for context. //! let start_idx = text.line_to_char(511); //! let end_idx = text.line_to_char(516); //! println!("{}", text.slice(start_idx..end_idx)); //! //! // Write the file back out to disk. //! text.write_to( //! BufWriter::new(File::create("my_great_book.txt")?) //! )?; //! # Ok(()) //! # } //! # do_stuff().unwrap(); //! ``` //! //! More examples can be found in the `examples` directory of the git //! repository. Many of those examples demonstrate doing non-trivial things //! with Ropey such as grapheme handling, search-and-replace, and streaming //! loading of non-utf8 text files. //! //! //! # Low-level APIs //! //! Ropey also provides access to some of its low-level APIs, enabling client //! code to efficiently work with a `Rope`'s data and implement new //! functionality. The most important of those APIs are: //! //! - The [`chunk_at_*()`](Rope::chunk_at_byte) //! chunk-fetching methods of `Rope` and `RopeSlice`. //! - The [`Chunks`](iter::Chunks) iterator. //! - The functions in [`str_utils`] for operating on //! `&str` slices. //! //! Internally, each `Rope` stores text as a segmented collection of utf8 //! strings. The chunk-fetching methods and `Chunks` iterator provide direct //! access to those strings (or "chunks") as `&str` slices, allowing client //! code to work directly with the underlying utf8 data. //! //! The chunk-fetching methods and `str_utils` functions are the basic //! building blocks that Ropey itself uses to build much of its functionality. //! For example, the [`Rope::byte_to_char()`] //! method can be reimplemented as a free function like this: //! //! ```no_run //! use ropey::{ //! Rope, //! str_utils::byte_to_char_idx //! }; //! //! fn byte_to_char(rope: &Rope, byte_idx: usize) -> usize { //! let (chunk, b, c, _) = rope.chunk_at_byte(byte_idx); //! 
c + byte_to_char_idx(chunk, byte_idx - b) //! } //! ``` //! //! And this will be just as efficient as Ropey's implementation. //! //! The chunk-fetching methods in particular are among the fastest functions //! that Ropey provides, generally operating in the sub-hundred nanosecond //! range for medium-sized (~200kB) documents on recent-ish computer systems. //! //! //! # A Note About Line Breaks //! //! Some of Ropey's APIs use the concept of line breaks or lines of text. //! //! Ropey considers the start of the rope and positions immediately //! _after_ line breaks to be the start of new lines. And it treats //! line breaks as being a part of the lines they mark the end of. //! //! For example, the rope `"Hello"` has a single line: `"Hello"`. The //! rope `"Hello\nworld"` has two lines: `"Hello\n"` and `"world"`. And //! the rope `"Hello\nworld\n"` has three lines: `"Hello\n"`, //! `"world\n"`, and `""`. //! //! Ropey can be configured at build time via feature flags to recognize //! different line breaks. Ropey always recognizes: //! //! - `U+000A` &mdash; LF (Line Feed) //! - `U+000D` `U+000A` &mdash; CRLF (Carriage Return + Line Feed) //! //! With the `cr_lines` feature, the following are also recognized: //! //! - `U+000D` &mdash; CR (Carriage Return) //! //! With the `unicode_lines` feature, in addition to all of the //! above, the following are also recognized (bringing Ropey into //! conformance with //! [Unicode Annex #14](https://www.unicode.org/reports/tr14/#BK)): //! //! - `U+000B` &mdash; VT (Vertical Tab) //! - `U+000C` &mdash; FF (Form Feed) //! - `U+0085` &mdash; NEL (Next Line) //! - `U+2028` &mdash; Line Separator //! - `U+2029` &mdash; Paragraph Separator //! //! (Note: `unicode_lines` is enabled by default, and always implies //! `cr_lines`.) //! //! CRLF pairs are always treated as a single line break, and are never split //! across chunks. Note, however, that slicing can still split them. //! //! //! # A Note About SIMD Acceleration //! //! Ropey has a `simd` feature flag (enabled by default) that enables //! explicit SIMD on supported platforms to improve performance. //! //! There is a bit of a footgun here: if you disable default features to //! configure line break behavior (as per the section above) then SIMD //! will also get disabled, and performance will suffer. So be careful //! to explicitly re-enable the `simd` feature flag (if desired) when //! doing that. #![allow(clippy::collapsible_if)] #![allow(clippy::inline_always)] #![allow(clippy::needless_return)] #![allow(clippy::redundant_field_names)] #![allow(clippy::type_complexity)] extern crate smallvec; extern crate str_indices; mod crlf; mod rope; mod rope_builder; mod slice; mod tree; pub mod iter; pub mod str_utils; use std::ops::Bound; pub use crate::rope::Rope; pub use crate::rope_builder::RopeBuilder; pub use crate::slice::RopeSlice; //============================================================== // Error reporting types. /// Ropey's result type. pub type Result<T> = std::result::Result<T, Error>; /// Ropey's error type. #[derive(Clone, Copy)] #[non_exhaustive] pub enum Error { /// Indicates that the passed byte index was out of bounds. /// /// Contains the index attempted and the actual length of the /// `Rope`/`RopeSlice` in bytes, in that order. ByteIndexOutOfBounds(usize, usize), /// Indicates that the passed char index was out of bounds. /// /// Contains the index attempted and the actual length of the /// `Rope`/`RopeSlice` in chars, in that order. 
CharIndexOutOfBounds(usize, usize), /// Indicates that the passed line index was out of bounds. /// /// Contains the index attempted and the actual length of the /// `Rope`/`RopeSlice` in lines, in that order. LineIndexOutOfBounds(usize, usize), /// Indicates that the passed utf16 code-unit index was out of /// bounds. /// /// Contains the index attempted and the actual length of the /// `Rope`/`RopeSlice` in utf16 code units, in that order. Utf16IndexOutOfBounds(usize, usize), /// Indicates that the passed byte index was not a char boundary. /// /// Contains the passed byte index. ByteIndexNotCharBoundary(usize), /// Indicates that the passed byte range didn't line up with char /// boundaries. /// /// Contains the [start, end) byte indices of the range, in that order. /// When either the start or end are `None`, that indicates a half-open /// range. ByteRangeNotCharBoundary( Option<usize>, // Start. Option<usize>, // End. ), /// Indicates that a reversed byte-index range (end < start) was /// encountered. /// /// Contains the [start, end) byte indices of the range, in that order. ByteRangeInvalid( usize, // Start. usize, // End. ), /// Indicates that a reversed char-index range (end < start) was /// encountered. /// /// Contains the [start, end) char indices of the range, in that order. CharRangeInvalid( usize, // Start. usize, // End. ), /// Indicates that the passed byte-index range was partially or fully /// out of bounds. /// /// Contains the [start, end) byte indices of the range and the actual /// length of the `Rope`/`RopeSlice` in bytes, in that order. When /// either the start or end are `None`, that indicates a half-open range. ByteRangeOutOfBounds( Option<usize>, // Start. Option<usize>, // End. usize, // Rope byte length. ), /// Indicates that the passed char-index range was partially or fully /// out of bounds. /// /// Contains the [start, end) char indices of the range and the actual /// length of the `Rope`/`RopeSlice` in chars, in that order. When /// either the start or end are `None`, that indicates a half-open range. CharRangeOutOfBounds( Option<usize>, // Start. Option<usize>, // End. usize, // Rope char length. ), } impl std::error::Error for Error { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { None } // Deprecated in std. fn description(&self) -> &str
// Deprecated in std. fn cause(&self) -> Option<&dyn std::error::Error> { None } } impl std::fmt::Debug for Error { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match *self { Error::ByteIndexOutOfBounds(index, len) => { write!( f, "Byte index out of bounds: byte index {}, Rope/RopeSlice byte length {}", index, len ) } Error::CharIndexOutOfBounds(index, len) => { write!( f, "Char index out of bounds: char index {}, Rope/RopeSlice char length {}", index, len ) } Error::LineIndexOutOfBounds(index, len) => { write!( f, "Line index out of bounds: line index {}, Rope/RopeSlice line count {}", index, len ) } Error::Utf16IndexOutOfBounds(index, len) => { write!(f, "Utf16 code-unit index out of bounds: utf16 index {}, Rope/RopeSlice utf16 length {}", index, len) } Error::ByteIndexNotCharBoundary(index) => { write!( f, "Byte index is not a valid char boundary: byte index {}", index ) } Error::ByteRangeNotCharBoundary(start_idx_opt, end_idx_opt) => { write!(f, "Byte range does not align with char boundaries: range ")?; write_range(f, start_idx_opt, end_idx_opt) } Error::ByteRangeInvalid(start_idx, end_idx) => { write!( f, "Invalid byte range {}..{}: start must be <= end", start_idx, end_idx ) } Error::CharRangeInvalid(start_idx, end_idx) => { write!( f, "Invalid char range {}..{}: start must be <= end", start_idx, end_idx ) } Error::ByteRangeOutOfBounds(start_idx_opt, end_idx_opt, len) => { write!(f, "Byte range out of bounds: byte range ")?; write_range(f, start_idx_opt, end_idx_opt)?; write!(f, ", Rope/RopeSlice byte length {}", len) } Error::CharRangeOutOfBounds(start_idx_opt, end_idx_opt, len) => { write!(f, "Char range out of bounds: char range ")?; write_range(f, start_idx_opt, end_idx_opt)?; write!(f, ", Rope/RopeSlice char length {}", len) } } } } impl std::fmt::Display for Error { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { // Just re-use the debug impl. std::fmt::Debug::fmt(self, f) } } fn write_range( f: &mut std::fmt::Formatter<'_>, start_idx: Option<usize>, end_idx: Option<usize>, ) -> std::fmt::Result { match (start_idx, end_idx) { (None, None) => { write!(f, "..") } (Some(start), None) => { write!(f, "{}..", start) } (None, Some(end)) => { write!(f, "..{}", end) } (Some(start), Some(end)) => { write!(f, "{}..{}", start, end) } } } //============================================================== // Range handling utilities. #[inline(always)] pub(crate) fn start_bound_to_num(b: Bound<&usize>) -> Option<usize> { match b { Bound::Included(n) => Some(*n), Bound::Excluded(n) => Some(*n + 1), Bound::Unbounded => None, } } #[inline(always)] pub(crate) fn end_bound_to_num(b: Bound<&usize>) -> Option<usize> { match b { Bound::Included(n) => Some(*n + 1), Bound::Excluded(n) => Some(*n), Bound::Unbounded => None, } }
{ "" }
identifier_body
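`Display` for `Error` above simply defers to the `Debug` impl, so the two always render the same message. A sketch of a unit test that could live inside this lib.rs to pin that down (the module name is illustrative):

```rust
#[cfg(test)]
mod error_fmt_tests {
    use super::Error;

    #[test]
    fn display_matches_debug() {
        let err = Error::ByteIndexOutOfBounds(10, 3);
        // Display::fmt forwards to Debug::fmt, so both produce one message.
        assert_eq!(format!("{}", err), format!("{:?}", err));
        assert_eq!(
            format!("{}", err),
            "Byte index out of bounds: byte index 10, Rope/RopeSlice byte length 3"
        );
    }
}
```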
lib.rs
//! Ropey is a utf8 text rope for Rust. It is fast, robust, and can handle //! huge texts and memory-incoherent edits with ease. //! //! Ropey's atomic unit of text is Unicode scalar values (or `char`s in Rust) //! encoded as utf8. All of Ropey's editing and slicing operations are done //! in terms of char indices, which prevents accidental creation of invalid //! utf8 data. //! //! The library is made up of four main components: //! //! - [`Rope`]: the main rope type. //! - [`RopeSlice`]: an immutable view into part of a //! `Rope`. //! - [`iter`]: iterators over `Rope`/`RopeSlice` data. //! - [`RopeBuilder`]: an efficient incremental //! `Rope` builder. //! //! //! # A Basic Example //! //! Let's say we want to open up a text file, replace the 516th line (the //! writing was terrible!), and save it back to disk. It's contrived, but will //! give a good sampling of the APIs and how they work together. //! //! ```no_run //! # use std::io::Result; //! use std::fs::File; //! use std::io::{BufReader, BufWriter}; //! use ropey::Rope; //! //! # fn do_stuff() -> Result<()> { //! // Load a text file. //! let mut text = Rope::from_reader( //! BufReader::new(File::open("my_great_book.txt")?) //! )?; //! //! // Print the 516th line (zero-indexed) to see the terrible //! // writing. //! println!("{}", text.line(515)); //! //! // Get the start/end char indices of the line. //! let start_idx = text.line_to_char(515); //! let end_idx = text.line_to_char(516); //! //! // Remove the line... //! text.remove(start_idx..end_idx); //! //! // ...and replace it with something better. //! text.insert(start_idx, "The flowers are... so... dunno.\n"); //! //! // Print the changes, along with the previous few lines for context. //! let start_idx = text.line_to_char(511); //! let end_idx = text.line_to_char(516); //! println!("{}", text.slice(start_idx..end_idx)); //! //! // Write the file back out to disk. //! text.write_to( //! BufWriter::new(File::create("my_great_book.txt")?) //! )?; //! # Ok(()) //! # } //! # do_stuff().unwrap(); //! ``` //! //! More examples can be found in the `examples` directory of the git //! repository. Many of those examples demonstrate doing non-trivial things //! with Ropey such as grapheme handling, search-and-replace, and streaming //! loading of non-utf8 text files. //! //! //! # Low-level APIs //! //! Ropey also provides access to some of its low-level APIs, enabling client //! code to efficiently work with a `Rope`'s data and implement new //! functionality. The most important of those APIs are: //! //! - The [`chunk_at_*()`](Rope::chunk_at_byte) //! chunk-fetching methods of `Rope` and `RopeSlice`. //! - The [`Chunks`](iter::Chunks) iterator. //! - The functions in [`str_utils`] for operating on //! `&str` slices. //! //! Internally, each `Rope` stores text as a segmented collection of utf8 //! strings. The chunk-fetching methods and `Chunks` iterator provide direct //! access to those strings (or "chunks") as `&str` slices, allowing client //! code to work directly with the underlying utf8 data. //! //! The chunk-fetching methods and `str_utils` functions are the basic //! building blocks that Ropey itself uses to build much of its functionality. //! For example, the [`Rope::byte_to_char()`] //! method can be reimplemented as a free function like this: //! //! ```no_run //! use ropey::{ //! Rope, //! str_utils::byte_to_char_idx //! }; //! //! fn byte_to_char(rope: &Rope, byte_idx: usize) -> usize { //! let (chunk, b, c, _) = rope.chunk_at_byte(byte_idx); //! 
c + byte_to_char_idx(chunk, byte_idx - b) //! } //! ``` //! //! And this will be just as efficient as Ropey's implementation. //! //! The chunk-fetching methods in particular are among the fastest functions //! that Ropey provides, generally operating in the sub-hundred nanosecond //! range for medium-sized (~200kB) documents on recent-ish computer systems. //! //! //! # A Note About Line Breaks //! //! Some of Ropey's APIs use the concept of line breaks or lines of text. //! //! Ropey considers the start of the rope and positions immediately //! _after_ line breaks to be the start of new lines. And it treats //! line breaks as being a part of the lines they mark the end of. //! //! For example, the rope `"Hello"` has a single line: `"Hello"`. The //! rope `"Hello\nworld"` has two lines: `"Hello\n"` and `"world"`. And //! the rope `"Hello\nworld\n"` has three lines: `"Hello\n"`, //! `"world\n"`, and `""`. //! //! Ropey can be configured at build time via feature flags to recognize //! different line breaks. Ropey always recognizes: //! //! - `U+000A` &mdash; LF (Line Feed) //! - `U+000D` `U+000A` &mdash; CRLF (Carriage Return + Line Feed) //! //! With the `cr_lines` feature, the following are also recognized: //! //! - `U+000D` &mdash; CR (Carriage Return) //! //! With the `unicode_lines` feature, in addition to all of the //! above, the following are also recognized (bringing Ropey into //! conformance with //! [Unicode Annex #14](https://www.unicode.org/reports/tr14/#BK)): //! //! - `U+000B` &mdash; VT (Vertical Tab) //! - `U+000C` &mdash; FF (Form Feed) //! - `U+0085` &mdash; NEL (Next Line) //! - `U+2028` &mdash; Line Separator //! - `U+2029` &mdash; Paragraph Separator //! //! (Note: `unicode_lines` is enabled by default, and always implies //! `cr_lines`.) //! //! CRLF pairs are always treated as a single line break, and are never split //! across chunks. Note, however, that slicing can still split them. //! //! //! # A Note About SIMD Acceleration //! //! Ropey has a `simd` feature flag (enabled by default) that enables //! explicit SIMD on supported platforms to improve performance. //! //! There is a bit of a footgun here: if you disable default features to //! configure line break behavior (as per the section above) then SIMD //! will also get disabled, and performance will suffer. So be careful //! to explicitly re-enable the `simd` feature flag (if desired) when //! doing that. #![allow(clippy::collapsible_if)] #![allow(clippy::inline_always)] #![allow(clippy::needless_return)] #![allow(clippy::redundant_field_names)] #![allow(clippy::type_complexity)] extern crate smallvec; extern crate str_indices; mod crlf; mod rope; mod rope_builder; mod slice; mod tree; pub mod iter; pub mod str_utils; use std::ops::Bound; pub use crate::rope::Rope; pub use crate::rope_builder::RopeBuilder; pub use crate::slice::RopeSlice; //============================================================== // Error reporting types. /// Ropey's result type. pub type Result<T> = std::result::Result<T, Error>; /// Ropey's error type. #[derive(Clone, Copy)] #[non_exhaustive] pub enum Error { /// Indicates that the passed byte index was out of bounds. /// /// Contains the index attempted and the actual length of the /// `Rope`/`RopeSlice` in bytes, in that order. ByteIndexOutOfBounds(usize, usize),
/// Indicates that the passed char index was out of bounds. /// /// Contains the index attempted and the actual length of the /// `Rope`/`RopeSlice` in chars, in that order. CharIndexOutOfBounds(usize, usize), /// Indicates that the passed line index was out of bounds. /// /// Contains the index attempted and the actual length of the /// `Rope`/`RopeSlice` in lines, in that order. LineIndexOutOfBounds(usize, usize), /// Indicates that the passed utf16 code-unit index was out of /// bounds. /// /// Contains the index attempted and the actual length of the /// `Rope`/`RopeSlice` in utf16 code units, in that order. Utf16IndexOutOfBounds(usize, usize), /// Indicates that the passed byte index was not a char boundary. /// /// Contains the passed byte index. ByteIndexNotCharBoundary(usize), /// Indicates that the passed byte range didn't line up with char /// boundaries. /// /// Contains the [start, end) byte indices of the range, in that order. /// When either the start or end are `None`, that indicates a half-open /// range. ByteRangeNotCharBoundary( Option<usize>, // Start. Option<usize>, // End. ), /// Indicates that a reversed byte-index range (end < start) was /// encountered. /// /// Contains the [start, end) byte indices of the range, in that order. ByteRangeInvalid( usize, // Start. usize, // End. ), /// Indicates that a reversed char-index range (end < start) was /// encountered. /// /// Contains the [start, end) char indices of the range, in that order. CharRangeInvalid( usize, // Start. usize, // End. ), /// Indicates that the passed byte-index range was partially or fully /// out of bounds. /// /// Contains the [start, end) byte indices of the range and the actual /// length of the `Rope`/`RopeSlice` in bytes, in that order. When /// either the start or end are `None`, that indicates a half-open range. ByteRangeOutOfBounds( Option<usize>, // Start. Option<usize>, // End. usize, // Rope byte length. ), /// Indicates that the passed char-index range was partially or fully /// out of bounds. /// /// Contains the [start, end) char indices of the range and the actual /// length of the `Rope`/`RopeSlice` in chars, in that order. When /// either the start or end are `None`, that indicates a half-open range. CharRangeOutOfBounds( Option<usize>, // Start. Option<usize>, // End. usize, // Rope char length. ), } impl std::error::Error for Error { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { None } // Deprecated in std. fn description(&self) -> &str { "" } // Deprecated in std.
fn cause(&self) -> Option<&dyn std::error::Error> { None } } impl std::fmt::Debug for Error { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match *self { Error::ByteIndexOutOfBounds(index, len) => { write!( f, "Byte index out of bounds: byte index {}, Rope/RopeSlice byte length {}", index, len ) } Error::CharIndexOutOfBounds(index, len) => { write!( f, "Char index out of bounds: char index {}, Rope/RopeSlice char length {}", index, len ) } Error::LineIndexOutOfBounds(index, len) => { write!( f, "Line index out of bounds: line index {}, Rope/RopeSlice line count {}", index, len ) } Error::Utf16IndexOutOfBounds(index, len) => { write!(f, "Utf16 code-unit index out of bounds: utf16 index {}, Rope/RopeSlice utf16 length {}", index, len) } Error::ByteIndexNotCharBoundary(index) => { write!( f, "Byte index is not a valid char boundary: byte index {}", index ) } Error::ByteRangeNotCharBoundary(start_idx_opt, end_idx_opt) => { write!(f, "Byte range does not align with char boundaries: range ")?; write_range(f, start_idx_opt, end_idx_opt) } Error::ByteRangeInvalid(start_idx, end_idx) => { write!( f, "Invalid byte range {}..{}: start must be <= end", start_idx, end_idx ) } Error::CharRangeInvalid(start_idx, end_idx) => { write!( f, "Invalid char range {}..{}: start must be <= end", start_idx, end_idx ) } Error::ByteRangeOutOfBounds(start_idx_opt, end_idx_opt, len) => { write!(f, "Byte range out of bounds: byte range ")?; write_range(f, start_idx_opt, end_idx_opt)?; write!(f, ", Rope/RopeSlice byte length {}", len) } Error::CharRangeOutOfBounds(start_idx_opt, end_idx_opt, len) => { write!(f, "Char range out of bounds: char range ")?; write_range(f, start_idx_opt, end_idx_opt)?; write!(f, ", Rope/RopeSlice char length {}", len) } } } } impl std::fmt::Display for Error { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { // Just re-use the debug impl. std::fmt::Debug::fmt(self, f) } } fn write_range( f: &mut std::fmt::Formatter<'_>, start_idx: Option<usize>, end_idx: Option<usize>, ) -> std::fmt::Result { match (start_idx, end_idx) { (None, None) => { write!(f, "..") } (Some(start), None) => { write!(f, "{}..", start) } (None, Some(end)) => { write!(f, "..{}", end) } (Some(start), Some(end)) => { write!(f, "{}..{}", start, end) } } } //============================================================== // Range handling utilities. #[inline(always)] pub(crate) fn start_bound_to_num(b: Bound<&usize>) -> Option<usize> { match b { Bound::Included(n) => Some(*n), Bound::Excluded(n) => Some(*n + 1), Bound::Unbounded => None, } } #[inline(always)] pub(crate) fn end_bound_to_num(b: Bound<&usize>) -> Option<usize> { match b { Bound::Included(n) => Some(*n + 1), Bound::Excluded(n) => Some(*n), Bound::Unbounded => None, } }
random_line_split
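The line-break semantics described in the doc comments above are easy to sanity-check. A minimal sketch, assuming ropey's documented `Rope::from_str`, `len_lines`, and `line` APIs:

use ropey::Rope;

fn main() {
    // Per the doc comment: a trailing line break opens a new, empty line.
    let text = Rope::from_str("Hello\nworld\n");
    assert_eq!(text.len_lines(), 3);
    assert_eq!(text.line(0).to_string(), "Hello\n");
    assert_eq!(text.line(1).to_string(), "world\n");
    assert_eq!(text.line(2).to_string(), "");
}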
lib.rs
//! Ropey is a utf8 text rope for Rust. It is fast, robust, and can handle //! huge texts and memory-incoherent edits with ease. //! //! Ropey's atomic unit of text is Unicode scalar values (or `char`s in Rust) //! encoded as utf8. All of Ropey's editing and slicing operations are done //! in terms of char indices, which prevents accidental creation of invalid //! utf8 data. //! //! The library is made up of four main components: //! //! - [`Rope`]: the main rope type. //! - [`RopeSlice`]: an immutable view into part of a //! `Rope`. //! - [`iter`]: iterators over `Rope`/`RopeSlice` data. //! - [`RopeBuilder`]: an efficient incremental //! `Rope` builder. //! //! //! # A Basic Example //! //! Let's say we want to open up a text file, replace the 516th line (the //! writing was terrible!), and save it back to disk. It's contrived, but will //! give a good sampling of the APIs and how they work together. //! //! ```no_run //! # use std::io::Result; //! use std::fs::File; //! use std::io::{BufReader, BufWriter}; //! use ropey::Rope; //! //! # fn do_stuff() -> Result<()> { //! // Load a text file. //! let mut text = Rope::from_reader( //! BufReader::new(File::open("my_great_book.txt")?) //! )?; //! //! // Print the 516th line (zero-indexed) to see the terrible //! // writing. //! println!("{}", text.line(515)); //! //! // Get the start/end char indices of the line. //! let start_idx = text.line_to_char(515); //! let end_idx = text.line_to_char(516); //! //! // Remove the line... //! text.remove(start_idx..end_idx); //! //! // ...and replace it with something better. //! text.insert(start_idx, "The flowers are... so... dunno.\n"); //! //! // Print the changes, along with the previous few lines for context. //! let start_idx = text.line_to_char(511); //! let end_idx = text.line_to_char(516); //! println!("{}", text.slice(start_idx..end_idx)); //! //! // Write the file back out to disk. //! text.write_to( //! BufWriter::new(File::create("my_great_book.txt")?) //! )?; //! # Ok(()) //! # } //! # do_stuff().unwrap(); //! ``` //! //! More examples can be found in the `examples` directory of the git //! repository. Many of those examples demonstrate doing non-trivial things //! with Ropey such as grapheme handling, search-and-replace, and streaming //! loading of non-utf8 text files. //! //! //! # Low-level APIs //! //! Ropey also provides access to some of its low-level APIs, enabling client //! code to efficiently work with a `Rope`'s data and implement new //! functionality. The most important of those APIs are: //! //! - The [`chunk_at_*()`](Rope::chunk_at_byte) //! chunk-fetching methods of `Rope` and `RopeSlice`. //! - The [`Chunks`](iter::Chunks) iterator. //! - The functions in [`str_utils`] for operating on //! `&str` slices. //! //! Internally, each `Rope` stores text as a segmented collection of utf8 //! strings. The chunk-fetching methods and `Chunks` iterator provide direct //! access to those strings (or "chunks") as `&str` slices, allowing client //! code to work directly with the underlying utf8 data. //! //! The chunk-fetching methods and `str_utils` functions are the basic //! building blocks that Ropey itself uses to build much of its functionality. //! For example, the [`Rope::byte_to_char()`] //! method can be reimplemented as a free function like this: //! //! ```no_run //! use ropey::{ //! Rope, //! str_utils::byte_to_char_idx //! }; //! //! fn byte_to_char(rope: &Rope, byte_idx: usize) -> usize { //! let (chunk, b, c, _) = rope.chunk_at_byte(byte_idx); //!
c + byte_to_char_idx(chunk, byte_idx - b) //! } //! ``` //! //! And this will be just as efficient as Ropey's implementation. //! //! The chunk-fetching methods in particular are among the fastest functions //! that Ropey provides, generally operating in the sub-hundred nanosecond //! range for medium-sized (~200kB) documents on recent-ish computer systems. //! //! //! # A Note About Line Breaks //! //! Some of Ropey's APIs use the concept of line breaks or lines of text. //! //! Ropey considers the start of the rope and positions immediately //! _after_ line breaks to be the start of new lines. And it treats //! line breaks as being a part of the lines they mark the end of. //! //! For example, the rope `"Hello"` has a single line: `"Hello"`. The //! rope `"Hello\nworld"` has two lines: `"Hello\n"` and `"world"`. And //! the rope `"Hello\nworld\n"` has three lines: `"Hello\n"`, //! `"world\n"`, and `""`. //! //! Ropey can be configured at build time via feature flags to recognize //! different line breaks. Ropey always recognizes: //! //! - `U+000A` &mdash; LF (Line Feed) //! - `U+000D` `U+000A` &mdash; CRLF (Carriage Return + Line Feed) //! //! With the `cr_lines` feature, the following are also recognized: //! //! - `U+000D` &mdash; CR (Carriage Return) //! //! With the `unicode_lines` feature, in addition to all of the //! above, the following are also recognized (bringing Ropey into //! conformance with //! [Unicode Annex #14](https://www.unicode.org/reports/tr14/#BK)): //! //! - `U+000B` &mdash; VT (Vertical Tab) //! - `U+000C` &mdash; FF (Form Feed) //! - `U+0085` &mdash; NEL (Next Line) //! - `U+2028` &mdash; Line Separator //! - `U+2029` &mdash; Paragraph Separator //! //! (Note: `unicode_lines` is enabled by default, and always implies //! `cr_lines`.) //! //! CRLF pairs are always treated as a single line break, and are never split //! across chunks. Note, however, that slicing can still split them. //! //! //! # A Note About SIMD Acceleration //! //! Ropey has a `simd` feature flag (enabled by default) that enables //! explicit SIMD on supported platforms to improve performance. //! //! There is a bit of a footgun here: if you disable default features to //! configure line break behavior (as per the section above) then SIMD //! will also get disabled, and performance will suffer. So be careful //! to explicitly re-enable the `simd` feature flag (if desired) when //! doing that. #![allow(clippy::collapsible_if)] #![allow(clippy::inline_always)] #![allow(clippy::needless_return)] #![allow(clippy::redundant_field_names)] #![allow(clippy::type_complexity)] extern crate smallvec; extern crate str_indices; mod crlf; mod rope; mod rope_builder; mod slice; mod tree; pub mod iter; pub mod str_utils; use std::ops::Bound; pub use crate::rope::Rope; pub use crate::rope_builder::RopeBuilder; pub use crate::slice::RopeSlice; //============================================================== // Error reporting types. /// Ropey's result type. pub type Result<T> = std::result::Result<T, Error>; /// Ropey's error type. #[derive(Clone, Copy)] #[non_exhaustive] pub enum Error { /// Indicates that the passed byte index was out of bounds. /// /// Contains the index attempted and the actual length of the /// `Rope`/`RopeSlice` in bytes, in that order. ByteIndexOutOfBounds(usize, usize), /// Indicates that the passed char index was out of bounds. /// /// Contains the index attempted and the actual length of the /// `Rope`/`RopeSlice` in chars, in that order. 
CharIndexOutOfBounds(usize, usize), /// Indicates that the passed line index was out of bounds. /// /// Contains the index attempted and the actual length of the /// `Rope`/`RopeSlice` in lines, in that order. LineIndexOutOfBounds(usize, usize), /// Indicates that the passed utf16 code-unit index was out of /// bounds. /// /// Contains the index attempted and the actual length of the /// `Rope`/`RopeSlice` in utf16 code units, in that order. Utf16IndexOutOfBounds(usize, usize), /// Indicates that the passed byte index was not a char boundary. /// /// Contains the passed byte index. ByteIndexNotCharBoundary(usize), /// Indicates that the passed byte range didn't line up with char /// boundaries. /// /// Contains the [start, end) byte indices of the range, in that order. /// When either the start or end are `None`, that indicates a half-open /// range. ByteRangeNotCharBoundary( Option<usize>, // Start. Option<usize>, // End. ), /// Indicates that a reversed byte-index range (end < start) was /// encountered. /// /// Contains the [start, end) byte indices of the range, in that order. ByteRangeInvalid( usize, // Start. usize, // End. ), /// Indicates that a reversed char-index range (end < start) was /// encountered. /// /// Contains the [start, end) char indices of the range, in that order. CharRangeInvalid( usize, // Start. usize, // End. ), /// Indicates that the passed byte-index range was partially or fully /// out of bounds. /// /// Contains the [start, end) byte indices of the range and the actual /// length of the `Rope`/`RopeSlice` in bytes, in that order. When /// either the start or end are `None`, that indicates a half-open range. ByteRangeOutOfBounds( Option<usize>, // Start. Option<usize>, // End. usize, // Rope byte length. ), /// Indicates that the passed char-index range was partially or fully /// out of bounds. /// /// Contains the [start, end) char indices of the range and the actual /// length of the `Rope`/`RopeSlice` in chars, in that order. When /// either the start or end are `None`, that indicates a half-open range. CharRangeOutOfBounds( Option<usize>, // Start. Option<usize>, // End. usize, // Rope char length. ), } impl std::error::Error for Error { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { None } // Deprecated in std. fn description(&self) -> &str { "" } // Deprecated in std.
fn cause(&self) -> Option<&dyn std::error::Error> { None } } impl std::fmt::Debug for Error { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match *self { Error::ByteIndexOutOfBounds(index, len) => { write!( f, "Byte index out of bounds: byte index {}, Rope/RopeSlice byte length {}", index, len ) } Error::CharIndexOutOfBounds(index, len) => { write!( f, "Char index out of bounds: char index {}, Rope/RopeSlice char length {}", index, len ) } Error::LineIndexOutOfBounds(index, len) => { write!( f, "Line index out of bounds: line index {}, Rope/RopeSlice line count {}", index, len ) } Error::Utf16IndexOutOfBounds(index, len) => { write!(f, "Utf16 code-unit index out of bounds: utf16 index {}, Rope/RopeSlice utf16 length {}", index, len) } Error::ByteIndexNotCharBoundary(index) => { write!( f, "Byte index is not a valid char boundary: byte index {}", index ) } Error::ByteRangeNotCharBoundary(start_idx_opt, end_idx_opt) => { write!(f, "Byte range does not align with char boundaries: range ")?; write_range(f, start_idx_opt, end_idx_opt) } Error::ByteRangeInvalid(start_idx, end_idx) => { write!( f, "Invalid byte range {}..{}: start must be <= end", start_idx, end_idx ) } Error::CharRangeInvalid(start_idx, end_idx) => { write!( f, "Invalid char range {}..{}: start must be <= end", start_idx, end_idx ) } Error::ByteRangeOutOfBounds(start_idx_opt, end_idx_opt, len) => { write!(f, "Byte range out of bounds: byte range ")?; write_range(f, start_idx_opt, end_idx_opt)?; write!(f, ", Rope/RopeSlice byte length {}", len) } Error::CharRangeOutOfBounds(start_idx_opt, end_idx_opt, len) => { write!(f, "Char range out of bounds: char range ")?; write_range(f, start_idx_opt, end_idx_opt)?; write!(f, ", Rope/RopeSlice char length {}", len) } } } } impl std::fmt::Display for Error { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { // Just re-use the debug impl. std::fmt::Debug::fmt(self, f) } } fn write_range( f: &mut std::fmt::Formatter<'_>, start_idx: Option<usize>, end_idx: Option<usize>, ) -> std::fmt::Result { match (start_idx, end_idx) { (None, None) => { write!(f, "..") } (Some(start), None) => { write!(f, "{}..", start) } (None, Some(end)) => { write!(f, "..{}", end) } (Some(start), Some(end)) =>
} } //============================================================== // Range handling utilities. #[inline(always)] pub(crate) fn start_bound_to_num(b: Bound<&usize>) -> Option<usize> { match b { Bound::Included(n) => Some(*n), Bound::Excluded(n) => Some(*n + 1), Bound::Unbounded => None, } } #[inline(always)] pub(crate) fn end_bound_to_num(b: Bound<&usize>) -> Option<usize> { match b { Bound::Included(n) => Some(*n + 1), Bound::Excluded(n) => Some(*n), Bound::Unbounded => None, } }
{ write!(f, "{}..{}", start, end) }
conditional_block
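The `start_bound_to_num` and `end_bound_to_num` helpers at the end of the entry above normalize Rust's range forms into the optional half-open [start, end) endpoints that the `Option<usize>` error variants carry. A standalone sketch of the same conversion; the `range_to_num` name is invented for illustration:

use std::ops::{Bound, RangeBounds};

// Normalize any usize range into optional half-open [start, end) endpoints,
// mirroring the crate-private helpers shown above.
fn range_to_num<R: RangeBounds<usize>>(r: R) -> (Option<usize>, Option<usize>) {
    let start = match r.start_bound() {
        Bound::Included(n) => Some(*n),
        Bound::Excluded(n) => Some(*n + 1),
        Bound::Unbounded => None,
    };
    let end = match r.end_bound() {
        Bound::Included(n) => Some(*n + 1),
        Bound::Excluded(n) => Some(*n),
        Bound::Unbounded => None,
    };
    (start, end)
}

fn main() {
    assert_eq!(range_to_num(2..5), (Some(2), Some(5)));
    assert_eq!(range_to_num(2..=5), (Some(2), Some(6)));
    assert_eq!(range_to_num(..), (None, None));
}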
proto_connection.rs
//! Benchmark the `x11rb_protocol::Connection` type's methods, at varying levels of //! capacity. use criterion::{black_box, criterion_group, criterion_main, BatchSize, Criterion}; use std::{ io::{Read, Write}, mem::{replace, size_of}, net::{TcpListener, TcpStream}, thread, }; use x11rb_protocol::{ connection::{Connection, ReplyFdKind}, protocol::xproto::{Depth, Rectangle, Screen}, x11_utils::{Serialize, TryParse}, DiscardMode, SequenceNumber, }; #[cfg(unix)] use std::os::unix::net::UnixStream; fn
(i: i32, p: i32) -> i32 { let mut result = 1; for _ in 0..p { result *= i; } result } fn enqueue_packet_test(c: &mut Criterion) { // take the cartesian product of the following conditions: // - the packet is an event, a reply, or an error // - pending_events and pending_replies are empty, have one element, or have // many elements enum PacketType { Event, Reply, Error, } enum PacketCount { Empty, One, Many, } use PacketCount::*; use PacketType::*; let mut group = c.benchmark_group("enqueue_packet"); for packet_ty in &[Event, Reply, Error] { for packet_count in &[Empty, One, Many] { let packet_ty_desc = match packet_ty { Event => "event", Reply => "reply", Error => "error", }; let packet_count_desc = match packet_count { Empty => "no", One => "one", Many => "many", }; let name = format!( "enqueue_packet {} with {} packets", packet_ty_desc, packet_count_desc ); group.bench_function(name, |b| { // generate a valid packet with the given first byte and sequence number let mut seqno = 0u16; let mut packet = move |ind: u8| { let our_seqno = seqno + 1; seqno += 1; let mut v = vec![0; 32]; v[0] = ind; // copy our_seqno to bytes 2 and 3 v[2..4].copy_from_slice(&our_seqno.to_ne_bytes()); v }; // we need another one for make_conn let mut packet2 = packet; let queue_count = match packet_count { PacketCount::Empty => 0, PacketCount::One => 1, PacketCount::Many => pow(2, 8), }; // create a connection with the given stats let mut make_conn = || { let mut conn = Connection::new(); for _ in 0..queue_count { // push a new event conn.enqueue_packet(packet2(2)); } for _ in 0..queue_count { // push a new reply conn.enqueue_packet(packet2(1)); } conn }; let mut conn = make_conn(); let packet = packet(match packet_ty { Event => 2, Reply => 1, Error => 0, }); b.iter(move || { conn.enqueue_packet(packet.clone()); }) }); } } } fn send_and_receive_request(c: &mut Criterion) { // permutations: // - send queue is empty or very full // - receive queue is empty or very full enum SendQueue { SEmpty, SFull, } enum RecvQueue { REmpty, RFull, } use RecvQueue::*; use SendQueue::*; let mut group = c.benchmark_group("send_and_receive_request"); for send_queue in &[SEmpty, SFull] { for recv_queue in &[REmpty, RFull] { let name = format!( "send_and_receive_request (send {}, recv {})", match send_queue { SEmpty => "empty", SFull => "full", }, match recv_queue { REmpty => "empty", RFull => "full", } ); group.bench_function(name, |b| { // create a new connection let mut conn = Connection::new(); // if the send queue needs to be full, flood it with sent requests if matches!(send_queue, SFull) { for _ in 0..pow(2, 14) { conn.send_request(match recv_queue { REmpty => ReplyFdKind::NoReply, RFull => ReplyFdKind::ReplyWithoutFDs, }); } } // if the recv queue needs to be full, flood it with replies if matches!(recv_queue, RFull) { for _ in 0..pow(2, 14) { let mut packet = vec![0; 32]; packet[0] = 1; conn.enqueue_packet(packet); } } // create a new packet let mut packet = vec![0u8; 32]; packet[0] = 1; b.iter(move || { // send our request let seq = conn.send_request(ReplyFdKind::ReplyWithoutFDs).unwrap(); // truncate to a u16 let seq_trunc = seq as u16; // insert the sequence number at positions 2 and 3 packet[2..4].copy_from_slice(&seq_trunc.to_ne_bytes()); // enqueue the packet conn.enqueue_packet(black_box(replace(&mut packet, vec![0u8; 32]))); // pop the reply conn.poll_for_reply_or_error(seq) }) }); } } } fn try_parse_small_struct(c: &mut Criterion) { // xproto::Rectangle is a pointer wide on 64-bit, use that c.bench_function("try_parse 
an xproto::Rectangle", |b| { let packet = [0x42u8; size_of::<Rectangle>()]; b.iter(|| Rectangle::try_parse(black_box(&packet))) }); } fn try_parse_large_struct(c: &mut Criterion) { // xproto::Screen is a significantly larger structure, use that const SCREEN_BASE_SIZE: usize = size_of::<Screen>() - size_of::<Vec<Depth>>() + size_of::<u8>(); const NUM_DEPTHS: usize = 3; const DEPTH_SIZE: usize = 8; const TOTAL_SIZE: usize = SCREEN_BASE_SIZE + (NUM_DEPTHS * DEPTH_SIZE); c.bench_function("try_parse an xproto::Screen", |b| { let mut packet = [0; TOTAL_SIZE]; packet[SCREEN_BASE_SIZE - 1] = NUM_DEPTHS as u8; b.iter(|| Screen::try_parse(black_box(&packet))) }); } fn serialize_struct(c: &mut Criterion) { // try the following: // - send it down a TCP socket // - send it down a Unix socket (if linux) // // this should relatively accurately tell us what kind of impact the buffering // and writing have on the serialization time // // note that send() and recv() degenerate into sendmsg() and recvmsg(), at least // on the Linux kernel end, so not using those functions should have no effect enum SocketTy { TryTcp, TryUnix, } enum StructType { Small, Large, } use SocketTy::*; use StructType::*; let mut group = c.benchmark_group("serialize_struct"); for socket_ty in &[TryTcp, TryUnix] { let mut fd: Box<dyn Write> = match socket_ty { TryTcp => { const PORT: u16 = 41234; let listen = TcpListener::bind(("::1", PORT)).unwrap(); thread::spawn(move || { let (mut sock, _) = listen.accept().unwrap(); // read until other sock gets dropped let mut buf = [0u8; 1024]; loop { if sock.read(&mut buf).is_err() { break; } } }); let sock = TcpStream::connect(("::1", PORT)).unwrap(); Box::new(sock) } TryUnix => { #[cfg(unix)] { let (mut left, right) = UnixStream::pair().unwrap(); thread::spawn(move || { let mut buf = [0u8; 1024]; loop { if left.read(&mut buf).is_err() { break; } } }); Box::new(right) } #[cfg(not(unix))] { continue; } } }; let try_desc = match socket_ty { TryTcp => "TCP", TryUnix => "Unix", }; for struct_size in &[Small, Large] { let size_desc = match struct_size { Small => "small", Large => "large", }; let name = format!("serialize_struct {} {}", try_desc, size_desc); group.bench_function(name, |b| { b.iter(|| { let bytes = match struct_size { Small => { let rect = Rectangle::default(); black_box(rect.serialize()).to_vec() } Large => { let mut screen = Screen::default(); screen.allowed_depths.resize_with(3, Default::default); black_box(screen.serialize()) } }; // write the serialized bytes tothe output fd.write_all(&bytes).unwrap(); }) }); } } } fn discard_reply(c: &mut Criterion) { // Measure the performance of discard_reply() fn get_connection_and_seqnos() -> (Connection, Vec<SequenceNumber>) { let mut conn = Connection::new(); let seqnos = (0..pow(2, 13)) .map(|_| conn.send_request(ReplyFdKind::NoReply).unwrap()) .collect(); (conn, seqnos) } let mut group = c.benchmark_group("discard_reply"); group.bench_function("discard oldest", |b| { b.iter_batched( get_connection_and_seqnos, |(mut conn, seqnos)| { conn.discard_reply(*seqnos.first().unwrap(), DiscardMode::DiscardReply) }, BatchSize::SmallInput, ); }); group.bench_function("discard newest", |b| { b.iter_batched( get_connection_and_seqnos, |(mut conn, seqnos)| { conn.discard_reply(*seqnos.last().unwrap(), DiscardMode::DiscardReply) }, BatchSize::SmallInput, ); }); group.bench_function("discard all forward", |b| { b.iter_batched( get_connection_and_seqnos, |(mut conn, seqnos)| { for seqno in seqnos { conn.discard_reply(seqno, DiscardMode::DiscardReply) } 
}, BatchSize::SmallInput, ); }); group.bench_function("discard all backward", |b| { b.iter_batched( get_connection_and_seqnos, |(mut conn, seqnos)| { for seqno in seqnos.into_iter().rev() { conn.discard_reply(seqno, DiscardMode::DiscardReply) } }, BatchSize::SmallInput, ); }); } criterion_group!( benches, enqueue_packet_test, send_and_receive_request, try_parse_small_struct, try_parse_large_struct, serialize_struct, discard_reply, ); criterion_main!(benches);
pow
identifier_name
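The elided identifier in the entry above is the `pow` helper, which these benchmarks only ever call with base 2; in that case it agrees with a left shift and with std's `i32::pow`. A quick check:

// Hand-rolled integer power, copied from the benchmark source above.
fn pow(i: i32, p: i32) -> i32 {
    let mut result = 1;
    for _ in 0..p {
        result *= i;
    }
    result
}

fn main() {
    // For base 2 the loop agrees with a shift and with std's integer power.
    assert_eq!(pow(2, 8), 1 << 8);
    assert_eq!(pow(2, 14), 2i32.pow(14));
}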
proto_connection.rs
//! Benchmark the `x11rb_protocol::Connection` type's methods, at varying levels of //! capacity. use criterion::{black_box, criterion_group, criterion_main, BatchSize, Criterion}; use std::{ io::{Read, Write}, mem::{replace, size_of}, net::{TcpListener, TcpStream}, thread, }; use x11rb_protocol::{ connection::{Connection, ReplyFdKind}, protocol::xproto::{Depth, Rectangle, Screen}, x11_utils::{Serialize, TryParse}, DiscardMode, SequenceNumber, }; #[cfg(unix)] use std::os::unix::net::UnixStream; fn pow(i: i32, p: i32) -> i32 { let mut result = 1; for _ in 0..p { result *= i; } result } fn enqueue_packet_test(c: &mut Criterion) { // take the cartesian product of the following conditions: // - the packet is an event, a reply, or an error // - pending_events and pending_replies are empty, have one element, or have // many elements enum PacketType { Event, Reply, Error, } enum PacketCount { Empty, One, Many, } use PacketCount::*; use PacketType::*; let mut group = c.benchmark_group("enqueue_packet"); for packet_ty in &[Event, Reply, Error] { for packet_count in &[Empty, One, Many] { let packet_ty_desc = match packet_ty { Event => "event", Reply => "reply", Error => "error", }; let packet_count_desc = match packet_count { Empty => "no", One => "one", Many => "many", }; let name = format!( "enqueue_packet {} with {} packets", packet_ty_desc, packet_count_desc ); group.bench_function(name, |b| { // generate a valid packet with the given first byte and sequence number let mut seqno = 0u16; let mut packet = move |ind: u8| { let our_seqno = seqno + 1; seqno += 1; let mut v = vec![0; 32]; v[0] = ind; // copy our_seqno to bytes 2 and 3 v[2..4].copy_from_slice(&our_seqno.to_ne_bytes()); v }; // we need another one for make_conn let mut packet2 = packet; let queue_count = match packet_count { PacketCount::Empty => 0, PacketCount::One => 1, PacketCount::Many => pow(2, 8), }; // create a connection with the given stats let mut make_conn = || { let mut conn = Connection::new(); for _ in 0..queue_count { // push a new event conn.enqueue_packet(packet2(2)); } for _ in 0..queue_count { // push a new reply conn.enqueue_packet(packet2(1)); } conn }; let mut conn = make_conn(); let packet = packet(match packet_ty { Event => 2, Reply => 1, Error => 0, }); b.iter(move || { conn.enqueue_packet(packet.clone()); }) }); } } } fn send_and_receive_request(c: &mut Criterion) { // permutations: // - send queue is empty or very full // - receive queue is empty or very full enum SendQueue { SEmpty, SFull, } enum RecvQueue { REmpty, RFull, } use RecvQueue::*; use SendQueue::*; let mut group = c.benchmark_group("send_and_receive_request"); for send_queue in &[SEmpty, SFull] { for recv_queue in &[REmpty, RFull] { let name = format!( "send_and_receive_request (send {}, recv {})", match send_queue { SEmpty => "empty", SFull => "full", }, match recv_queue { REmpty => "empty", RFull => "full", } ); group.bench_function(name, |b| { // create a new connection let mut conn = Connection::new(); // if the send queue needs to be full, flood it with sent requests if matches!(send_queue, SFull) { for _ in 0..pow(2, 14) { conn.send_request(match recv_queue { REmpty => ReplyFdKind::NoReply, RFull => ReplyFdKind::ReplyWithoutFDs, }); } } // if the recv queue needs to be full, flood it with replies if matches!(recv_queue, RFull) { for _ in 0..pow(2, 14) { let mut packet = vec![0; 32]; packet[0] = 1; conn.enqueue_packet(packet); } } // create a new packet let mut packet = vec![0u8; 32]; packet[0] = 1; b.iter(move || { // send our request let seq
= conn.send_request(ReplyFdKind::ReplyWithoutFDs).unwrap(); // truncate to a u16 let seq_trunc = seq as u16; // insert the sequence number at positions 2 and 3 packet[2..4].copy_from_slice(&seq_trunc.to_ne_bytes()); // enqueue the packet conn.enqueue_packet(black_box(replace(&mut packet, vec![0u8; 32]))); // pop the reply conn.poll_for_reply_or_error(seq) }) }); } } } fn try_parse_small_struct(c: &mut Criterion) { // xproto::Rectangle is a pointer wide on 64-bit, use that c.bench_function("try_parse an xproto::Rectangle", |b| { let packet = [0x42u8; size_of::<Rectangle>()]; b.iter(|| Rectangle::try_parse(black_box(&packet))) }); } fn try_parse_large_struct(c: &mut Criterion) { // xproto::Screen is a significantly larger structure, use that const SCREEN_BASE_SIZE: usize = size_of::<Screen>() - size_of::<Vec<Depth>>() + size_of::<u8>(); const NUM_DEPTHS: usize = 3; const DEPTH_SIZE: usize = 8; const TOTAL_SIZE: usize = SCREEN_BASE_SIZE + (NUM_DEPTHS * DEPTH_SIZE); c.bench_function("try_parse an xproto::Screen", |b| { let mut packet = [0; TOTAL_SIZE]; packet[SCREEN_BASE_SIZE - 1] = NUM_DEPTHS as u8; b.iter(|| Screen::try_parse(black_box(&packet))) }); } fn serialize_struct(c: &mut Criterion) { // try the following: // - send it down a TCP socket // - send it down a Unix socket (if linux) // // this should relatively accurately tell us what kind of impact the buffering // and writing have on the serialization time // // note that send() and recv() degenerate into sendmsg() and recvmsg(), at least // on the Linux kernel end, so not using those functions should have no effect enum SocketTy { TryTcp, TryUnix, } enum StructType { Small, Large, } use SocketTy::*; use StructType::*; let mut group = c.benchmark_group("serialize_struct"); for socket_ty in &[TryTcp, TryUnix] { let mut fd: Box<dyn Write> = match socket_ty { TryTcp => { const PORT: u16 = 41234; let listen = TcpListener::bind(("::1", PORT)).unwrap(); thread::spawn(move || { let (mut sock, _) = listen.accept().unwrap(); // read until other sock gets dropped let mut buf = [0u8; 1024]; loop { if sock.read(&mut buf).is_err() { break; } } }); let sock = TcpStream::connect(("::1", PORT)).unwrap(); Box::new(sock) } TryUnix => { #[cfg(unix)] { let (mut left, right) = UnixStream::pair().unwrap(); thread::spawn(move || { let mut buf = [0u8; 1024]; loop {
if left.read(&mut buf).is_err() { break; } } }); Box::new(right) } #[cfg(not(unix))] { continue; } } }; let try_desc = match socket_ty { TryTcp => "TCP", TryUnix => "Unix", }; for struct_size in &[Small, Large] { let size_desc = match struct_size { Small => "small", Large => "large", }; let name = format!("serialize_struct {} {}", try_desc, size_desc); group.bench_function(name, |b| { b.iter(|| { let bytes = match struct_size { Small => { let rect = Rectangle::default(); black_box(rect.serialize()).to_vec() } Large => { let mut screen = Screen::default(); screen.allowed_depths.resize_with(3, Default::default); black_box(screen.serialize()) } }; // write the serialized bytes to the output fd.write_all(&bytes).unwrap(); }) }); } } } fn discard_reply(c: &mut Criterion) { // Measure the performance of discard_reply() fn get_connection_and_seqnos() -> (Connection, Vec<SequenceNumber>) { let mut conn = Connection::new(); let seqnos = (0..pow(2, 13)) .map(|_| conn.send_request(ReplyFdKind::NoReply).unwrap()) .collect(); (conn, seqnos) } let mut group = c.benchmark_group("discard_reply"); group.bench_function("discard oldest", |b| { b.iter_batched( get_connection_and_seqnos, |(mut conn, seqnos)| { conn.discard_reply(*seqnos.first().unwrap(), DiscardMode::DiscardReply) }, BatchSize::SmallInput, ); }); group.bench_function("discard newest", |b| { b.iter_batched( get_connection_and_seqnos, |(mut conn, seqnos)| { conn.discard_reply(*seqnos.last().unwrap(), DiscardMode::DiscardReply) }, BatchSize::SmallInput, ); }); group.bench_function("discard all forward", |b| { b.iter_batched( get_connection_and_seqnos, |(mut conn, seqnos)| { for seqno in seqnos { conn.discard_reply(seqno, DiscardMode::DiscardReply) } }, BatchSize::SmallInput, ); }); group.bench_function("discard all backward", |b| { b.iter_batched( get_connection_and_seqnos, |(mut conn, seqnos)| { for seqno in seqnos.into_iter().rev() { conn.discard_reply(seqno, DiscardMode::DiscardReply) } }, BatchSize::SmallInput, ); }); } criterion_group!( benches, enqueue_packet_test, send_and_receive_request, try_parse_small_struct, try_parse_large_struct, serialize_struct, discard_reply, ); criterion_main!(benches);
random_line_split
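Both packet-building closures in the benchmarks above use the same framing: 32-byte packets, the packet kind in byte 0 (0 = error, 1 = reply, 2 = event), and the truncated sequence number in native byte order at bytes 2..4. A hypothetical `make_packet` helper capturing that layout:

// Invented helper mirroring the packet layout used throughout the benchmarks.
fn make_packet(kind: u8, seqno: u16) -> Vec<u8> {
    let mut v = vec![0u8; 32];
    v[0] = kind; // 0 = error, 1 = reply, 2 = event
    v[2..4].copy_from_slice(&seqno.to_ne_bytes());
    v
}

fn main() {
    let p = make_packet(1, 42);
    assert_eq!(p.len(), 32);
    assert_eq!(u16::from_ne_bytes([p[2], p[3]]), 42);
}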
proto_connection.rs
//! Benchmark the `x11rb_protocol::Connection` type's methods, at varying levels of //! capacity. use criterion::{black_box, criterion_group, criterion_main, BatchSize, Criterion}; use std::{ io::{Read, Write}, mem::{replace, size_of}, net::{TcpListener, TcpStream}, thread, }; use x11rb_protocol::{ connection::{Connection, ReplyFdKind}, protocol::xproto::{Depth, Rectangle, Screen}, x11_utils::{Serialize, TryParse}, DiscardMode, SequenceNumber, }; #[cfg(unix)] use std::os::unix::net::UnixStream; fn pow(i: i32, p: i32) -> i32 { let mut result = 1; for _ in 0..p { result *= i; } result } fn enqueue_packet_test(c: &mut Criterion) { // take the cartesian product of the following conditions: // - the packet is an event, a reply, or an error // - pending_events and pending_replies are empty, have one element, or have // many elements enum PacketType { Event, Reply, Error, } enum PacketCount { Empty, One, Many, } use PacketCount::*; use PacketType::*; let mut group = c.benchmark_group("enqueue_packet"); for packet_ty in &[Event, Reply, Error] { for packet_count in &[Empty, One, Many] { let packet_ty_desc = match packet_ty { Event => "event", Reply => "reply", Error => "error", }; let packet_count_desc = match packet_count { Empty => "no", One => "one", Many => "many", }; let name = format!( "enqueue_packet {} with {} packets", packet_ty_desc, packet_count_desc ); group.bench_function(name, |b| { // generate a valid packet with the given first byte and sequence number let mut seqno = 0u16; let mut packet = move |ind: u8| { let our_seqno = seqno + 1; seqno += 1; let mut v = vec![0; 32]; v[0] = ind; // copy our_seqno to bytes 2 and 3 v[2..4].copy_from_slice(&our_seqno.to_ne_bytes()); v }; // we need another one for make_conn let mut packet2 = packet; let queue_count = match packet_count { PacketCount::Empty => 0, PacketCount::One => 1, PacketCount::Many => pow(2, 8), }; // create a connection with the given stats let mut make_conn = || { let mut conn = Connection::new(); for _ in 0..queue_count { // push a new event conn.enqueue_packet(packet2(2)); } for _ in 0..queue_count { // push a new reply conn.enqueue_packet(packet2(1)); } conn }; let mut conn = make_conn(); let packet = packet(match packet_ty { Event => 2, Reply => 1, Error => 0, }); b.iter(move || { conn.enqueue_packet(packet.clone()); }) }); } } } fn send_and_receive_request(c: &mut Criterion)
for recv_queue in &[REmpty, RFull] { let name = format!( "send_and_receive_request (send {}, recv {})", match send_queue { SEmpty => "empty", SFull => "full", }, match recv_queue { REmpty => "empty", RFull => "full", } ); group.bench_function(name, |b| { // create a new connection let mut conn = Connection::new(); // if the send queue needs to be full, flood it with sent requests if matches!(send_queue, SFull) { for _ in 0..pow(2, 14) { conn.send_request(match recv_queue { REmpty => ReplyFdKind::NoReply, RFull => ReplyFdKind::ReplyWithoutFDs, }); } } // if the recv queue needs to be full, flood it with replies if matches!(recv_queue, RFull) { for _ in 0..pow(2, 14) { let mut packet = vec![0; 32]; packet[0] = 1; conn.enqueue_packet(packet); } } // create a new packet let mut packet = vec![0u8; 32]; packet[0] = 1; b.iter(move || { // send our request let seq = conn.send_request(ReplyFdKind::ReplyWithoutFDs).unwrap(); // truncate to a u16 let seq_trunc = seq as u16; // insert the sequence number at positions 2 and 3 packet[2..4].copy_from_slice(&seq_trunc.to_ne_bytes()); // enqueue the packet conn.enqueue_packet(black_box(replace(&mut packet, vec![0u8; 32]))); // pop the reply conn.poll_for_reply_or_error(seq) }) }); } } } fn try_parse_small_struct(c: &mut Criterion) { // xproto::Rectangle is a pointer wide on 64-bit, use that c.bench_function("try_parse an xproto::Rectangle", |b| { let packet = [0x42u8; size_of::<Rectangle>()]; b.iter(|| Rectangle::try_parse(black_box(&packet))) }); } fn try_parse_large_struct(c: &mut Criterion) { // xproto::Screen is a significantly larger structure, use that const SCREEN_BASE_SIZE: usize = size_of::<Screen>() - size_of::<Vec<Depth>>() + size_of::<u8>(); const NUM_DEPTHS: usize = 3; const DEPTH_SIZE: usize = 8; const TOTAL_SIZE: usize = SCREEN_BASE_SIZE + (NUM_DEPTHS * DEPTH_SIZE); c.bench_function("try_parse an xproto::Screen", |b| { let mut packet = [0; TOTAL_SIZE]; packet[SCREEN_BASE_SIZE - 1] = NUM_DEPTHS as u8; b.iter(|| Screen::try_parse(black_box(&packet))) }); } fn serialize_struct(c: &mut Criterion) { // try the following: // - send it down a TCP socket // - send it down a Unix socket (if linux) // // this should relatively accurately tell us what kind of impact the buffering // and writing have on the serialization time // // note that send() and recv() degenerate into sendmsg() and recvmsg(), at least // on the Linux kernel end, so not using those functions should have no effect enum SocketTy { TryTcp, TryUnix, } enum StructType { Small, Large, } use SocketTy::*; use StructType::*; let mut group = c.benchmark_group("serialize_struct"); for socket_ty in &[TryTcp, TryUnix] { let mut fd: Box<dyn Write> = match socket_ty { TryTcp => { const PORT: u16 = 41234; let listen = TcpListener::bind(("::1", PORT)).unwrap(); thread::spawn(move || { let (mut sock, _) = listen.accept().unwrap(); // read until other sock gets dropped let mut buf = [0u8; 1024]; loop { if sock.read(&mut buf).is_err() { break; } } }); let sock = TcpStream::connect(("::1", PORT)).unwrap(); Box::new(sock) } TryUnix => { #[cfg(unix)] { let (mut left, right) = UnixStream::pair().unwrap(); thread::spawn(move || { let mut buf = [0u8; 1024]; loop { if left.read(&mut buf).is_err() { break; } } }); Box::new(right) } #[cfg(not(unix))] { continue; } } }; let try_desc = match socket_ty { TryTcp => "TCP", TryUnix => "Unix", }; for struct_size in &[Small, Large] { let size_desc = match struct_size { Small => "small", Large => "large", }; let name = format!("serialize_struct {} {}", try_desc, 
size_desc); group.bench_function(name, |b| { b.iter(|| { let bytes = match struct_size { Small => { let rect = Rectangle::default(); black_box(rect.serialize()).to_vec() } Large => { let mut screen = Screen::default(); screen.allowed_depths.resize_with(3, Default::default); black_box(screen.serialize()) } }; // write the serialized bytes to the output fd.write_all(&bytes).unwrap(); }) }); } } } fn discard_reply(c: &mut Criterion) { // Measure the performance of discard_reply() fn get_connection_and_seqnos() -> (Connection, Vec<SequenceNumber>) { let mut conn = Connection::new(); let seqnos = (0..pow(2, 13)) .map(|_| conn.send_request(ReplyFdKind::NoReply).unwrap()) .collect(); (conn, seqnos) } let mut group = c.benchmark_group("discard_reply"); group.bench_function("discard oldest", |b| { b.iter_batched( get_connection_and_seqnos, |(mut conn, seqnos)| { conn.discard_reply(*seqnos.first().unwrap(), DiscardMode::DiscardReply) }, BatchSize::SmallInput, ); }); group.bench_function("discard newest", |b| { b.iter_batched( get_connection_and_seqnos, |(mut conn, seqnos)| { conn.discard_reply(*seqnos.last().unwrap(), DiscardMode::DiscardReply) }, BatchSize::SmallInput, ); }); group.bench_function("discard all forward", |b| { b.iter_batched( get_connection_and_seqnos, |(mut conn, seqnos)| { for seqno in seqnos { conn.discard_reply(seqno, DiscardMode::DiscardReply) } }, BatchSize::SmallInput, ); }); group.bench_function("discard all backward", |b| { b.iter_batched( get_connection_and_seqnos, |(mut conn, seqnos)| { for seqno in seqnos.into_iter().rev() { conn.discard_reply(seqno, DiscardMode::DiscardReply) } }, BatchSize::SmallInput, ); }); } criterion_group!( benches, enqueue_packet_test, send_and_receive_request, try_parse_small_struct, try_parse_large_struct, serialize_struct, discard_reply, ); criterion_main!(benches);
{ // permutations: // - send queue is empty or very full // - receive queue is empty or very full enum SendQueue { SEmpty, SFull, } enum RecvQueue { REmpty, RFull, } use RecvQueue::*; use SendQueue::*; let mut group = c.benchmark_group("send_and_receive_request"); for send_queue in &[SEmpty, SFull] {
identifier_body
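The round trip that `send_and_receive_request` times reduces to three calls that all appear in the sample above (`send_request`, `enqueue_packet`, `poll_for_reply_or_error`). A minimal sketch, assuming only those `x11rb_protocol` calls:

use x11rb_protocol::connection::{Connection, ReplyFdKind};

fn main() {
    let mut conn = Connection::new();
    // Send a request that expects a reply.
    let seq = conn.send_request(ReplyFdKind::ReplyWithoutFDs).unwrap();
    // Fake the server's reply: kind 1, matching (truncated) sequence number.
    let mut packet = vec![0u8; 32];
    packet[0] = 1;
    packet[2..4].copy_from_slice(&(seq as u16).to_ne_bytes());
    conn.enqueue_packet(packet);
    // Pop the reply back out, as the benchmark loop does.
    let _reply = conn.poll_for_reply_or_error(seq);
}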
main.rs
use std::collections::HashMap; use std::collections::HashSet; use std::io; use std::io::Read; use std::iter::Peekable; use std::slice::Iter; #[derive(Clone)] enum Match { Literal(char), Alternation(Vec<Match>), Concatenation(Vec<Match>), } impl std::fmt::Debug for Match { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { match self { Match::Literal(c) => write!(f, "{}", c)?, Match::Concatenation(xs) => { for x in xs.iter() { x.fmt(f)?; } } Match::Alternation(xs) => { // We could do precedence-based printing, but let's always put them in... let mut first = true; for x in xs.iter() { write!(f, "{}", if first {'('} else {'|'})?; first = false; x.fmt(f)?; } write!(f, ")")?; } } Ok(()) } } fn parse_regexp(iter: &mut Peekable<Iter<char>>) -> Match { // Current alternation, made of a sequence of concatenations. let mut alternatives = Vec::new(); // Current concatenation being built. let mut curr = Vec::new(); loop { let c = match iter.peek() { Some(c) => Some(*c), None => None, }; match c { Some('(') => { iter.next(); curr.push(parse_regexp(iter)); if iter.next() != Some(&')') { panic!("Imbalanced brackets"); } } Some('|') => { iter.next(); alternatives.push(Match::Concatenation(curr)); curr = Vec::new(); } Some(')') => break, None => break, Some(c) => { curr.push(Match::Literal(*c)); iter.next(); } } } alternatives.push(Match::Concatenation(curr)); Match::Alternation(alternatives) } //////////////////////////////////////////////////////////////////////// // This is the bit for problem 20a... // // This just cleans up the regexp tree, without understanding paths. fn opt_regexp(m: Match) -> Match { match m { Match::Alternation(xs) => { let xs: Vec<Match> = xs.into_iter().map(opt_regexp).collect(); if xs.len() == 1 { // Take first element, and discard rest. xs.into_iter().next().unwrap() } else { Match::Alternation(xs) } } Match::Concatenation(xs) => { let xs: Vec<Match> = xs.into_iter().map(opt_regexp).collect(); if xs.len() == 1 { // Take first element, and discard rest. xs.into_iter().next().unwrap() } else { Match::Concatenation(xs) } } Match::Literal(_) => m, } } // This removes obvious, basic back-tracking (back-tracking that // occurs only within a single concatenation of literals). fn opt_backtracks(m: Match) -> Match { match m { Match::Alternation(xs) => { Match::Alternation(xs.into_iter().map(opt_backtracks).collect()) } Match::Literal(_) => m, Match::Concatenation(xs) => { let mut xs = xs.into_iter().map(opt_backtracks).collect::<Vec<_>>(); let mut i = 0; while i + 1 < xs.len() { if if let (Match::Literal(a), Match::Literal(b)) = (&xs[i], &xs[i+1]) { match (a, b) { ('N', 'S') => true, ('S', 'N') => true, ('W', 'E') => true, ('E', 'W') => true, _ => false, } } else { false } { xs.drain(i..i+2); if i > 0 { i -= 1; } } else { i += 1; } } Match::Concatenation(xs) } } } // Is this an empty match? Used by opt_empties. fn is_empty(m: &Match) -> bool { match m { Match::Literal(_) => false, Match::Concatenation(xs) => xs.iter().all(is_empty), Match::Alternation(xs) => xs.len() > 0 && xs.iter().all(is_empty), } } // And this removes alternatives of nothing (empty matches) from concatenations. It's a // specific optimisation, but seems key to this exercise.
fn opt_empties(m: Match) -> Match { match m { Match::Alternation(xs) => { Match::Alternation(xs.into_iter().map(opt_empties).collect()) } Match::Literal(_) => m, Match::Concatenation(xs) => { Match::Concatenation(xs.into_iter().map(opt_empties).filter(|x| !is_empty(x)).collect()) } } } //////////////////////////////////////////////////////////////////////// // Problem 20b part // // Find the route to the turning point for a sequence of literals fn get_literal_partial(xs: &[Match]) -> Option<Vec<Match>> { if xs.len() == 0 { return None; } for elem in xs.iter().zip(xs.iter().rev()) { match elem { (Match::Literal('N'), Match::Literal('S')) => (), (Match::Literal('S'), Match::Literal('N')) => (), (Match::Literal('W'), Match::Literal('E')) => (), (Match::Literal('E'), Match::Literal('W')) => (), _ => return None, } } Some(xs.iter().take(xs.len() / 2).map(|x| (*x).clone()).collect()) } // Given a route that involves back-tracks, generate a list of routes // up to the turning-around point. e.g. NEWS -> NE. fn get_partials(m: &Match) -> Vec<Match>
element.push(partial); res.push(Match::Concatenation(element)); } } res } } } } } //////////////////////////////////////////////////////////////////////// // Generate all the possible strings. // fn generate_all(m: &Match) -> HashSet<String> { let mut res: HashSet<String> = HashSet::new(); match m { Match::Literal(x) => { res.insert(x.to_string()); () } Match::Alternation(xs) => { for x in xs.iter() { res.extend(generate_all(x).into_iter()); } } Match::Concatenation(xs) => { // Ugh. Cross products are potentially expensive. res.insert(String::new()); for x in xs.iter() { let to_cross = generate_all(x); add_cross_string(&mut res, &to_cross); } } } res } fn add_cross_string(lhs: &mut HashSet<String>, rhs: &HashSet<String>) { let mut res = HashSet::new(); for s1 in lhs.iter() { for s2 in rhs.iter() { let mut s = s1.clone(); s.push_str(&s2); res.insert(s); } } // This is where I'd like to swap lhs and res. lhs.clear(); lhs.extend(res.into_iter()); } // Generate all the incremental paths fn all_prefixes(strs: &HashSet<String>) -> HashSet<String> { let mut seen = HashSet::new(); for str in strs.iter() { for l in 0..str.len() { seen.insert(str.get(0..l+1).unwrap().to_string()); } } seen } // Given a path, generate the coordinates of its end point. fn get_coords(s: &str) -> (i32, i32) { let y = s.chars().map(|c| match c { 'N' => 1, 'S' => -1, _ => 0, }).sum(); let x = s.chars().map(|c| match c { 'E' => 1, 'W' => -1, _ => 0, }).sum(); (x, y) } // Build a mapping from coord to shortest route there. fn build_mapping(strs: &HashSet<String>) -> HashMap<(i32, i32), usize> { let mut map = HashMap::new(); for s in strs.iter() { let xy = get_coords(s); let l = s.len(); let e = map.entry(xy).or_insert(1000000); if l < *e { *e = l; } } map } // Count the long routes fn count_long(l: usize, mapping: &HashMap<(i32, i32), usize>) -> usize { mapping.iter().filter(|(_, l2)| **l2 >= l).count() } fn main() { let mut buffer = String::new(); io::stdin().read_to_string(&mut buffer).expect("Read error"); let chars = buffer.replace('^', "").replace('$', "").trim().chars().collect::<Vec<_>>(); // println!("{:?}\n", chars); let res = parse_regexp(&mut chars.iter().peekable()); // println!("{:?}\n", res); // All the backtracks form a trivial pattern, so we'll extract all // the routes up to a backtrack (plus original route). let mut partials = get_partials(&res); partials.push(res); // println!("{:?}\n", partials); // Then we'll eliminate the back-tracks, etc. let partials = partials.into_iter().map(|x| opt_empties(opt_backtracks(opt_regexp(x)))).collect::<Vec<_>>(); // println!("{:?}\n", partials); println!("{}\n", partials.len()); // And now build the regexp of doom. let regex = Match::Alternation(partials); let all = generate_all(&regex); // println!("{:?}\n", all); println!("{}\n", all.len()); // We have all the paths, now generate all the partial paths. let prefixes = all_prefixes(&all); println!("{}\n", prefixes.len()); // Some paths will overlap, so for each coordinate, find the shortest path there. let mapping = build_mapping(&prefixes); println!("{}\n", mapping.len()); // And find the count of coordinates over length 1000. println!("{}\n", count_long(1000, &mapping)); // My, that was really, really tedious. // If I'd known you could just generate all of the paths in // sensible time once you'd taken out the obvious // backtracking... *sigh*. }
{ match m { Match::Alternation(xs) => { let mut res = Vec::new(); for alternative in xs.iter() { res.extend(get_partials(alternative).into_iter()); } res } // A single literal will have no backtrackable parts. Match::Literal(_) => Vec::new(), Match::Concatenation(xs) => { match get_literal_partial(xs) { Some(x) => vec![Match::Concatenation(x)], None => { let mut res = Vec::new(); for i in 0..xs.len() { let partials = get_partials(&xs[i]); for partial in partials.into_iter() { let mut element = xs.iter().take(i).map(|x| (*x).clone()).collect::<Vec<Match>>();
identifier_body
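The coordinate bookkeeping in `get_coords` above is pure std and easy to check in isolation: each N/S/E/W step moves one unit, so a fully backtracking path such as `NEWS` ends at the origin. A small check:

// Copied from the sample above: map a path string to its end coordinates.
fn get_coords(s: &str) -> (i32, i32) {
    let y = s.chars().map(|c| match c { 'N' => 1, 'S' => -1, _ => 0 }).sum();
    let x = s.chars().map(|c| match c { 'E' => 1, 'W' => -1, _ => 0 }).sum();
    (x, y)
}

fn main() {
    assert_eq!(get_coords("NNEE"), (2, 2));
    assert_eq!(get_coords("NEWS"), (0, 0)); // the backtracking case
}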
main.rs
use std::collections::HashMap; use std::collections::HashSet; use std::io; use std::io::Read; use std::iter::Peekable; use std::slice::Iter; #[derive(Clone)] enum Match { Literal(char), Alternation(Vec<Match>), Concatenation(Vec<Match>), } impl std::fmt::Debug for Match { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { match self { Match::Literal(c) => write!(f, "{}", c)?, Match::Concatenation(xs) => { for x in xs.iter() { x.fmt(f)?; } } Match::Alternation(xs) => { // We could do precedence-based printing, but let's always put them in... let mut first = true; for x in xs.iter() { write!(f, "{}", if first {'('} else {'|'})?; first = false; x.fmt(f)?; } write!(f, ")")?; } } Ok(()) } } fn parse_regexp(iter: &mut Peekable<Iter<char>>) -> Match { // Current alternation, made of a sequence of concatenations. let mut alternatives = Vec::new(); // Current concatenation being built. let mut curr = Vec::new(); loop { let c = match iter.peek() { Some(c) => Some(*c), None => None, }; match c { Some('(') => { iter.next(); curr.push(parse_regexp(iter)); if iter.next() != Some(&')') { panic!("Imbalanced brackets"); } } Some('|') => { iter.next(); alternatives.push(Match::Concatenation(curr)); curr = Vec::new(); } Some(')') => break, None => break, Some(c) => { curr.push(Match::Literal(*c)); iter.next(); } } } alternatives.push(Match::Concatenation(curr)); Match::Alternation(alternatives) } //////////////////////////////////////////////////////////////////////// // This is the bit for problem 20a... // // This just cleans up the regexp tree, without understanding paths. fn opt_regexp(m: Match) -> Match { match m { Match::Alternation(xs) => { let xs: Vec<Match> = xs.into_iter().map(opt_regexp).collect(); if xs.len() == 1 { // Take first element, and discard rest. xs.into_iter().next().unwrap() } else { Match::Alternation(xs) } } Match::Concatenation(xs) => { let xs: Vec<Match> = xs.into_iter().map(opt_regexp).collect(); if xs.len() == 1 { // Take first element, and discard rest. xs.into_iter().next().unwrap() } else { Match::Concatenation(xs) } } Match::Literal(_) => m, } } // This removes obvious, basic back-tracking (back-tracking that // occurs only within a single concatenation of literals). fn opt_backtracks(m: Match) -> Match { match m { Match::Alternation(xs) => { Match::Alternation(xs.into_iter().map(opt_backtracks).collect()) } Match::Literal(_) => m, Match::Concatenation(xs) => { let mut xs = xs.into_iter().map(opt_backtracks).collect::<Vec<_>>(); let mut i = 0; while i + 1 < xs.len() { if if let (Match::Literal(a), Match::Literal(b)) = (&xs[i], &xs[i+1]) { match (a, b) { ('N', 'S') => true, ('S', 'N') => true, ('W', 'E') => true, ('E', 'W') => true, _ => false, } } else { false } { xs.drain(i..i+2); if i > 0 { i -= 1; } } else { i += 1; } } Match::Concatenation(xs)
} } } // Is this an empty match? Used by opt_empties. fn is_empty(m: &Match) -> bool { match m { Match::Literal(_) => false, Match::Concatenation(xs) => xs.iter().all(is_empty), Match::Alternation(xs) => xs.len() > 0 && xs.iter().all(is_empty), } } // And this removes alternatives of nothing (empty matches) from concatenations. It's a // specific optimisation, but seems key to this exercise. fn opt_empties(m: Match) -> Match { match m { Match::Alternation(xs) => { Match::Alternation(xs.into_iter().map(opt_empties).collect()) } Match::Literal(_) => m, Match::Concatenation(xs) => { Match::Concatenation(xs.into_iter().map(opt_empties).filter(|x| !is_empty(x)).collect()) } } } //////////////////////////////////////////////////////////////////////// // Problem 20b part // // Find the route to the turning point for a sequence of literals fn get_literal_partial(xs: &[Match]) -> Option<Vec<Match>> { if xs.len() == 0 { return None; } for elem in xs.iter().zip(xs.iter().rev()) { match elem { (Match::Literal('N'), Match::Literal('S')) => (), (Match::Literal('S'), Match::Literal('N')) => (), (Match::Literal('W'), Match::Literal('E')) => (), (Match::Literal('E'), Match::Literal('W')) => (), _ => return None, } } Some(xs.iter().take(xs.len() / 2).map(|x| (*x).clone()).collect()) } // Given a route that involves back-tracks, generate a list of routes // up to the turning-around point. e.g. NEWS -> NE. fn get_partials(m: &Match) -> Vec<Match> { match m { Match::Alternation(xs) => { let mut res = Vec::new(); for alternative in xs.iter() { res.extend(get_partials(alternative).into_iter()); } res } // A single literal will have no backtrackable parts. Match::Literal(_) => Vec::new(), Match::Concatenation(xs) => { match get_literal_partial(xs) { Some(x) => vec![Match::Concatenation(x)], None => { let mut res = Vec::new(); for i in 0..xs.len() { let partials = get_partials(&xs[i]); for partial in partials.into_iter() { let mut element = xs.iter().take(i).map(|x| (*x).clone()).collect::<Vec<Match>>(); element.push(partial); res.push(Match::Concatenation(element)); } } res } } } } } //////////////////////////////////////////////////////////////////////// // Generate all the possible strings. // fn generate_all(m: &Match) -> HashSet<String> { let mut res: HashSet<String> = HashSet::new(); match m { Match::Literal(x) => { res.insert(x.to_string()); () } Match::Alternation(xs) => { for x in xs.iter() { res.extend(generate_all(x).into_iter()); } } Match::Concatenation(xs) => { // Ugh. Cross products are potentially expensive. res.insert(String::new()); for x in xs.iter() { let to_cross = generate_all(x); add_cross_string(&mut res, &to_cross); } } } res } fn add_cross_string(lhs: &mut HashSet<String>, rhs: &HashSet<String>) { let mut res = HashSet::new(); for s1 in lhs.iter() { for s2 in rhs.iter() { let mut s = s1.clone(); s.push_str(&s2); res.insert(s); } } // This is where I'd like to swap lhs and res. lhs.clear(); lhs.extend(res.into_iter()); } // Generate all the incremental paths fn all_prefixes(strs: &HashSet<String>) -> HashSet<String> { let mut seen = HashSet::new(); for str in strs.iter() { for l in 0..str.len() { seen.insert(str.get(0..l+1).unwrap().to_string()); } } seen } // Given a path, generate the coordinates of its end point. fn get_coords(s: &str) -> (i32, i32) { let y = s.chars().map(|c| match c { 'N' => 1, 'S' => -1, _ => 0, }).sum(); let x = s.chars().map(|c| match c { 'E' => 1, 'W' => -1, _ => 0, }).sum(); (x, y) } // Build a mapping from coord to shortest route there.
fn build_mapping(strs: &HashSet<String>) -> HashMap<(i32, i32), usize> { let mut map = HashMap::new(); for s in strs.iter() { let xy = get_coords(s); let l = s.len(); let e = map.entry(xy).or_insert(1000000); if l < *e { *e = l; } } map } // Count the long routes fn count_long(l: usize, mapping: &HashMap<(i32, i32), usize>) -> usize { mapping.iter().filter(|(_, l2)| **l2 >= l).count() } fn main() { let mut buffer = String::new(); io::stdin().read_to_string(&mut buffer).expect("Read error"); let chars = buffer.replace('^', "").replace('$', "").trim().chars().collect::<Vec<_>>(); // println!("{:?}\n", chars); let res = parse_regexp(&mut chars.iter().peekable()); // println!("{:?}\n", res); // All the backtracks form a trivial pattern, so we'll extract all // the routes up to a backtrack (plus original route). let mut partials = get_partials(&res); partials.push(res); // println!("{:?}\n", partials); // Then we'll eliminate the back-tracks, etc. let partials = partials.into_iter().map(|x| opt_empties(opt_backtracks(opt_regexp(x)))).collect::<Vec<_>>(); // println!("{:?}\n", partials); println!("{}\n", partials.len()); // And now build the regexp of doom. let regex = Match::Alternation(partials); let all = generate_all(&regex); // println!("{:?}\n", all); println!("{}\n", all.len()); // We have all the paths, now generate all the partial paths. let prefixes = all_prefixes(&all); println!("{}\n", prefixes.len()); // Some paths will overlap, so for each coordinate, find the shortest path there. let mapping = build_mapping(&prefixes); println!("{}\n", mapping.len()); // And find the count of coordinates over length 1000. println!("{}\n", count_long(1000, &mapping)); // My, that was really, really tedious. // If I'd known you could just generate all of the paths in // sensible time once you'd taken out the obvious // backtracking... *sigh*. }
random_line_split
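// A minimal standalone sketch (inputs are made up) of how `get_coords` and
// `build_mapping` in the record above cooperate: each path string is reduced
// to its end coordinate, and for every coordinate only the shortest route is
// kept. `usize::MAX` stands in for the record's 1000000 sentinel.
use std::collections::{HashMap, HashSet};

fn get_coords(s: &str) -> (i32, i32) {
    let y = s.chars().map(|c| match c { 'N' => 1, 'S' => -1, _ => 0 }).sum();
    let x = s.chars().map(|c| match c { 'E' => 1, 'W' => -1, _ => 0 }).sum();
    (x, y)
}

fn main() {
    let mut paths = HashSet::new();
    paths.insert("NNE".to_string());
    paths.insert("ENN".to_string()); // different route, same endpoint
    paths.insert("ENNSN".to_string()); // five steps to the same endpoint

    let mut shortest: HashMap<(i32, i32), usize> = HashMap::new();
    for p in &paths {
        let e = shortest.entry(get_coords(p)).or_insert(usize::MAX);
        *e = (*e).min(p.len());
    }
    assert_eq!(shortest[&(1, 2)], 3); // the two three-step routes win
    println!("{:?}", shortest);
}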
main.rs
use std::collections::HashMap; use std::collections::HashSet; use std::io; use std::io::Read; use std::iter::Peekable; use std::slice::Iter; #[derive(Clone)] enum Match { Literal(char), Alternation(Vec<Match>), Concatenation(Vec<Match>), } impl std::fmt::Debug for Match { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { match self { Match::Literal(c) => write!(f, "{}", c)?, Match::Concatenation(xs) => { for x in xs.iter() { x.fmt(f)?; } } Match::Alternation(xs) => { // We could do precedence-based printing, but let's always put them in... let mut first = true; for x in xs.iter() { write!(f, "{}", if first {'('} else {'|'})?; first = false; x.fmt(f)?; } write!(f, ")")?; } } Ok(()) } } fn parse_regexp(iter: &mut Peekable<Iter<char>>) -> Match { // Current alternation, made of a sequence of concatenations. let mut alternatives = Vec::new(); // Current concatenation being built. let mut curr = Vec::new(); loop { let c = match iter.peek() { Some(c) => Some(*c), None => None, }; match c { Some('(') => { iter.next(); curr.push(parse_regexp(iter)); if iter.next() != Some(&')') { panic!("Imbalanced brackets"); } } Some('|') => { iter.next(); alternatives.push(Match::Concatenation(curr)); curr = Vec::new(); } Some(')') => break, None => break, Some(c) => { curr.push(Match::Literal(*c)); iter.next(); } } } alternatives.push(Match::Concatenation(curr)); Match::Alternation(alternatives) } //////////////////////////////////////////////////////////////////////// // This is the bit for problem 20a... // // This just cleans up the regexp tree, without understanding paths. fn opt_regexp(m: Match) -> Match { match m { Match::Alternation(xs) => { let xs: Vec<Match> = xs.into_iter().map(opt_regexp).collect(); if xs.len() == 1 { // Take first element, and discard rest. xs.into_iter().next().unwrap() } else { Match::Alternation(xs) } } Match::Concatenation(xs) => { let xs: Vec<Match> = xs.into_iter().map(opt_regexp).collect(); if xs.len() == 1 { // Take first element, and discard rest. xs.into_iter().next().unwrap() } else { Match::Concatenation(xs) } } Match::Literal(_) => m, } } // This removes obvious, basic back-tracking (back-tracking that // occurs only within a single concatenation of literals). fn opt_backtracks(m: Match) -> Match { match m { Match::Alternation(xs) => { Match::Alternation(xs.into_iter().map(opt_backtracks).collect()) } Match::Literal(_) => m, Match::Concatenation(xs) => { let mut xs = xs.into_iter().map(opt_backtracks).collect::<Vec<_>>(); let mut i = 0; while i + 1 < xs.len() { if if let (Match::Literal(a), Match::Literal(b)) = (&xs[i], &xs[i+1]) { match (a, b) { ('N', 'S') => true, ('S', 'N') => true, ('W', 'E') => true, ('E', 'W') => true, _ => false, } } else { false } { xs.drain(i..i+2); if i > 0 { i -= 1; } } else { i += 1; } } Match::Concatenation(xs) } } } // Is this an empty match? Used by opt_empties. fn is_empty(m: &Match) -> bool { match m { Match::Literal(_) => false, Match::Concatenation(xs) => xs.iter().all(is_empty), Match::Alternation(xs) => xs.len() > 0 && xs.iter().all(is_empty), } } // And this removes empty alternatives from concatenations. It's a // specific optimisation, but seems key to this exercise.
fn opt_empties(m: Match) -> Match { match m { Match::Alternation(xs) => { Match::Alternation(xs.into_iter().map(opt_empties).collect()) } Match::Literal(_) => m, Match::Concatenation(xs) => { Match::Concatenation(xs.into_iter().map(opt_empties).filter(|x|!is_empty(x)).collect()) } } } //////////////////////////////////////////////////////////////////////// // Problem 20b part // // Find the route to the turning point for a sequence of literals fn get_literal_partial(xs: &[Match]) -> Option<Vec<Match>> { if xs.len() == 0 { return None; } for elem in xs.iter().zip(xs.iter().rev()) { match elem { (Match::Literal('N'), Match::Literal('S')) => (), (Match::Literal('S'), Match::Literal('N')) => (), (Match::Literal('W'), Match::Literal('E')) => (), (Match::Literal('E'), Match::Literal('W')) => (), _ => return None, } } Some(xs.iter().take(xs.len() / 2).map(|x| (*x).clone()).collect()) } // Given a route that involves back-tracks, generate a list of routes // up to the turning-around point. e.g. NEWS -> NE. fn get_partials(m: &Match) -> Vec<Match> { match m { Match::Alternation(xs) => { let mut res = Vec::new(); for alternative in xs.iter() { res.extend(get_partials(alternative).into_iter()); } res } // A single literal will have no backtrackable parts. Match::Literal(_) => Vec::new(), Match::Concatenation(xs) => { match get_literal_partial(xs) { Some(x) => vec![Match::Concatenation(x)], None => { let mut res = Vec::new(); for i in 0..xs.len() { let partials = get_partials(&xs[i]); for partial in partials.into_iter() { let mut element = xs.iter().take(i).map(|x| (*x).clone()).collect::<Vec<Match>>(); element.push(partial); res.push(Match::Concatenation(element)); } } res } } } } } //////////////////////////////////////////////////////////////////////// // Generate all the possible strings. // fn generate_all(m: &Match) -> HashSet<String> { let mut res: HashSet<String> = HashSet::new(); match m { Match::Literal(x) => { res.insert(x.to_string()); () } Match::Alternation(xs) => { for x in xs.iter() { res.extend(generate_all(x).into_iter()); } } Match::Concatenation(xs) => { // Ugh. Cross products are potentially expensive. res.insert(String::new()); for x in xs.iter() { let to_cross = generate_all(x); add_cross_string(&mut res, &to_cross); } } } res } fn add_cross_string(lhs: &mut HashSet<String>, rhs: &HashSet<String>) { let mut res = HashSet::new(); for s1 in lhs.iter() { for s2 in rhs.iter() { let mut s = s1.clone(); s.push_str(&s2); res.insert(s); } } // This is where I'd like to swap lhs and res. lhs.clear(); lhs.extend(res.into_iter()); } // Generate all the incremental paths fn all_prefixes(strs: &HashSet<String>) -> HashSet<String> { let mut seen = HashSet::new(); for str in strs.iter() { for l in 0..str.len() { seen.insert(str.get(0..l+1).unwrap().to_string()); } } seen } // Given a path, generate the coordinates of its end point. fn get_coords(s: &str) -> (i32, i32) { let y = s.chars().map(|c| match c { 'N' => 1, 'S' => -1, _ => 0, }).sum(); let x = s.chars().map(|c| match c { 'E' => 1, 'W' => -1, _ => 0, }).sum(); (x, y) } // Build a mapping from coord to shortest route there. fn build_mapping(strs: &HashSet<String>) -> HashMap<(i32, i32), usize> { let mut map = HashMap::new(); for s in strs.iter() { let xy = get_coords(s); let l = s.len(); let e = map.entry(xy).or_insert(1000000); if l < *e { *e = l; } } map } // Count the long routes fn
(l: usize, mapping: &HashMap<(i32, i32), usize>) -> usize { mapping.iter().filter(|(_, l2)| **l2 >= l).count() } fn main() { let mut buffer = String::new(); io::stdin().read_to_string(&mut buffer).expect("Read error"); let chars = buffer.replace('^', "").replace('$', "").trim().chars().collect::<Vec<_>>(); // println!("{:?}\n", chars); let res = parse_regexp(&mut chars.iter().peekable()); // println!("{:?}\n", res); // All the backtracks form a trivial pattern, so we'll extract all // the routes up to a backtrack (plus original route). let mut partials = get_partials(&res); partials.push(res); // println!("{:?}\n", partials); // Then we'll eliminate the back-tracks, etc. let partials = partials.into_iter().map(|x| opt_empties(opt_backtracks(opt_regexp(x)))).collect::<Vec<_>>(); // println!("{:?}\n", partials); println!("{}\n", partials.len()); // And now build the regexp of doom. let regex = Match::Alternation(partials); let all = generate_all(&regex); // println!("{:?}\n", all); println!("{}\n", all.len()); // We have all the paths, now generate all the partial paths. let prefixes = all_prefixes(&all); println!("{}\n", prefixes.len()); // Some paths will overlap, so for each coordinate, find the shortest path there. let mapping = build_mapping(&prefixes); println!("{}\n", mapping.len()); // And find the count of coordinates over length 1000. println!("{}\n", count_long(1000, &mapping)); // My, that was really, really tedious. // If I'd known you could just generate all of the paths in // sensible time once you'd taken out the obvious // backtracking... *sigh*. }
count_long
identifier_name
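// A quick, self-contained check of `count_long` from this record (the masked
// identifier above): it counts coordinates whose shortest route is at least
// `l` steps. The sample mapping here is invented.
use std::collections::HashMap;

fn count_long(l: usize, mapping: &HashMap<(i32, i32), usize>) -> usize {
    mapping.iter().filter(|(_, l2)| **l2 >= l).count()
}

fn main() {
    let mapping: HashMap<(i32, i32), usize> =
        [((0, 1), 1), ((1, 1), 2), ((2, 1), 3)].into_iter().collect();
    assert_eq!(count_long(2, &mapping), 2); // (1,1) and (2,1) qualify
}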
river.rs
#[cfg(test)] extern crate gag; use std::rc::{Rc, Weak}; use std::cell::{RefCell, RefMut, Ref}; use tick::Tick; use salmon::{Salmon, Age, Direction}; use split_custom_escape::HomespringSplit; use program::Program; #[derive(Debug, PartialEq, Eq)] pub enum NodeType { Other(String), Hatchery, HydroPower, Snowmelt, Shallows(u8), Rapids(u8), AppendDown, Bear, ForceField, Sense, Clone, YoungBear, Bird, UpstreamKillingDevice, Waterfall, Universe, Powers, Marshy, Insulted, UpstreamSense, DownstreamSense, Evaporates, YouthFountain, Oblivion, Pump, RangeSense, Fear, ReverseUp, ReverseDown, Time, Lock,
InverseLock, YoungSense, Switch, YoungSwitch, Narrows, AppendUp, YoungRangeSense, Net, ForceDown, ForceUp, Spawn, PowerInvert, Current, Bridge, Split, RangeSwitch, YoungRangeSwitch, } impl NodeType { pub fn from_name(name: &str) -> NodeType { // unimplemented!(); use self::NodeType::*; match &name.to_lowercase()[..] { "hatchery" => Hatchery, "hydro. power" => HydroPower, "snowmelt" => Snowmelt, "shallows" => Shallows(2), "rapids" => Rapids(2), "append. down" => AppendDown, "bear" => Bear, "force. field" => ForceField, "sense" => Sense, "clone" => Clone, "young bear" => YoungBear, "bird" => Bird, "upstream. killing. device" => UpstreamKillingDevice, "waterfall" => Waterfall, "universe" => Universe, "powers" => Powers, "marshy" => Marshy, "insulated" => Insulted, "upstream. sense" => UpstreamSense, "downstream. sense" => DownstreamSense, "evaporates" => Evaporates, "youth. fountain" => YouthFountain, "oblivion" => Oblivion, "pump" => Pump, "range. sense" => RangeSense, "fear" => Fear, "reverse. up" => ReverseUp, "reverse. down" => ReverseDown, "time" => Time, "lock" => Lock, "inverse. lock" => InverseLock, "young. sense" => YoungSense, "switch" => Switch, "young. switch" => YoungSwitch, "narrows" => Narrows, "append. up" => AppendUp, "young. range. sense" => YoungRangeSense, "net" => Net, "force. down" => ForceDown, "force. up" => ForceUp, "spawn" => Spawn, "power. invert" => PowerInvert, "current" => Current, "bridge" => Bridge, "split" => Split, "range. switch" => RangeSwitch, "young. range. switch" => YoungRangeSwitch, _ => Other(name.to_owned()), } } } #[derive(Debug)] pub struct Node<'a, 'b> { pub name: &'b str, pub node_type: NodeType, pub parent: Weak<RefCell<Node<'a, 'b>>>, pub children: Vec<Rc<RefCell<Node<'a, 'b>>>>, pub salmon: Vec<Salmon<'a>>, pub block_salmon: bool, pub very_block_salmon: bool, pub powered: bool, pub block_power: bool, pub watered: bool, pub block_water: bool, pub snowy: bool, pub block_snow: bool, pub destroyed: bool, } impl<'a, 'b> Node<'a, 'b> { pub fn new(name: &'b str) -> Node<'a, 'b> { let node = Node { name, node_type: NodeType::from_name(name), parent: Weak::new(), children: vec![], salmon: vec![], block_salmon: false, very_block_salmon: false, powered: false, block_power: false, watered: false, block_water: false, snowy: false, block_snow: false, destroyed: false, }; node.init() } fn init(mut self) -> Node<'a, 'b> { use self::NodeType::*; match &self.node_type { &Snowmelt => self.snowy = true, &Powers => self.powered = true, _ => (), } self } pub fn borrow_child(&self, n: usize) -> Ref<Node<'a, 'b>> { self.children[n].borrow() } pub fn borrow_mut_child(&self, n: usize) -> RefMut<Node<'a, 'b>> { self.children[n].borrow_mut() } pub fn add_child(&mut self, child: Rc<RefCell<Node<'a, 'b>>>) { self.children.push(child); } pub fn add_salmon(&mut self, salmon: Salmon<'a>) { self.salmon.push(salmon); } // Returns the index of the child that would lead to the node // with a name of `name`. pub fn find_node_path(&self, name: &str) -> Option<usize> { (0..self.children.len()).position(|i| self.borrow_child(i).find_node(name) ) } // This is supposed to use an in-order search, but that doesn't // really make sense for an n-ary tree... // This will at least be in-order for any nodes with <= 2 children. 
fn find_node(&self, name: &str) -> bool { let len = self.children.len(); if len > 0 { match self.borrow_child(0).find_node(name) { true => return true, false => (), } } if self.name == name { return true; } if len > 1 { for i in 1..len { match self.borrow_child(i).find_node(name) { true => return true, false => (), } } } false } // something to move fish up and down stream pub fn move_salmon(&mut self, direction: Direction) { match &mut self.node_type { &mut NodeType::Shallows(ref mut i) => if *i > 0 { *i -= 1; return }, &mut NodeType::Rapids(ref mut i) => if *i > 0 { *i -= 1; return }, _ => (), } match direction { Direction::Downstream => { match self.parent.upgrade() { Some(p) => { // Use `Vec::drain_filter` once it stabilizes: https://doc.rust-lang.org/std/vec/struct.Vec.html#method.drain_filter let mut p = p.borrow_mut(); let mut i = 0; while i != self.salmon.len() { if self.salmon[i].direction == Direction::Downstream { let s = self.salmon.remove(i); p.salmon.push(s); } else { i += 1; } } }, None => { for s in &self.salmon { if s.direction == Direction::Downstream { print!("{}", s.name); } } self.salmon.retain(|s| s.direction != Direction::Downstream); }, } }, Direction::Upstream => { if self.block_salmon { return } // `Vec::drain_filter` could probably be used here too let mut i = 0; while i != self.salmon.len() { if self.salmon[i].direction == Direction::Upstream { let idx = match self.find_node_path(self.salmon[i].name) { Some(idx) if !self.borrow_child(idx).very_block_salmon => Some(idx), _ => self.children.iter().position(|c| !c.borrow().very_block_salmon), }; match idx { Some(idx) => { let s = self.salmon.remove(i); self.borrow_mut_child(idx).salmon.push(s); }, None => i += 1, } } else { i += 1; } } }, } } pub fn tick(&mut self, tick: Tick) { use tick::PropagationOrder::*; match tick.propagation_order() { PostOrder => { for i in 0..self.children.len() { self.borrow_mut_child(i).tick(tick); } self.run_tick(tick); }, PreOrder => { self.run_tick(tick); for i in 0..self.children.len() { self.borrow_mut_child(i).tick(tick); } }, _ => unimplemented!(), } } // TODO: rewrite this, it's crap // I don't like this inside of Node... (or do I...?) fn run_tick(&mut self, tick: Tick) { use self::NodeType::*; use tick::Tick::*; match (tick, &self.node_type) { (Snow, _) => { for i in 0..self.children.len() { if self.borrow_child(i).snowy { self.become_snowy(); break; } } }, (Water, _) => { for i in 0..self.children.len() { if self.borrow_child(i).watered { self.become_watered(); break; } } }, (Power, &HydroPower) => self.powered = self.watered, (FishDown, _) => self.move_salmon(Direction::Downstream), (FishUp, _) => self.move_salmon(Direction::Upstream), (FishHatch, &Hatchery) => if self.is_powered() { self.add_salmon(Salmon { age: Age::Mature, direction: Direction::Upstream, name: "homeless" }); }, _ => (), } } // TODO: I don't like this...
pub fn become_snowy(&mut self) { use self::NodeType::*; self.snowy = true; match self.node_type { HydroPower => self.destroyed = true, _ => (), } } pub fn become_watered(&mut self) { self.watered = true; } pub fn is_powered(&self) -> bool { if self.block_power { false } else if self.powered { true } else { self.children.iter().any(|c| { c.borrow_mut().is_powered() }) } } pub fn parse_program(code: &str) -> Program { let mut tokens = HomespringSplit::new(code); let root_node = match tokens.next() { Some(name) => { Rc::new(RefCell::new(Node::new(name))) }, None => return Program::Quine, }; let mut current_node = Rc::clone(&root_node); for tok in tokens { if tok == "" { let parent = current_node.borrow().parent.upgrade().unwrap(); current_node = parent; } else { let child = Rc::new(RefCell::new(Node::new(tok))); child.borrow_mut().parent = Rc::downgrade(&current_node); current_node.borrow_mut().add_child(Rc::clone(&child)); current_node = child; } } Program::River(root_node) } } // #[test] // fn print_salmon_name() { // use std::io::Read; // use self::gag::BufferRedirect; // let name = "fishy fish"; // let s = Salmon { // age: Age::Young, // direction: Direction::Downstream, // name, // }; // let mut river = Node::new("universe"); // river.add_salmon(s); // let mut out = String::new(); // let mut buf = BufferRedirect::stdout().unwrap(); // river.run_tick(Tick::FishDown); // buf.read_to_string(&mut out); // assert_eq!(0, river.salmon.len()); // assert_eq!(&out[..], name); // }
random_line_split
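// A stripped-down sketch (fields reduced; only the names from the record are
// kept) of the ownership pattern `parse_program` above relies on: children
// are owned via Rc<RefCell<...>>, parents are Weak so the tree has no
// reference cycles and drops cleanly.
use std::cell::RefCell;
use std::rc::{Rc, Weak};

struct Node {
    name: String,
    parent: Weak<RefCell<Node>>,
    children: Vec<Rc<RefCell<Node>>>,
}

fn main() {
    let root = Rc::new(RefCell::new(Node {
        name: "universe".into(),
        parent: Weak::new(),
        children: vec![],
    }));
    let child = Rc::new(RefCell::new(Node {
        name: "snowmelt".into(),
        parent: Rc::downgrade(&root),
        children: vec![],
    }));
    root.borrow_mut().children.push(Rc::clone(&child));

    // Climbing back up the river means upgrading the weak parent link,
    // just as `parse_program` does when it sees an empty token.
    let parent = child.borrow().parent.upgrade().unwrap();
    assert_eq!(parent.borrow().name, "universe");
}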
river.rs
#[cfg(test)] extern crate gag; use std::rc::{Rc, Weak}; use std::cell::{RefCell, RefMut, Ref}; use tick::Tick; use salmon::{Salmon, Age, Direction}; use split_custom_escape::HomespringSplit; use program::Program; #[derive(Debug, PartialEq, Eq)] pub enum NodeType { Other(String), Hatchery, HydroPower, Snowmelt, Shallows(u8), Rapids(u8), AppendDown, Bear, ForceField, Sense, Clone, YoungBear, Bird, UpstreamKillingDevice, Waterfall, Universe, Powers, Marshy, Insulted, UpstreamSense, DownstreamSense, Evaporates, YouthFountain, Oblivion, Pump, RangeSense, Fear, ReverseUp, ReverseDown, Time, Lock, InverseLock, YoungSense, Switch, YoungSwitch, Narrows, AppendUp, YoungRangeSense, Net, ForceDown, ForceUp, Spawn, PowerInvert, Current, Bridge, Split, RangeSwitch, YoungRangeSwitch, } impl NodeType { pub fn from_name(name: &str) -> NodeType { // unimplemented!(); use self::NodeType::*; match &name.to_lowercase()[..] { "hatchery" => Hatchery, "hydro. power" => HydroPower, "snowmelt" => Snowmelt, "shallows" => Shallows(2), "rapids" => Rapids(2), "append. down" => AppendDown, "bear" => Bear, "force. field" => ForceField, "sense" => Sense, "clone" => Clone, "young bear" => YoungBear, "bird" => Bird, "upstream. killing. device" => UpstreamKillingDevice, "waterfall" => Waterfall, "universe" => Universe, "powers" => Powers, "marshy" => Marshy, "insulated" => Insulted, "upstream. sense" => UpstreamSense, "downstream. sense" => DownstreamSense, "evaporates" => Evaporates, "youth. fountain" => YouthFountain, "oblivion" => Oblivion, "pump" => Pump, "range. sense" => RangeSense, "fear" => Fear, "reverse. up" => ReverseUp, "reverse. down" => ReverseDown, "time" => Time, "lock" => Lock, "inverse. lock" => InverseLock, "young. sense" => YoungSense, "switch" => Switch, "young. switch" => YoungSwitch, "narrows" => Narrows, "append. up" => AppendUp, "young. range. sense" => YoungRangeSense, "net" => Net, "force. down" => ForceDown, "force. up" => ForceUp, "spawn" => Spawn, "power. invert" => PowerInvert, "current" => Current, "bridge" => Bridge, "split" => Split, "range. switch" => RangeSwitch, "young. range. 
switch" => YoungRangeSwitch, _ => Other(name.to_owned()), } } } #[derive(Debug)] pub struct Node<'a, 'b> { pub name: &'b str, pub node_type: NodeType, pub parent: Weak<RefCell<Node<'a, 'b>>>, pub children: Vec<Rc<RefCell<Node<'a, 'b>>>>, pub salmon: Vec<Salmon<'a>>, pub block_salmon: bool, pub very_block_salmon: bool, pub powered: bool, pub block_power: bool, pub watered: bool, pub block_water: bool, pub snowy: bool, pub block_snow: bool, pub destroyed: bool, } impl<'a, 'b> Node<'a, 'b> { pub fn new(name: &'b str) -> Node<'a, 'b> { let node = Node { name, node_type: NodeType::from_name(name), parent: Weak::new(), children: vec![], salmon: vec![], block_salmon: false, very_block_salmon: false, powered: false, block_power: false, watered: false, block_water: false, snowy: false, block_snow: false, destroyed: false, }; node.init() } fn init(mut self) -> Node<'a, 'b> { use self::NodeType::*; match &self.node_type { &Snowmelt => self.snowy = true, &Powers => self.powered = true, _ => (), } self } pub fn borrow_child(&self, n: usize) -> Ref<Node<'a, 'b>> { self.children[n].borrow() } pub fn borrow_mut_child(&self, n: usize) -> RefMut<Node<'a, 'b>> { self.children[n].borrow_mut() } pub fn add_child(&mut self, child: Rc<RefCell<Node<'a, 'b>>>) { self.children.push(child); } pub fn add_salmon(&mut self, salmon: Salmon<'a>) { self.salmon.push(salmon); } // Returns the index of the child that would lead to the node // with a name of `name`. pub fn find_node_path(&self, name: &str) -> Option<usize> { (0..self.children.len()).position(|i| self.borrow_child(i).find_node(name) ) } // This is supposed to use an in-order search, but that doesn't // really make sense for an n-ary tree... // This will at least be in-order for any nodes with <= 2 children. fn find_node(&self, name: &str) -> bool { let len = self.children.len(); if len > 0 { match self.borrow_child(0).find_node(name) { true => return true, false => (), } } if self.name == name { return true; } if len > 1 { for i in 1..len { match self.borrow_child(i).find_node(name) { true => return true, false => (), } } } false } // something to move fish up and down stream pub fn move_salmon(&mut self, direction: Direction) { match &mut self.node_type { &mut NodeType::Shallows(ref mut i) => if *i > 0 { *i -= 1; return }, &mut NodeType::Rapids(ref mut i) => if *i > 0 { *i -= 1; return }, _ => (), } match direction { Direction::Downstream => { match self.parent.upgrade() { Some(p) => { // Use `Vec::drain_filter` when once it stabilizes: https://doc.rust-lang.org/std/vec/struct.Vec.html#method.drain_filter let mut p = p.borrow_mut(); let mut i = 0; while i!= self.salmon.len() { if self.salmon[i].direction == Direction::Downstream { let s = self.salmon.remove(i); p.salmon.push(s); } else { i += 1; } } }, None => { for s in &self.salmon { if s.direction == Direction::Downstream { print!("{}", s.name); } } self.salmon.retain(|s| s.direction!= Direction::Downstream); }, } }, Direction::Upstream => { if self.block_salmon { return } // `Vec::drain_filter` could probably be used here too let mut i = 0; while i!= self.salmon.len() { if self.salmon[i].direction == Direction::Upstream { let idx = match self.find_node_path(self.salmon[i].name) { Some(idx) if!self.borrow_child(idx).very_block_salmon => Some(idx), _ => self.children.iter().position(|c|!c.borrow().very_block_salmon), }; match idx { Some(idx) => { let s = self.salmon.remove(i); self.borrow_mut_child(idx).salmon.push(s); }, None => i += 1, } } else { i += 1; } } }, } } pub fn tick(&mut self, tick: Tick) { use 
tick::PropagationOrder::*; match tick.propagation_order() { PostOrder => { for i in 0..self.children.len() { self.borrow_mut_child(i).tick(tick); } self.run_tick(tick); }, PreOrder => { self.run_tick(tick); for i in 0..self.children.len() { self.borrow_mut_child(i).tick(tick); } }, _ => unimplemented!(), } } // TODO: rewrite this, it's crap // I don't like this inside of Node... (or do I...?) fn run_tick(&mut self, tick: Tick) { use self::NodeType::*; use tick::Tick::*; match (tick, &self.node_type) { (Snow, _) => { for i in 0..self.children.len() { if self.borrow_child(i).snowy { self.become_snowy(); break; } } }, (Water, _) => { for i in 0..self.children.len() { if self.borrow_child(i).watered { self.become_watered(); break; } } }, (Power, &HydroPower) => self.powered = self.watered, (FishDown, _) => self.move_salmon(Direction::Downstream), (FishUp, _) => self.move_salmon(Direction::Upstream), (FishHatch, &Hatchery) => if self.is_powered() { self.add_salmon(Salmon { age: Age::Mature, direction: Direction::Upstream, name: "homeless" }); }, _ => (), } } // TODO: I don't like this... pub fn become_snowy(&mut self)
pub fn become_watered(&mut self) { self.watered = true; } pub fn is_powered(&self) -> bool { if self.block_power { false } else if self.powered { true } else { self.children.iter().any(|c| { c.borrow_mut().is_powered() }) } } pub fn parse_program(code: &str) -> Program { let mut tokens = HomespringSplit::new(code); let root_node = match tokens.next() { Some(name) => { Rc::new(RefCell::new(Node::new(name))) }, None => return Program::Quine, }; let mut current_node = Rc::clone(&root_node); for tok in tokens { if tok == "" { let parent = current_node.borrow().parent.upgrade().unwrap(); current_node = parent; } else { let child = Rc::new(RefCell::new(Node::new(tok))); child.borrow_mut().parent = Rc::downgrade(&current_node); current_node.borrow_mut().add_child(Rc::clone(&child)); current_node = child; } } Program::River(root_node) } } // #[test] // fn print_salmon_name() { // use std::io::Read; // use self::gag::BufferRedirect; // let name = "fishy fish"; // let s = Salmon { // age: Age::Young, // direction: Direction::Downstream, // name, // }; // let mut river = Node::new("universe"); // river.add_salmon(s); // let mut out = String::new(); // let mut buf = BufferRedirect::stdout().unwrap(); // river.run_tick(Tick::FishDown); // buf.read_to_string(&mut out); // assert_eq!(0, river.salmon.len()); // assert_eq!(&out[..], name); // }
{ use self::NodeType::*; self.snowy = true; match self.node_type { HydroPower => self.destroyed = true, _ => (), } }
identifier_body
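// A flattened sketch of the `is_powered` recursion in this record: a node is
// powered if it generates power itself, is not blocking, or any child is
// powered. Plain owned structs stand in for the Rc<RefCell<...>> tree here.
struct N {
    powered: bool,
    block_power: bool,
    children: Vec<N>,
}

fn is_powered(n: &N) -> bool {
    if n.block_power {
        false
    } else if n.powered {
        true
    } else {
        n.children.iter().any(is_powered)
    }
}

fn main() {
    let river = N {
        powered: false,
        block_power: false,
        children: vec![N { powered: true, block_power: false, children: vec![] }],
    };
    assert!(is_powered(&river)); // power flows up from the child

    let blocked = N { block_power: true, ..river };
    assert!(!is_powered(&blocked)); // a blocking node cuts the power
}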
river.rs
#[cfg(test)] extern crate gag; use std::rc::{Rc, Weak}; use std::cell::{RefCell, RefMut, Ref}; use tick::Tick; use salmon::{Salmon, Age, Direction}; use split_custom_escape::HomespringSplit; use program::Program; #[derive(Debug, PartialEq, Eq)] pub enum NodeType { Other(String), Hatchery, HydroPower, Snowmelt, Shallows(u8), Rapids(u8), AppendDown, Bear, ForceField, Sense, Clone, YoungBear, Bird, UpstreamKillingDevice, Waterfall, Universe, Powers, Marshy, Insulted, UpstreamSense, DownstreamSense, Evaporates, YouthFountain, Oblivion, Pump, RangeSense, Fear, ReverseUp, ReverseDown, Time, Lock, InverseLock, YoungSense, Switch, YoungSwitch, Narrows, AppendUp, YoungRangeSense, Net, ForceDown, ForceUp, Spawn, PowerInvert, Current, Bridge, Split, RangeSwitch, YoungRangeSwitch, } impl NodeType { pub fn from_name(name: &str) -> NodeType { // unimplemented!(); use self::NodeType::*; match &name.to_lowercase()[..] { "hatchery" => Hatchery, "hydro. power" => HydroPower, "snowmelt" => Snowmelt, "shallows" => Shallows(2), "rapids" => Rapids(2), "append. down" => AppendDown, "bear" => Bear, "force. field" => ForceField, "sense" => Sense, "clone" => Clone, "young bear" => YoungBear, "bird" => Bird, "upstream. killing. device" => UpstreamKillingDevice, "waterfall" => Waterfall, "universe" => Universe, "powers" => Powers, "marshy" => Marshy, "insulated" => Insulted, "upstream. sense" => UpstreamSense, "downstream. sense" => DownstreamSense, "evaporates" => Evaporates, "youth. fountain" => YouthFountain, "oblivion" => Oblivion, "pump" => Pump, "range. sense" => RangeSense, "fear" => Fear, "reverse. up" => ReverseUp, "reverse. down" => ReverseDown, "time" => Time, "lock" => Lock, "inverse. lock" => InverseLock, "young. sense" => YoungSense, "switch" => Switch, "young. switch" => YoungSwitch, "narrows" => Narrows, "append. up" => AppendUp, "young. range. sense" => YoungRangeSense, "net" => Net, "force. down" => ForceDown, "force. up" => ForceUp, "spawn" => Spawn, "power. invert" => PowerInvert, "current" => Current, "bridge" => Bridge, "split" => Split, "range. switch" => RangeSwitch, "young. range. 
switch" => YoungRangeSwitch, _ => Other(name.to_owned()), } } } #[derive(Debug)] pub struct Node<'a, 'b> { pub name: &'b str, pub node_type: NodeType, pub parent: Weak<RefCell<Node<'a, 'b>>>, pub children: Vec<Rc<RefCell<Node<'a, 'b>>>>, pub salmon: Vec<Salmon<'a>>, pub block_salmon: bool, pub very_block_salmon: bool, pub powered: bool, pub block_power: bool, pub watered: bool, pub block_water: bool, pub snowy: bool, pub block_snow: bool, pub destroyed: bool, } impl<'a, 'b> Node<'a, 'b> { pub fn new(name: &'b str) -> Node<'a, 'b> { let node = Node { name, node_type: NodeType::from_name(name), parent: Weak::new(), children: vec![], salmon: vec![], block_salmon: false, very_block_salmon: false, powered: false, block_power: false, watered: false, block_water: false, snowy: false, block_snow: false, destroyed: false, }; node.init() } fn init(mut self) -> Node<'a, 'b> { use self::NodeType::*; match &self.node_type { &Snowmelt => self.snowy = true, &Powers => self.powered = true, _ => (), } self } pub fn borrow_child(&self, n: usize) -> Ref<Node<'a, 'b>> { self.children[n].borrow() } pub fn borrow_mut_child(&self, n: usize) -> RefMut<Node<'a, 'b>> { self.children[n].borrow_mut() } pub fn add_child(&mut self, child: Rc<RefCell<Node<'a, 'b>>>) { self.children.push(child); } pub fn add_salmon(&mut self, salmon: Salmon<'a>) { self.salmon.push(salmon); } // Returns the index of the child that would lead to the node // with a name of `name`. pub fn find_node_path(&self, name: &str) -> Option<usize> { (0..self.children.len()).position(|i| self.borrow_child(i).find_node(name) ) } // This is supposed to use an in-order search, but that doesn't // really make sense for an n-ary tree... // This will at least be in-order for any nodes with <= 2 children. fn
(&self, name: &str) -> bool { let len = self.children.len(); if len > 0 { match self.borrow_child(0).find_node(name) { true => return true, false => (), } } if self.name == name { return true; } if len > 1 { for i in 1..len { match self.borrow_child(i).find_node(name) { true => return true, false => (), } } } false } // something to move fish up and down stream pub fn move_salmon(&mut self, direction: Direction) { match &mut self.node_type { &mut NodeType::Shallows(ref mut i) => if *i > 0 { *i -= 1; return }, &mut NodeType::Rapids(ref mut i) => if *i > 0 { *i -= 1; return }, _ => (), } match direction { Direction::Downstream => { match self.parent.upgrade() { Some(p) => { // Use `Vec::drain_filter` once it stabilizes: https://doc.rust-lang.org/std/vec/struct.Vec.html#method.drain_filter let mut p = p.borrow_mut(); let mut i = 0; while i != self.salmon.len() { if self.salmon[i].direction == Direction::Downstream { let s = self.salmon.remove(i); p.salmon.push(s); } else { i += 1; } } }, None => { for s in &self.salmon { if s.direction == Direction::Downstream { print!("{}", s.name); } } self.salmon.retain(|s| s.direction != Direction::Downstream); }, } }, Direction::Upstream => { if self.block_salmon { return } // `Vec::drain_filter` could probably be used here too let mut i = 0; while i != self.salmon.len() { if self.salmon[i].direction == Direction::Upstream { let idx = match self.find_node_path(self.salmon[i].name) { Some(idx) if !self.borrow_child(idx).very_block_salmon => Some(idx), _ => self.children.iter().position(|c| !c.borrow().very_block_salmon), }; match idx { Some(idx) => { let s = self.salmon.remove(i); self.borrow_mut_child(idx).salmon.push(s); }, None => i += 1, } } else { i += 1; } } }, } } pub fn tick(&mut self, tick: Tick) { use tick::PropagationOrder::*; match tick.propagation_order() { PostOrder => { for i in 0..self.children.len() { self.borrow_mut_child(i).tick(tick); } self.run_tick(tick); }, PreOrder => { self.run_tick(tick); for i in 0..self.children.len() { self.borrow_mut_child(i).tick(tick); } }, _ => unimplemented!(), } } // TODO: rewrite this, it's crap // I don't like this inside of Node... (or do I...?) fn run_tick(&mut self, tick: Tick) { use self::NodeType::*; use tick::Tick::*; match (tick, &self.node_type) { (Snow, _) => { for i in 0..self.children.len() { if self.borrow_child(i).snowy { self.become_snowy(); break; } } }, (Water, _) => { for i in 0..self.children.len() { if self.borrow_child(i).watered { self.become_watered(); break; } } }, (Power, &HydroPower) => self.powered = self.watered, (FishDown, _) => self.move_salmon(Direction::Downstream), (FishUp, _) => self.move_salmon(Direction::Upstream), (FishHatch, &Hatchery) => if self.is_powered() { self.add_salmon(Salmon { age: Age::Mature, direction: Direction::Upstream, name: "homeless" }); }, _ => (), } } // TODO: I don't like this...
pub fn become_snowy(&mut self) { use self::NodeType::*; self.snowy = true; match self.node_type { HydroPower => self.destroyed = true, _ => (), } } pub fn become_watered(&mut self) { self.watered = true; } pub fn is_powered(&self) -> bool { if self.block_power { false } else if self.powered { true } else { self.children.iter().any(|c| { c.borrow_mut().is_powered() }) } } pub fn parse_program(code: &str) -> Program { let mut tokens = HomespringSplit::new(code); let root_node = match tokens.next() { Some(name) => { Rc::new(RefCell::new(Node::new(name))) }, None => return Program::Quine, }; let mut current_node = Rc::clone(&root_node); for tok in tokens { if tok == "" { let parent = current_node.borrow().parent.upgrade().unwrap(); current_node = parent; } else { let child = Rc::new(RefCell::new(Node::new(tok))); child.borrow_mut().parent = Rc::downgrade(&current_node); current_node.borrow_mut().add_child(Rc::clone(&child)); current_node = child; } } Program::River(root_node) } } // #[test] // fn print_salmon_name() { // use std::io::Read; // use self::gag::BufferRedirect; // let name = "fishy fish"; // let s = Salmon { // age: Age::Young, // direction: Direction::Downstream, // name, // }; // let mut river = Node::new("universe"); // river.add_salmon(s); // let mut out = String::new(); // let mut buf = BufferRedirect::stdout().unwrap(); // river.run_tick(Tick::FishDown); // buf.read_to_string(&mut out); // assert_eq!(0, river.salmon.len()); // assert_eq!(&out[..], name); // }
find_node
identifier_name
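// A tiny sketch of the traversal order the masked `find_node` implements:
// first child, then the node itself, then the remaining children - true
// in-order for nodes with at most two children, as the record's comment notes.
struct T {
    name: &'static str,
    children: Vec<T>,
}

fn visit(t: &T, out: &mut Vec<&'static str>) {
    if let Some(first) = t.children.first() {
        visit(first, out);
    }
    out.push(t.name);
    for c in t.children.iter().skip(1) {
        visit(c, out);
    }
}

fn main() {
    let tree = T {
        name: "root",
        children: vec![
            T { name: "left", children: vec![] },
            T { name: "right", children: vec![] },
        ],
    };
    let mut order = Vec::new();
    visit(&tree, &mut order);
    assert_eq!(order, ["left", "root", "right"]);
}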
main.rs
#![recursion_limit="128"] #[macro_use] extern crate yew; use std::time::Duration; use std::char; use yew::html::*; use yew::services::timeout::TimeoutService; fn main() { let model = init_model(); program(model, update, view); } struct Model { input: String, interval: String, time: u64, befunge: Befunge, } struct Befunge { source: Array2d<char>, cursor: (i64, i64), direction: Direction, running: bool, mode: Mode, stack: Stack, output: String } type Stack = Vec<i64>; #[derive(Debug)] enum Mode { StringMode, End, None } #[derive(Debug)] enum Direction { Up, Down, Left, Right } type Array2d<T> = Vec<Vec<T>>; const DEFAULT_INTERVAL: f64 = 200.0; const DEFAULT_INPUT: &str = "2>:1->1-00p::00g: v v%-g00::_v#!`\\-_$$.v ^g00_ v ^+1 < > :.^"; fn init_model() -> Model { Model { input: DEFAULT_INPUT.to_string(), interval: format!("{}", DEFAULT_INTERVAL), time: 0, befunge: Befunge { source: string_to_array(DEFAULT_INPUT), cursor: (0, 0), direction: Direction::Right,
stack: vec![], output: "".to_string(), } } } enum Msg { Input(String), Interval(String), Toggle, Step, Reset, Tick, } fn update(context: &mut Context<Msg>, model: &mut Model, msg: Msg) { match msg { Msg::Input(input) => { // model.befunge.source = string_to_array(input.as_str()); model.input = input; }, Msg::Interval(interval) => { model.interval = interval; }, Msg::Toggle => { match model.befunge.mode { Mode::End => model.befunge = init_befunge(model), _ => model.befunge.running =!model.befunge.running, } if model.befunge.running { context.timeout(Duration::from_millis(0), || Msg::Tick); } match model.befunge.mode { Mode::End => model.time = 0, _ => (), } }, Msg::Reset => { model.befunge = Befunge { cursor: (0, 0), direction: Direction::Right, stack: vec![], output: "".to_string(), running: false, source: string_to_array(model.input.as_str()), mode: Mode::End, }; model.time = 0; }, Msg::Tick => { if model.befunge.running { let frame = (1.0 / model.interval .parse() .unwrap_or(DEFAULT_INTERVAL) .max(0.0001).min(1.0)) .round() as usize; for _ in 0..frame { process(&mut model.befunge) } model.time += frame as u64; let ms = model.interval .parse() .unwrap_or(DEFAULT_INTERVAL as u64) .max(0).min(5000); context.timeout(Duration::from_millis(ms), || Msg::Tick); } }, Msg::Step => { match model.befunge.mode { Mode::End => model.befunge = init_befunge(model), _ => (), } model.befunge.running = false; model.time += 1; process(&mut model.befunge); }, } } fn init_befunge(model: &Model) -> Befunge { Befunge { cursor: (-1, 0), direction: Direction::Right, stack: vec![], output: "".to_string(), running: true, source: string_to_array(model.input.as_str()), mode: Mode::None, } } fn string_to_array(source: &str) -> Array2d<char> { source.split("\n").map( |v| v.chars().collect() ).collect() } fn cyclic_index<T>(a: &Vec<T>, i: i64) -> Option<i64> { let l = a.len() as i64; if l == 0 { None } else { Some(i % l) } } fn cyclic_index2d<T>(a: &Array2d<T>, cursor: (i64, i64)) -> Option<(i64, i64)> { let (x, y) = cursor; let cy = cyclic_index(&a, y); let cx = cy .and_then( |cy_| a.get(cy_ as usize) ) .and_then( |row| cyclic_index(row, x) ); cx.and_then( |cx_| cy.map( |cy_| (cx_, cy_) ) ) } fn get2d<T: Clone>(a: &Array2d<T>, cursor: (i64, i64)) -> Option<T> { let (x, y) = cursor; a.get(y as usize) .and_then( |row| row.get(x as usize) ) .cloned() } fn set2d<T>(cursor: (i64, i64), v: T, a: &mut Array2d<T>) { let (x, y) = cursor; a[y as usize][x as usize] = v; } // fn indexed_map2d<T, S, F: Fn((i64, i64), &T) -> S>(f: F, a: &Array2d<T>) -> Array2d<S> { // a.iter().enumerate().map( |(y, row)| // row.iter().enumerate().map( |(x, c)| f((x as i64, y as i64), c)).collect() // ).collect() // } fn walk_next<T>(a: &Array2d<T>, direction: &Direction, cursor: (i64, i64)) -> (i64, i64) { let (x, y) = cursor; let cursor_candidate = match *direction { Direction::Left => (x - 1, y), Direction::Right => (x + 1, y), Direction::Up => (x, y - 1), Direction::Down => (x, y + 1), }; cyclic_index2d(&a, cursor_candidate).unwrap_or((0, 0)) } fn process(b: &mut Befunge) { let cursor = walk_next(&b.source, &b.direction, b.cursor); let cell = get2d(&b.source, cursor).unwrap_or(' '); match b.mode { Mode::End => (), Mode::StringMode => { b.cursor = cursor; if cell!= '"' { b.stack.push(cell as i64); } else { commands(cell, cursor, b); } }, Mode::None => { b.cursor = cursor; commands(cell, cursor, b); } } } fn calc<F: Fn(i64, i64) -> i64>(s: &mut Stack, f: F) { let y = s.pop().unwrap_or(0); let x = s.pop().unwrap_or(0); s.push(f(x, y)); } fn 
commands(cell: char, cursor: (i64, i64), b: &mut Befunge) { match cell { '<' => b.direction = Direction::Left, '>' => b.direction = Direction::Right, '^' => b.direction = Direction::Up, 'v' => b.direction = Direction::Down, ' ' => (), '_' => { let v = b.stack.pop().unwrap_or(0); b.direction = if v == 0 { Direction::Right } else { Direction::Left }; }, '|' => { let v = b.stack.pop().unwrap_or(0); b.direction = if v == 0 { Direction::Down } else { Direction::Up }; }, '#' => b.cursor = walk_next(&b.source, &b.direction, cursor), '@' => { b.running = false; b.mode = Mode::End; }, '0' => b.stack.push(0), '1' => b.stack.push(1), '2' => b.stack.push(2), '3' => b.stack.push(3), '4' => b.stack.push(4), '5' => b.stack.push(5), '6' => b.stack.push(6), '7' => b.stack.push(7), '8' => b.stack.push(8), '9' => b.stack.push(9), '"' => b.mode = match b.mode { Mode::StringMode => Mode::None, _ => Mode::StringMode, }, '.' => { let v = b.stack.pop().unwrap_or(0); b.output = format!("{}{} ", b.output, v); }, ',' => { let v = b.stack.pop().unwrap_or(0); b.output = format!("{}{}", b.output, char::from_u32(v as u32).unwrap_or(' ') ); }, '+' => calc( &mut b.stack, |x, y| x + y ), '-' => calc( &mut b.stack, |x, y| x - y ), '*' => calc( &mut b.stack, |x, y| x * y ), '/' => calc( &mut b.stack, |x, y| x / y ), '%' => calc( &mut b.stack, |x, y| x % y ), '`' => calc( &mut b.stack, |x, y| if x > y { 1 } else { 0 } ), '!' => { let v = b.stack.pop().unwrap_or(0); b.stack.push(if v == 0 { 1 } else { 0 }); }, ':' => { let v = b.stack.pop().unwrap_or(0); b.stack.push(v); b.stack.push(v); }, '\\' => { let y = b.stack.pop().unwrap_or(0); let x = b.stack.pop().unwrap_or(0); b.stack.push(y); b.stack.push(x); }, '$' => { b.stack.pop(); }, 'g' => { let y = b.stack.pop().unwrap_or(0); let x = b.stack.pop().unwrap_or(0); let c = get2d(&b.source, (x, y)) .map( |v| v as i64 ) .unwrap_or(0); b.stack.push(c); }, 'p' => { let y = b.stack.pop().unwrap_or(0); let x = b.stack.pop().unwrap_or(0); let v = b.stack.pop().unwrap_or(0); set2d((x, y), char::from_u32(v as u32).unwrap_or(' '), &mut b.source); }, _ => (), } } fn view(model: &Model) -> Html<Msg> { html! { <div class="main", > <h1 class="title", > { "Befunge" } <span class="note", >{ "Yew (Rust wasm32-unknown-emscripten)" }</span> </h1> <div> <textarea class="text", type="text", oninput=|e: InputData| Msg::Input(e.value), value=&model.input, placeholder="This textarea will not work!
Sorry :(", rows=10, cols=80, /> </div> <input class="text", type="text", oninput=|e: InputData| Msg::Interval(e.value), value=&model.interval, /> <input class="button", type="button", onclick=|_| Msg::Toggle, value=&if model.befunge.running { "stop" } else { "run" }, /> <input class="button", type="button", onclick=|_| Msg::Step, value=&"step", /> <input class="button", type="button", onclick=|_| Msg::Reset, value=&"reset", /> <div> <div class="text", > { colorize(&model.befunge.source, model.befunge.cursor) } </div> </div> <div> <div class="text", > { model.befunge.stack.iter().map( |v| format!("{}", v) ).collect::<Vec<_>>().join(" ") } </div> </div> <div> <pre class="text", > { &model.befunge.output } </pre> </div> <div>{ format!("{}", model.time) }</div> <div> <a class="footer", href="https://github.com/pnlybubbles/yew-befunge", target="_blank", > { "source "} </a> </div> </div> } } fn fix_char_width(x: char) -> char { let ac = x as u32; if 33 <= ac && ac <= 126 { x } else { char::from_u32(160).unwrap_or(' ') } } fn colorize(source: &Array2d<char>, cursor: (i64, i64)) -> Html<Msg> { let (cx, cy) = cursor; html! { <div> { for source.iter().enumerate().map( |(y, row)| { html! { <div> { for row.iter().enumerate().map( |(x, cell)| { html! { <span class=if x as i64 == cx && y as i64 == cy { "cursor" } else { "" }, > { fix_char_width(*cell).to_string() } </span> } }) } </div> } }) } </div> } }
running: false, mode: Mode::End,
random_line_split
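// A standalone sketch of the wrapping used by `cyclic_index` / `walk_next`
// above: the playfield is a torus, so stepping past the last column wraps to
// column zero. Rust's `%` keeps the dividend's sign, so (like the record)
// this only wraps cleanly for non-negative indices.
fn cyclic_index(len: i64, i: i64) -> Option<i64> {
    if len == 0 { None } else { Some(i % len) }
}

fn main() {
    let row_len = 5;
    assert_eq!(cyclic_index(row_len, 5), Some(0)); // one past the end wraps
    assert_eq!(cyclic_index(row_len, 7), Some(2));
    assert_eq!(cyclic_index(0, 3), None); // an empty row has no valid index
}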
main.rs
#![recursion_limit="128"] #[macro_use] extern crate yew; use std::time::Duration; use std::char; use yew::html::*; use yew::services::timeout::TimeoutService; fn main() { let model = init_model(); program(model, update, view); } struct Model { input: String, interval: String, time: u64, befunge: Befunge, } struct Befunge { source: Array2d<char>, cursor: (i64, i64), direction: Direction, running: bool, mode: Mode, stack: Stack, output: String } type Stack = Vec<i64>; #[derive(Debug)] enum Mode { StringMode, End, None } #[derive(Debug)] enum Direction { Up, Down, Left, Right } type Array2d<T> = Vec<Vec<T>>; const DEFAULT_INTERVAL: f64 = 200.0; const DEFAULT_INPUT: &str = "2>:1->1-00p::00g: v v%-g00::_v#!`\\-_$$.v ^g00_ v ^+1 < > :.^"; fn init_model() -> Model { Model { input: DEFAULT_INPUT.to_string(), interval: format!("{}", DEFAULT_INTERVAL), time: 0, befunge: Befunge { source: string_to_array(DEFAULT_INPUT), cursor: (0, 0), direction: Direction::Right, running: false, mode: Mode::End, stack: vec![], output: "".to_string(), } } } enum Msg { Input(String), Interval(String), Toggle, Step, Reset, Tick, } fn update(context: &mut Context<Msg>, model: &mut Model, msg: Msg) { match msg { Msg::Input(input) => { // model.befunge.source = string_to_array(input.as_str()); model.input = input; }, Msg::Interval(interval) => { model.interval = interval; }, Msg::Toggle => { match model.befunge.mode { Mode::End => model.befunge = init_befunge(model), _ => model.befunge.running =!model.befunge.running, } if model.befunge.running { context.timeout(Duration::from_millis(0), || Msg::Tick); } match model.befunge.mode { Mode::End => model.time = 0, _ => (), } }, Msg::Reset => { model.befunge = Befunge { cursor: (0, 0), direction: Direction::Right, stack: vec![], output: "".to_string(), running: false, source: string_to_array(model.input.as_str()), mode: Mode::End, }; model.time = 0; }, Msg::Tick => { if model.befunge.running { let frame = (1.0 / model.interval .parse() .unwrap_or(DEFAULT_INTERVAL) .max(0.0001).min(1.0)) .round() as usize; for _ in 0..frame { process(&mut model.befunge) } model.time += frame as u64; let ms = model.interval .parse() .unwrap_or(DEFAULT_INTERVAL as u64) .max(0).min(5000); context.timeout(Duration::from_millis(ms), || Msg::Tick); } }, Msg::Step => { match model.befunge.mode { Mode::End => model.befunge = init_befunge(model), _ => (), } model.befunge.running = false; model.time += 1; process(&mut model.befunge); }, } } fn init_befunge(model: &Model) -> Befunge { Befunge { cursor: (-1, 0), direction: Direction::Right, stack: vec![], output: "".to_string(), running: true, source: string_to_array(model.input.as_str()), mode: Mode::None, } } fn string_to_array(source: &str) -> Array2d<char> { source.split("\n").map( |v| v.chars().collect() ).collect() } fn cyclic_index<T>(a: &Vec<T>, i: i64) -> Option<i64> { let l = a.len() as i64; if l == 0 { None } else { Some(i % l) } } fn cyclic_index2d<T>(a: &Array2d<T>, cursor: (i64, i64)) -> Option<(i64, i64)> { let (x, y) = cursor; let cy = cyclic_index(&a, y); let cx = cy .and_then( |cy_| a.get(cy_ as usize) ) .and_then( |row| cyclic_index(row, x) ); cx.and_then( |cx_| cy.map( |cy_| (cx_, cy_) ) ) } fn get2d<T: Clone>(a: &Array2d<T>, cursor: (i64, i64)) -> Option<T> { let (x, y) = cursor; a.get(y as usize) .and_then( |row| row.get(x as usize) ) .cloned() } fn set2d<T>(cursor: (i64, i64), v: T, a: &mut Array2d<T>) { let (x, y) = cursor; a[y as usize][x as usize] = v; } // fn indexed_map2d<T, S, F: Fn((i64, i64), &T) -> S>(f: F, a: &Array2d<T>) -> 
Array2d<S> { // a.iter().enumerate().map( |(y, row)| // row.iter().enumerate().map( |(x, c)| f((x as i64, y as i64), c)).collect() // ).collect() // } fn walk_next<T>(a: &Array2d<T>, direction: &Direction, cursor: (i64, i64)) -> (i64, i64) { let (x, y) = cursor; let cursor_candidate = match *direction { Direction::Left => (x - 1, y), Direction::Right => (x + 1, y), Direction::Up => (x, y - 1), Direction::Down => (x, y + 1), }; cyclic_index2d(&a, cursor_candidate).unwrap_or((0, 0)) } fn process(b: &mut Befunge) { let cursor = walk_next(&b.source, &b.direction, b.cursor); let cell = get2d(&b.source, cursor).unwrap_or(' '); match b.mode { Mode::End => (), Mode::StringMode => { b.cursor = cursor; if cell != '"' { b.stack.push(cell as i64); } else { commands(cell, cursor, b); } }, Mode::None => { b.cursor = cursor; commands(cell, cursor, b); } } } fn calc<F: Fn(i64, i64) -> i64>(s: &mut Stack, f: F) { let y = s.pop().unwrap_or(0); let x = s.pop().unwrap_or(0); s.push(f(x, y)); } fn commands(cell: char, cursor: (i64, i64), b: &mut Befunge) { match cell { '<' => b.direction = Direction::Left, '>' => b.direction = Direction::Right, '^' => b.direction = Direction::Up, 'v' => b.direction = Direction::Down, ' ' => (), '_' => { let v = b.stack.pop().unwrap_or(0); b.direction = if v == 0 { Direction::Right } else { Direction::Left }; }, '|' => { let v = b.stack.pop().unwrap_or(0); b.direction = if v == 0 { Direction::Down } else { Direction::Up }; }, '#' => b.cursor = walk_next(&b.source, &b.direction, cursor), '@' => { b.running = false; b.mode = Mode::End; }, '0' => b.stack.push(0), '1' => b.stack.push(1), '2' => b.stack.push(2), '3' => b.stack.push(3), '4' => b.stack.push(4), '5' => b.stack.push(5), '6' => b.stack.push(6), '7' => b.stack.push(7), '8' => b.stack.push(8), '9' => b.stack.push(9), '"' => b.mode = match b.mode { Mode::StringMode => Mode::None, _ => Mode::StringMode, }, '.' => { let v = b.stack.pop().unwrap_or(0); b.output = format!("{}{} ", b.output, v); }, ',' => { let v = b.stack.pop().unwrap_or(0); b.output = format!("{}{}", b.output, char::from_u32(v as u32).unwrap_or(' ') ); }, '+' => calc( &mut b.stack, |x, y| x + y ), '-' => calc( &mut b.stack, |x, y| x - y ), '*' => calc( &mut b.stack, |x, y| x * y ), '/' => calc( &mut b.stack, |x, y| x / y ), '%' => calc( &mut b.stack, |x, y| x % y ), '`' => calc( &mut b.stack, |x, y| if x > y { 1 } else { 0 } ), '!' => { let v = b.stack.pop().unwrap_or(0); b.stack.push(if v == 0 { 1 } else { 0 }); }, ':' => { let v = b.stack.pop().unwrap_or(0); b.stack.push(v); b.stack.push(v); }, '\\' => { let y = b.stack.pop().unwrap_or(0); let x = b.stack.pop().unwrap_or(0); b.stack.push(y); b.stack.push(x); }, '$' => { b.stack.pop(); }, 'g' => { let y = b.stack.pop().unwrap_or(0); let x = b.stack.pop().unwrap_or(0); let c = get2d(&b.source, (x, y)) .map( |v| v as i64 ) .unwrap_or(0); b.stack.push(c); }, 'p' => { let y = b.stack.pop().unwrap_or(0); let x = b.stack.pop().unwrap_or(0); let v = b.stack.pop().unwrap_or(0); set2d((x, y), char::from_u32(v as u32).unwrap_or(' '), &mut b.source); }, _ => (), } } fn view(model: &Model) -> Html<Msg> { html! { <div class="main", > <h1 class="title", > { "Befunge" } <span class="note", >{ "Yew (Rust wasm32-unknown-emscripten)" }</span> </h1> <div> <textarea class="text", type="text", oninput=|e: InputData| Msg::Input(e.value), value=&model.input, placeholder="This textarea will not work!
Sorry :(", rows=10, cols=80, /> </div> <input class="text", type="text", oninput=|e: InputData| Msg::Interval(e.value), value=&model.interval, /> <input class="button", type="button", onclick=|_| Msg::Toggle, value=&if model.befunge.running { "stop" } else { "run" }, /> <input class="button", type="button", onclick=|_| Msg::Step, value=&"step", /> <input class="button", type="button", onclick=|_| Msg::Reset, value=&"reset", /> <div> <div class="text", > { colorize(&model.befunge.source, model.befunge.cursor) } </div> </div> <div> <div class="text", > { model.befunge.stack.iter().map( |v| format!("{}", v) ).collect::<Vec<_>>().join(" ") } </div> </div> <div> <pre class="text", > { &model.befunge.output } </pre> </div> <div>{ format!("{}", model.time) }</div> <div> <a class="footer", href="https://github.com/pnlybubbles/yew-befunge", target="_blank", > { "source "} </a> </div> </div> } } fn
(x: char) -> char { let ac = x as u32; if 33 <= ac && ac <= 126 { x } else { char::from_u32(160).unwrap_or(' ') } } fn colorize(source: &Array2d<char>, cursor: (i64, i64)) -> Html<Msg> { let (cx, cy) = cursor; html! { <div> { for source.iter().enumerate().map( |(y, row)| { html! { <div> { for row.iter().enumerate().map( |(x, cell)| { html! { <span class=if x as i64 == cx && y as i64 == cy { "cursor" } else { "" }, > { fix_char_width(*cell).to_string() } </span> } }) } </div> } }) } </div> } }
fix_char_width
identifier_name
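// A quick check of the `calc` helper pattern from this record: every binary
// operator pops y first, then x, and an empty stack reads as zero, so stack
// underflow never panics.
type Stack = Vec<i64>;

fn calc<F: Fn(i64, i64) -> i64>(s: &mut Stack, f: F) {
    let y = s.pop().unwrap_or(0);
    let x = s.pop().unwrap_or(0);
    s.push(f(x, y));
}

fn main() {
    let mut s: Stack = vec![7, 3];
    calc(&mut s, |x, y| x - y); // '-' pops y = 3, x = 7, pushes 4
    assert_eq!(s, vec![4]);

    calc(&mut s, |x, y| x * y); // only one value left, so x defaults to 0
    assert_eq!(s, vec![0]);
}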
main.rs
#![recursion_limit="128"] #[macro_use] extern crate yew; use std::time::Duration; use std::char; use yew::html::*; use yew::services::timeout::TimeoutService; fn main()
struct Model { input: String, interval: String, time: u64, befunge: Befunge, } struct Befunge { source: Array2d<char>, cursor: (i64, i64), direction: Direction, running: bool, mode: Mode, stack: Stack, output: String } type Stack = Vec<i64>; #[derive(Debug)] enum Mode { StringMode, End, None } #[derive(Debug)] enum Direction { Up, Down, Left, Right } type Array2d<T> = Vec<Vec<T>>; const DEFAULT_INTERVAL: f64 = 200.0; const DEFAULT_INPUT: &str = "2>:1->1-00p::00g: v v%-g00::_v#!`\\-_$$.v ^g00_ v ^+1 < > :.^"; fn init_model() -> Model { Model { input: DEFAULT_INPUT.to_string(), interval: format!("{}", DEFAULT_INTERVAL), time: 0, befunge: Befunge { source: string_to_array(DEFAULT_INPUT), cursor: (0, 0), direction: Direction::Right, running: false, mode: Mode::End, stack: vec![], output: "".to_string(), } } } enum Msg { Input(String), Interval(String), Toggle, Step, Reset, Tick, } fn update(context: &mut Context<Msg>, model: &mut Model, msg: Msg) { match msg { Msg::Input(input) => { // model.befunge.source = string_to_array(input.as_str()); model.input = input; }, Msg::Interval(interval) => { model.interval = interval; }, Msg::Toggle => { match model.befunge.mode { Mode::End => model.befunge = init_befunge(model), _ => model.befunge.running =!model.befunge.running, } if model.befunge.running { context.timeout(Duration::from_millis(0), || Msg::Tick); } match model.befunge.mode { Mode::End => model.time = 0, _ => (), } }, Msg::Reset => { model.befunge = Befunge { cursor: (0, 0), direction: Direction::Right, stack: vec![], output: "".to_string(), running: false, source: string_to_array(model.input.as_str()), mode: Mode::End, }; model.time = 0; }, Msg::Tick => { if model.befunge.running { let frame = (1.0 / model.interval .parse() .unwrap_or(DEFAULT_INTERVAL) .max(0.0001).min(1.0)) .round() as usize; for _ in 0..frame { process(&mut model.befunge) } model.time += frame as u64; let ms = model.interval .parse() .unwrap_or(DEFAULT_INTERVAL as u64) .max(0).min(5000); context.timeout(Duration::from_millis(ms), || Msg::Tick); } }, Msg::Step => { match model.befunge.mode { Mode::End => model.befunge = init_befunge(model), _ => (), } model.befunge.running = false; model.time += 1; process(&mut model.befunge); }, } } fn init_befunge(model: &Model) -> Befunge { Befunge { cursor: (-1, 0), direction: Direction::Right, stack: vec![], output: "".to_string(), running: true, source: string_to_array(model.input.as_str()), mode: Mode::None, } } fn string_to_array(source: &str) -> Array2d<char> { source.split("\n").map( |v| v.chars().collect() ).collect() } fn cyclic_index<T>(a: &Vec<T>, i: i64) -> Option<i64> { let l = a.len() as i64; if l == 0 { None } else { Some(i % l) } } fn cyclic_index2d<T>(a: &Array2d<T>, cursor: (i64, i64)) -> Option<(i64, i64)> { let (x, y) = cursor; let cy = cyclic_index(&a, y); let cx = cy .and_then( |cy_| a.get(cy_ as usize) ) .and_then( |row| cyclic_index(row, x) ); cx.and_then( |cx_| cy.map( |cy_| (cx_, cy_) ) ) } fn get2d<T: Clone>(a: &Array2d<T>, cursor: (i64, i64)) -> Option<T> { let (x, y) = cursor; a.get(y as usize) .and_then( |row| row.get(x as usize) ) .cloned() } fn set2d<T>(cursor: (i64, i64), v: T, a: &mut Array2d<T>) { let (x, y) = cursor; a[y as usize][x as usize] = v; } // fn indexed_map2d<T, S, F: Fn((i64, i64), &T) -> S>(f: F, a: &Array2d<T>) -> Array2d<S> { // a.iter().enumerate().map( |(y, row)| // row.iter().enumerate().map( |(x, c)| f((x as i64, y as i64), c)).collect() // ).collect() // } fn walk_next<T>(a: &Array2d<T>, direction: &Direction, cursor: (i64, i64)) -> 
(i64, i64) { let (x, y) = cursor; let cursor_candidate = match *direction { Direction::Left => (x - 1, y), Direction::Right => (x + 1, y), Direction::Up => (x, y - 1), Direction::Down => (x, y + 1), }; cyclic_index2d(&a, cursor_candidate).unwrap_or((0, 0)) } fn process(b: &mut Befunge) { let cursor = walk_next(&b.source, &b.direction, b.cursor); let cell = get2d(&b.source, cursor).unwrap_or(' '); match b.mode { Mode::End => (), Mode::StringMode => { b.cursor = cursor; if cell != '"' { b.stack.push(cell as i64); } else { commands(cell, cursor, b); } }, Mode::None => { b.cursor = cursor; commands(cell, cursor, b); } } } fn calc<F: Fn(i64, i64) -> i64>(s: &mut Stack, f: F) { let y = s.pop().unwrap_or(0); let x = s.pop().unwrap_or(0); s.push(f(x, y)); } fn commands(cell: char, cursor: (i64, i64), b: &mut Befunge) { match cell { '<' => b.direction = Direction::Left, '>' => b.direction = Direction::Right, '^' => b.direction = Direction::Up, 'v' => b.direction = Direction::Down, ' ' => (), '_' => { let v = b.stack.pop().unwrap_or(0); b.direction = if v == 0 { Direction::Right } else { Direction::Left }; }, '|' => { let v = b.stack.pop().unwrap_or(0); b.direction = if v == 0 { Direction::Down } else { Direction::Up }; }, '#' => b.cursor = walk_next(&b.source, &b.direction, cursor), '@' => { b.running = false; b.mode = Mode::End; }, '0' => b.stack.push(0), '1' => b.stack.push(1), '2' => b.stack.push(2), '3' => b.stack.push(3), '4' => b.stack.push(4), '5' => b.stack.push(5), '6' => b.stack.push(6), '7' => b.stack.push(7), '8' => b.stack.push(8), '9' => b.stack.push(9), '"' => b.mode = match b.mode { Mode::StringMode => Mode::None, _ => Mode::StringMode, }, '.' => { let v = b.stack.pop().unwrap_or(0); b.output = format!("{}{} ", b.output, v); }, ',' => { let v = b.stack.pop().unwrap_or(0); b.output = format!("{}{}", b.output, char::from_u32(v as u32).unwrap_or(' ') ); }, '+' => calc( &mut b.stack, |x, y| x + y ), '-' => calc( &mut b.stack, |x, y| x - y ), '*' => calc( &mut b.stack, |x, y| x * y ), '/' => calc( &mut b.stack, |x, y| x / y ), '%' => calc( &mut b.stack, |x, y| x % y ), '`' => calc( &mut b.stack, |x, y| if x > y { 1 } else { 0 } ), '!' => { let v = b.stack.pop().unwrap_or(0); b.stack.push(if v == 0 { 1 } else { 0 }); }, ':' => { let v = b.stack.pop().unwrap_or(0); b.stack.push(v); b.stack.push(v); }, '\\' => { let y = b.stack.pop().unwrap_or(0); let x = b.stack.pop().unwrap_or(0); b.stack.push(y); b.stack.push(x); }, '$' => { b.stack.pop(); }, 'g' => { let y = b.stack.pop().unwrap_or(0); let x = b.stack.pop().unwrap_or(0); let c = get2d(&b.source, (x, y)) .map( |v| v as i64 ) .unwrap_or(0); b.stack.push(c); }, 'p' => { let y = b.stack.pop().unwrap_or(0); let x = b.stack.pop().unwrap_or(0); let v = b.stack.pop().unwrap_or(0); set2d((x, y), char::from_u32(v as u32).unwrap_or(' '), &mut b.source); }, _ => (), } } fn view(model: &Model) -> Html<Msg> { html! { <div class="main", > <h1 class="title", > { "Befunge" } <span class="note", >{ "Yew (Rust wasm32-unknown-emscripten)" }</span> </h1> <div> <textarea class="text", type="text", oninput=|e: InputData| Msg::Input(e.value), value=&model.input, placeholder="This textarea will not work!
Sorry :(", rows=10, cols=80, /> </div> <input class="text", type="text", oninput=|e: InputData| Msg::Interval(e.value), value=&model.interval, /> <input class="button", type="button", onclick=|_| Msg::Toggle, value=&if model.befunge.running { "stop" } else { "run" }, /> <input class="button", type="button", onclick=|_| Msg::Step, value=&"step", /> <input class="button", type="button", onclick=|_| Msg::Reset, value=&"reset", /> <div> <div class="text", > { colorize(&model.befunge.source, model.befunge.cursor) } </div> </div> <div> <div class="text", > { model.befunge.stack.iter().map( |v| format!("{}", v) ).collect::<Vec<_>>().join(" ") } </div> </div> <div> <pre class="text", > { &model.befunge.output } </pre> </div> <div>{ format!("{}", model.time) }</div> <div> <a class="footer", href="https://github.com/pnlybubbles/yew-befunge", target="_blank", > { "source "} </a> </div> </div> } } fn fix_char_width(x: char) -> char { let ac = x as u32; if 33 <= ac && ac <= 126 { x } else { char::from_u32(160).unwrap_or(' ') } } fn colorize(source: &Array2d<char>, cursor: (i64, i64)) -> Html<Msg> { let (cx, cy) = cursor; html! { <div> { for source.iter().enumerate().map( |(y, row)| { html! { <div> { for row.iter().enumerate().map( |(x, cell)| { html! { <span class=if x as i64 == cx && y as i64 == cy { "cursor" } else { "" }, > { fix_char_width(*cell).to_string() } </span> } }) } </div> } }) } </div> } }
{ let model = init_model(); program(model, update, view); }
identifier_body
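This row's `calc` helper pops the top of the stack as the *second* operand, which is what makes the non-commutative Befunge commands `-`, `/`, `%`, and `` ` `` come out right. Below is a minimal standalone sketch of that pop order; `calc` is copied from the row above (with the row's `Stack` alias inlined as `Vec<i64>`), while `main` is illustrative only:

```rust
// Pop order used by `calc`: top of stack is `y`, the element under it is `x`,
// so Befunge `-` computes x - y (push 10, push 3, `-` leaves 7).
fn calc<F: Fn(i64, i64) -> i64>(s: &mut Vec<i64>, f: F) {
    let y = s.pop().unwrap_or(0); // top of stack
    let x = s.pop().unwrap_or(0); // second from top
    s.push(f(x, y));
}

fn main() {
    let mut stack = vec![10, 3];
    calc(&mut stack, |x, y| x - y);
    assert_eq!(stack, vec![7]);
}
```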
section.rs
//! # Section Block //! //! _[slack api docs 🔗]_ //! //! Available in surfaces: //! - [modals 🔗] //! - [messages 🔗] //! - [home tabs 🔗] //! //! A `section` is one of the most flexible blocks available - //! it can be used as a simple text block, //! in combination with text fields, //! or side-by-side with any of the available [block elements 🔗] //! //! [slack api docs 🔗]: https://api.slack.com/reference/block-kit/blocks#section //! [modals 🔗]: https://api.slack.com/surfaces/modals //! [messages 🔗]: https://api.slack.com/surfaces/messages //! [home tabs 🔗]: https://api.slack.com/surfaces/tabs //! [block elements 🔗]: https://api.slack.com/reference/messaging/block-elements use std::borrow::Cow; use serde::{Deserialize, Serialize}; #[cfg(feature = "validation")] use validator::Validate; #[cfg(feature = "validation")] use crate::val_helpr::ValidationResult; use crate::{compose::text, elems::BlockElement}; /// # Section Block /// /// _[slack api docs 🔗]_ /// /// Available in surfaces: /// - [modals 🔗] /// - [messages 🔗] /// - [home tabs 🔗] /// /// A `section` is one of the most flexible blocks available - /// it can be used as a simple text block, /// in combination with text fields, /// or side-by-side with any of the available [block elements 🔗] /// /// [slack api docs 🔗]: https://api.slack.com/reference/block-kit/blocks#section /// [modals 🔗]: https://api.slack.com/surfaces/modals /// [messages 🔗]: https://api.slack.com/surfaces/messages /// [home tabs 🔗]: https://api.slack.com/surfaces/tabs /// [block elements 🔗]: https://api.slack.com/reference/messaging/block-elements #[derive(Clone, Debug, Deserialize, Hash, PartialEq, Serialize)] #[cfg_attr(feature = "validation", derive(Validate))] pub struct Section<'a> { #[serde(skip_serializing_if = "Option::is_none")] #[cfg_attr(feature = "validation", validate(custom = "validate::fields"))] fields: Option<Cow<'a, [text::Text]>>, #[serde(skip_serializing_if = "Option::is_none")] #[cfg_attr(feature = "validation", validate(custom = "validate::text"))] text: Option<text::Text>, #[serde(skip_serializing_if = "Option::is_none")] #[cfg_attr(feature = "validation", validate(custom = "validate::block_id"))] block_id: Option<Cow<'a, str>>, /// One of the available [element objects 🔗][element_objects]. /// /// [element_objects]: https://api.slack.com/reference/messaging/block-elements #[serde(skip_serializing_if = "Option::is_none")] accessory: Option<BlockElement<'a>>, } impl<'a> Section<'a> { /// Build a new section block /// /// For example, see `blocks::section::build::SectionBuilder`. pub fn builder() -> build::SectionBuilderInit<'a> { build::SectionBuilderInit::new() } /// Validate that this Section block agrees with Slack's model requirements /// /// # Errors /// - If `fields` contains more than 10 fields /// - If one of `fields` longer than 2000 chars /// - If `text` longer than 3000 chars /// - If `block_id` longer than 255 chars /// /// # Example /// ``` /// use slack_blocks::{blocks, compose::text}; /// /// let long_string = std::iter::repeat(' ').take(256).collect::<String>(); /// /// let block = blocks::Section::builder().text(text::Plain::from("file_id")) /// .block_id(long_string) /// .build(); /// /// assert_eq!(true, matches!(block.validate(), Err(_))); /// ```
Validate::validate(self) } } /// Section block builder pub mod build { use std::marker::PhantomData; use super::*; use crate::build::*; /// Compile-time markers for builder methods #[allow(non_camel_case_types)] pub mod method { /// SectionBuilder.text #[derive(Clone, Copy, Debug)] pub struct text; } /// Initial state for `SectionBuilder` pub type SectionBuilderInit<'a> = SectionBuilder<'a, RequiredMethodNotCalled<method::text>>; /// Build an Section block /// /// Allows you to construct safely, with compile-time checks /// on required setter methods. /// /// # Required Methods /// `SectionBuilder::build()` is only available if these methods have been called: /// - `text` **or** `field(s)`, both may be called. /// /// # Example /// ``` /// use slack_blocks::{blocks::Section, /// elems::Image, /// text, /// text::ToSlackPlaintext}; /// /// let block = /// Section::builder().text("foo".plaintext()) /// .field("bar".plaintext()) /// .field("baz".plaintext()) /// // alternatively: /// .fields(vec!["bar".plaintext(), /// "baz".plaintext()] /// .into_iter() /// .map(text::Text::from) /// ) /// .accessory(Image::builder().image_url("foo.png") /// .alt_text("pic of foo") /// .build()) /// .build(); /// ``` #[derive(Debug)] pub struct SectionBuilder<'a, Text> { accessory: Option<BlockElement<'a>>, text: Option<text::Text>, fields: Option<Vec<text::Text>>, block_id: Option<Cow<'a, str>>, state: PhantomData<Text>, } impl<'a, E> SectionBuilder<'a, E> { /// Create a new SectionBuilder pub fn new() -> Self { Self { accessory: None, text: None, fields: None, block_id: None, state: PhantomData::<_> } } /// Set `accessory` (Optional) pub fn accessory<B>(mut self, acc: B) -> Self where B: Into<BlockElement<'a>> { self.accessory = Some(acc.into()); self } /// Add `text` (**Required: this or `field(s)`**) /// /// The text for the block, in the form of a [text object 🔗]. /// /// Maximum length for the text in this field is 3000 characters. /// /// [text object 🔗]: https://api.slack.com/reference/messaging/composition-objects#text pub fn text<T>(self, text: T) -> SectionBuilder<'a, Set<method::text>> where T: Into<text::Text> { SectionBuilder { accessory: self.accessory, text: Some(text.into()), fields: self.fields, block_id: self.block_id, state: PhantomData::<_> } } /// Set `fields` (**Required: this or `text`**) /// /// A collection of [text objects 🔗]. /// /// Any text objects included with fields will be /// rendered in a compact format that allows for /// 2 columns of side-by-side text. /// /// Maximum number of items is 10. /// /// Maximum length for the text in each item is 2000 characters. /// /// [text objects 🔗]: https://api.slack.com/reference/messaging/composition-objects#text pub fn fields<I>(self, fields: I) -> SectionBuilder<'a, Set<method::text>> where I: IntoIterator<Item = text::Text> { SectionBuilder { accessory: self.accessory, text: self.text, fields: Some(fields.into_iter().collect()), block_id: self.block_id, state: PhantomData::<_> } } /// Append a single field to `fields`. pub fn field<T>(mut self, text: T) -> SectionBuilder<'a, Set<method::text>> where T: Into<text::Text> { let mut fields = self.fields.take().unwrap_or_default(); fields.push(text.into()); self.fields(fields) } /// XML macro children, appends `fields` to the Section. /// /// To set `text`, use the `text` attribute. /// ``` /// use slack_blocks::{blocks::Section, blox::*, text, text::ToSlackPlaintext}; /// /// let xml = blox! 
{ /// <section_block text={"Section".plaintext()}> /// <text kind=plain>"Foo"</text> /// <text kind=plain>"Bar"</text> /// </section_block> /// }; /// /// let equiv = Section::builder().text("Section".plaintext()) /// .field("Foo".plaintext()) /// .field("Bar".plaintext()) /// .build(); /// /// assert_eq!(xml, equiv); /// ``` #[cfg(feature = "blox")] #[cfg_attr(docsrs, doc(cfg(feature = "blox")))] pub fn child<T>(self, text: T) -> SectionBuilder<'a, Set<method::text>> where T: Into<text::Text> { self.field(text) } /// Set `block_id` (Optional) /// /// A string acting as a unique identifier for a block. /// /// You can use this `block_id` when you receive an interaction payload /// to [identify the source of the action 🔗]. /// /// If not specified, a `block_id` will be generated. /// /// Maximum length for this field is 255 characters. /// /// [identify the source of the action 🔗]: https://api.slack.com/interactivity/handling#payloads pub fn block_id<S>(mut self, block_id: S) -> Self where S: Into<Cow<'a, str>> { self.block_id = Some(block_id.into()); self } } impl<'a> SectionBuilder<'a, Set<method::text>> { /// All done building, now give me a darn actions block! /// /// > `no method name 'build' found for struct 'SectionBuilder<...>'`? /// Make sure all required setter methods have been called. See docs for `SectionBuilder`. /// /// ```compile_fail /// use slack_blocks::blocks::Section; /// /// let foo = Section::builder().build(); // Won't compile! /// ``` /// /// ``` /// use slack_blocks::{blocks::Section, /// compose::text::ToSlackPlaintext, /// elems::Image}; /// /// let block = /// Section::builder().text("foo".plaintext()) /// .accessory(Image::builder().image_url("foo.png") /// .alt_text("pic of foo") /// .build()) /// .build(); /// ``` pub fn build(self) -> Section<'a> { Section { text: self.text, fields: self.fields.map(|fs| fs.into()), accessory: self.accessory, block_id: self.block_id } } } } #[cfg(feature = "validation")] mod validate { use super::*; use crate::{compose::text, val_helpr::{below_len, ValidatorResult}}; pub(super) fn text(text: &text::Text) -> ValidatorResult { below_len("Section.text", 3000, text.as_ref()) } pub(super) fn block_id(text: &Cow<str>) -> ValidatorResult { below_len("Section.block_id", 255, text.as_ref()) } pub(super) fn fields(texts: &Cow<[text::Text]>) -> ValidatorResult { below_len("Section.fields", 10, texts.as_ref()).and( texts.iter() .map(|text| { below_len( "Section.fields", 2000, text.as_ref()) }) .collect(), ) } }
#[cfg(feature = "validation")] #[cfg_attr(docsrs, doc(cfg(feature = "validation")))] pub fn validate(&self) -> ValidationResult {
random_line_split
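The middle of this row wires the derived `Validate` impl into `Section::validate`. A sketch of what the doctest in the prefix exercises — a `block_id` one character over the 255-char limit failing validation — assuming, as the `cfg` gates above do, that the crate's `validation` feature is enabled:

```rust
use slack_blocks::{blocks, compose::text};

fn main() {
    // 256 chars exceeds the 255-char limit enforced by validate::block_id.
    let long_string = " ".repeat(256);
    let block = blocks::Section::builder()
        .text(text::Plain::from("file_id"))
        .block_id(long_string)
        .build();
    assert!(matches!(block.validate(), Err(_)));
}
```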
section.rs
//! # Section Block //! //! _[slack api docs 🔗]_ //! //! Available in surfaces: //! - [modals 🔗] //! - [messages 🔗] //! - [home tabs 🔗] //! //! A `section` is one of the most flexible blocks available - //! it can be used as a simple text block, //! in combination with text fields, //! or side-by-side with any of the available [block elements 🔗] //! //! [slack api docs 🔗]: https://api.slack.com/reference/block-kit/blocks#section //! [modals 🔗]: https://api.slack.com/surfaces/modals //! [messages 🔗]: https://api.slack.com/surfaces/messages //! [home tabs 🔗]: https://api.slack.com/surfaces/tabs //! [block elements 🔗]: https://api.slack.com/reference/messaging/block-elements use std::borrow::Cow; use serde::{Deserialize, Serialize}; #[cfg(feature = "validation")] use validator::Validate; #[cfg(feature = "validation")] use crate::val_helpr::ValidationResult; use crate::{compose::text, elems::BlockElement}; /// # Section Block /// /// _[slack api docs 🔗]_ /// /// Available in surfaces: /// - [modals 🔗] /// - [messages 🔗] /// - [home tabs 🔗] /// /// A `section` is one of the most flexible blocks available - /// it can be used as a simple text block, /// in combination with text fields, /// or side-by-side with any of the available [block elements 🔗] /// /// [slack api docs 🔗]: https://api.slack.com/reference/block-kit/blocks#section /// [modals 🔗]: https://api.slack.com/surfaces/modals /// [messages 🔗]: https://api.slack.com/surfaces/messages /// [home tabs 🔗]: https://api.slack.com/surfaces/tabs /// [block elements 🔗]: https://api.slack.com/reference/messaging/block-elements #[derive(Clone, Debug, Deserialize, Hash, PartialEq, Serialize)] #[cfg_attr(feature = "validation", derive(Validate))] pub struct Section<'a> { #[serde(skip_serializing_if = "Option::is_none")] #[cfg_attr(feature = "validation", validate(custom = "validate::fields"))] fields: Option<Cow<'a, [text::Text]>>, #[serde(skip_serializing_if = "Option::is_none")] #[cfg_attr(feature = "validation", validate(custom = "validate::text"))] text: Option<text::Text>, #[serde(skip_serializing_if = "Option::is_none")] #[cfg_attr(feature = "validation", validate(custom = "validate::block_id"))] block_id: Option<Cow<'a, str>>, /// One of the available [element objects 🔗][element_objects]. /// /// [element_objects]: https://api.slack.com/reference/messaging/block-elements #[serde(skip_serializing_if = "Option::is_none")] accessory: Option<BlockElement<'a>>, } impl<'a> Section<'a> { /// Build a new section block /// /// For example, see `blocks::section::build::SectionBuilder`. 
pub fn builder() -> build::SectionBuilderInit<'a> { build::SectionBuilderInit::new() } /// Validate that this Section block agrees with Slack's model requirements /// /// # Errors /// - If `fields` contains more than 10 fields /// - If one of `fields` longer than 2000 chars /// - If `text` longer than 3000 chars /// - If `block_id` longer than 255 chars /// /// # Example /// ``` /// use slack_blocks::{blocks, compose::text}; /// /// let long_string = std::iter::repeat(' ').take(256).collect::<String>(); /// /// let block = blocks::Section::builder().text(text::Plain::from("file_id")) /// .block_id(long_string) /// .build(); /// /// assert_eq!(true, matches!(block.validate(), Err(_))); /// ``` #[cfg(feature = "validation")] #[cfg_attr(docsrs, doc(cfg(feature = "validation")))] pub fn validate(&self) -> ValidationResult { Validate::validate(self) } } /// Section block builder pub mod build { use std::marker::PhantomData; use super::*; use crate::build::*; /// Compile-time markers for builder methods #[allow(non_camel_case_types)] pub mod method { /// SectionBuilder.text #[derive(Clone, Copy, Debug)] pub struct text; } /// Initial state for `SectionBuilder` pub type SectionBuilderInit<'a> = SectionBuilder<'a, RequiredMethodNotCalled<method::text>>; /// Build an Section block /// /// Allows you to construct safely, with compile-time checks /// on required setter methods. /// /// # Required Methods /// `SectionBuilder::build()` is only available if these methods have been called: /// - `text` **or** `field(s)`, both may be called. /// /// # Example /// ``` /// use slack_blocks::{blocks::Section, /// elems::Image, /// text, /// text::ToSlackPlaintext}; /// /// let block = /// Section::builder().text("foo".plaintext()) /// .field("bar".plaintext()) /// .field("baz".plaintext()) /// // alternatively: /// .fields(vec!["bar".plaintext(), /// "baz".plaintext()] /// .into_iter() /// .map(text::Text::from) /// ) /// .accessory(Image::builder().image_url("foo.png") /// .alt_text("pic of foo") /// .build()) /// .build(); /// ``` #[derive(Debug)] pub struct SectionBuilder<'a, Text> { accessory: Option<BlockElement<'a>>, text: Option<text::Text>, fields: Option<Vec<text::Text>>, block_id: Option<Cow<'a, str>>, state: PhantomData<Text>, } impl<'a, E> SectionBuilder<'a, E> { /// Create a new SectionBuilder pub fn new() -> Self { Self { accessory: None, text: None, fields: None, block_id: None, state: PhantomData::<_> } } /// Set `accessory` (Optional) pub fn accessory<B>(mut self, acc: B) -> Self where B: Into<BlockElement<'a>> { self.accessory = Some(acc.into()); self } /// Add `text` (**Required: this or `field(s)`**) /// /// The text for the block, in the form of a [text object 🔗]. /// /// Maximum length for the text in this field is 3000 characters. /// /// [text object 🔗]: https://api.slack.com/reference/messaging/composition-objects#text pub fn text<T>(self, text: T) -> SectionBuilder<'a, Set<method::text>> where T: Into<text::Text> { SectionBuilder { accessory: self.accessory, text: Some(text.into()), fields: self.fields, block_id: self.block_id, state: PhantomData::<_> } } /// Set `fields` (**Required: this or `text`**) /// /// A collection of [text objects 🔗]. /// /// Any text objects included with fields will be /// rendered in a compact format that allows for /// 2 columns of side-by-side text. /// /// Maximum number of items is 10. /// /// Maximum length for the text in each item is 2000 characters. 
/// /// [text objects 🔗]: https://api.slack.com/reference/messaging/composition-objects#text pub fn fields<I>(self, fields: I) -> SectionBuilder<'a, Set<method::text>> where I: IntoIterator<Item = text::Text> { SectionBuilder { accessory: self.accessory, text: self.text, fields: Some(fields.into_iter().collect()), block_id: self.block_id, state: PhantomData::<_> } } /// Append a single field to `fields`. pub fn field<T>(mut self, text: T) -> SectionBuilder<'a, Set<method::text>> where T: Into<text::Text> { let mut fields = self.fields.take().unwrap_or_default(); fields.push(text.into()); self.fields(fields) } /// XML macro children, appends `fields` to the Section. /// /// To set `text`, use the `text` attribute. /// ``` /// use slack_blocks::{blocks::Section, blox::*, text, text::ToSlackPlaintext}; /// /// let xml = blox! { /// <section_block text={"Section".plaintext()}> /// <text kind=plain>"Foo"</text> /// <text kind=plain>"Bar"</text> /// </section_block> /// }; /// /// let equiv = Section::builder().text("Section".plaintext()) /// .field("Foo".plaintext()) /// .field("Bar".plaintext()) /// .build(); /// /// assert_eq!(xml, equiv); /// ``` #[cfg(feature = "blox")] #[cfg_attr(docsrs, doc(cfg(feature = "blox")))] pub fn child<T>(self, text: T) -> SectionBuilder<'a, Set<method::text>> where T: Into<text::Text> { self.field(text) } /// Set `block_id` (Optional) /// /// A string acting as a unique identifier for a block. /// /// You can use this `block_id` when you receive an interaction payload /// to [identify the source of the action 🔗]. /// /// If not specified, a `block_id` will be generated. /// /// Maximum length for this field is 255 characters. /// /// [identify the source of the action 🔗]: https://api.slack.com/interactivity/handling#payloads pub fn block_id<S>(mut self, block_id: S) -> Self where S: Into<Cow<'a, str>> { self.block_id = Some(block_id.into()); self } } impl<'a> SectionBuilder<'a, Set<method::text>> { /// All done building, now give me a darn actions block! /// /// > `no method name 'build' found for struct 'SectionBuilder<...>'`? /// Make sure all required setter methods have been called. See docs for `SectionBuilder`. /// /// ```compile_fail /// use slack_blocks::blocks::Section; /// /// let foo = Section::builder().build(); // Won't compile! /// ``` /// /// ``` /// use slack_blocks::{blocks::Section, /// compose::text::ToSlackPlaintext, /// elems::Image}; /// /// let block = /// Section::builder().text("foo".plaintext()) /// .accessory(Image::builder().image_url("foo.png") /// .alt_text("pic of foo") /// .build()) /// .build(); /// ``` pub fn build(self) -> Section<'a> { Section { text: self.text, fields: self.fields.map(|fs| fs.into()), accessory: self.accessory, block_id: self.block_id } } } } #[cfg(feature = "validation")] mod validate { use super::*; use crate::{compose::text, val_helpr::{below_len, ValidatorResult}}; pub(super) fn text(text: &text::Text) -> ValidatorResult { below_len("Section.text", 3000, text.as_ref()) } pub(super) fn block_id(text: &Cow<str>) -> ValidatorResult { below_len("Section.block_id", 255, text.as_ref()) } pub(super) fn fields(texts: &Cow<[text::Text]>) -> ValidatorResult { below_len("Section.fie
10, texts.as_ref()).and( texts.iter() .map(|text| { below_len( "Section.fields", 2000, text.as_ref()) }) .collect(), ) } }
lds",
identifier_name
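Note that this row's middle (`lds",`) splits the call `below_len("Section.fields", ...)` mid-string-literal even though the row is tagged `identifier_name`. Whatever the tag, a consumer reconstructs the original file as prefix + middle + suffix; a minimal sketch of that reconstruction, using this row's visible split (field names follow the dataset's columns):

```rust
// Rebuild a source span from one FIM row: prefix ++ middle ++ suffix.
fn reconstruct(prefix: &str, middle: &str, suffix: &str) -> String {
    format!("{prefix}{middle}{suffix}")
}

fn main() {
    let prefix = r#"below_len("Section.fie"#;
    let middle = r#"lds","#;
    let suffix = " 10, texts.as_ref())";
    assert_eq!(
        reconstruct(prefix, middle, suffix),
        r#"below_len("Section.fields", 10, texts.as_ref())"#
    );
}
```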
section.rs
//! # Section Block //! //! _[slack api docs 🔗]_ //! //! Available in surfaces: //! - [modals 🔗] //! - [messages 🔗] //! - [home tabs 🔗] //! //! A `section` is one of the most flexible blocks available - //! it can be used as a simple text block, //! in combination with text fields, //! or side-by-side with any of the available [block elements 🔗] //! //! [slack api docs 🔗]: https://api.slack.com/reference/block-kit/blocks#section //! [modals 🔗]: https://api.slack.com/surfaces/modals //! [messages 🔗]: https://api.slack.com/surfaces/messages //! [home tabs 🔗]: https://api.slack.com/surfaces/tabs //! [block elements 🔗]: https://api.slack.com/reference/messaging/block-elements use std::borrow::Cow; use serde::{Deserialize, Serialize}; #[cfg(feature = "validation")] use validator::Validate; #[cfg(feature = "validation")] use crate::val_helpr::ValidationResult; use crate::{compose::text, elems::BlockElement}; /// # Section Block /// /// _[slack api docs 🔗]_ /// /// Available in surfaces: /// - [modals 🔗] /// - [messages 🔗] /// - [home tabs 🔗] /// /// A `section` is one of the most flexible blocks available - /// it can be used as a simple text block, /// in combination with text fields, /// or side-by-side with any of the available [block elements 🔗] /// /// [slack api docs 🔗]: https://api.slack.com/reference/block-kit/blocks#section /// [modals 🔗]: https://api.slack.com/surfaces/modals /// [messages 🔗]: https://api.slack.com/surfaces/messages /// [home tabs 🔗]: https://api.slack.com/surfaces/tabs /// [block elements 🔗]: https://api.slack.com/reference/messaging/block-elements #[derive(Clone, Debug, Deserialize, Hash, PartialEq, Serialize)] #[cfg_attr(feature = "validation", derive(Validate))] pub struct Section<'a> { #[serde(skip_serializing_if = "Option::is_none")] #[cfg_attr(feature = "validation", validate(custom = "validate::fields"))] fields: Option<Cow<'a, [text::Text]>>, #[serde(skip_serializing_if = "Option::is_none")] #[cfg_attr(feature = "validation", validate(custom = "validate::text"))] text: Option<text::Text>, #[serde(skip_serializing_if = "Option::is_none")] #[cfg_attr(feature = "validation", validate(custom = "validate::block_id"))] block_id: Option<Cow<'a, str>>, /// One of the available [element objects 🔗][element_objects]. /// /// [element_objects]: https://api.slack.com/reference/messaging/block-elements #[serde(skip_serializing_if = "Option::is_none")] accessory: Option<BlockElement<'a>>, } impl<'a> Section<'a> { /// Build a new section block /// /// For example, see `blocks::section::build::SectionBuilder`. 
pub fn builder() -> build::SectionBuilderInit<'a> { build::SectionBuilderInit::new() } /// Validate that this Section block agrees with Slack's model requirements /// /// # Errors /// - If `fields` contains more than 10 fields /// - If one of `fields` longer than 2000 chars /// - If `text` longer than 3000 chars /// - If `block_id` longer than 255 chars /// /// # Example /// ``` /// use slack_blocks::{blocks, compose::text}; /// /// let long_string = std::iter::repeat(' ').take(256).collect::<String>(); /// /// let block = blocks::Section::builder().text(text::Plain::from("file_id")) /// .block_id(long_string) /// .build(); /// /// assert_eq!(true, matches!(block.validate(), Err(_))); /// ``` #[cfg(feature = "validation")] #[cfg_attr(docsrs, doc(cfg(feature = "validation")))] pub fn validate(&self) -> ValidationResult { Validate::validate(self) } } /// Section block builder pub mod build { use std::marker::PhantomData; use super::*; use crate::build::*; /// Compile-time markers for builder methods #[allow(non_camel_case_types)] pub mod method { /// SectionBuilder.text #[derive(Clone, Copy, Debug)] pub struct text; } /// Initial state for `SectionBuilder` pub type SectionBuilderInit<'a> = SectionBuilder<'a, RequiredMethodNotCalled<method::text>>; /// Build an Section block /// /// Allows you to construct safely, with compile-time checks /// on required setter methods. /// /// # Required Methods /// `SectionBuilder::build()` is only available if these methods have been called: /// - `text` **or** `field(s)`, both may be called. /// /// # Example /// ``` /// use slack_blocks::{blocks::Section, /// elems::Image, /// text, /// text::ToSlackPlaintext}; /// /// let block = /// Section::builder().text("foo".plaintext()) /// .field("bar".plaintext()) /// .field("baz".plaintext()) /// // alternatively: /// .fields(vec!["bar".plaintext(), /// "baz".plaintext()] /// .into_iter() /// .map(text::Text::from) /// ) /// .accessory(Image::builder().image_url("foo.png") /// .alt_text("pic of foo") /// .build()) /// .build(); /// ``` #[derive(Debug)] pub struct SectionBuilder<'a, Text> { accessory: Option<BlockElement<'a>>, text: Option<text::Text>, fields: Option<Vec<text::Text>>, block_id: Option<Cow<'a, str>>, state: PhantomData<Text>, } impl<'a, E> SectionBuilder<'a, E> { /// Create a new SectionBuilder pub fn new() -> Self { Self { accessory: None, text: None, fields: None, block_id: None, state: PhantomData::<_> } } /// Set `accessory` (Optional) pub fn accessory<B>(mut self, acc: B) -> Self where B: Into<BlockElement<'a>> { self.accessory = Some(acc.into()); self } /// Add `text` (**Required: this or `field(s)`**) /// /// The text for the block, in the form of a [text object 🔗]. /// /// Maximum length for the text in this field is 3000 characters. /// /// [text object 🔗]: https://api.slack.com/reference/messaging/composition-objects#text pub fn text<T>(self, text: T) -> SectionBuilder<'a, Set<method::text>> where T: Into<text::Text> { SectionBuilder { accessory: self.accessory, text: Some(text.into()), fields: self.fields, block_id: self.block_id, state: PhantomData::<_> } } /// Set `fields` (**Required: this or `text`**) /// /// A collection of [text objects 🔗]. /// /// Any text objects included with fields will be /// rendered in a compact format that allows for /// 2 columns of side-by-side text. /// /// Maximum number of items is 10. /// /// Maximum length for the text in each item is 2000 characters. 
/// /// [text objects 🔗]: https://api.slack.com/reference/messaging/composition-objects#text pub fn fields<I>(self, fields: I) -> SectionBuilder<'a, Set<method::text>> where I: IntoIterator<Item = text::Text> { SectionBuilder { accessory: self.accessory, text: self.text, fields: Some(fields.into_iter().collect()), block_id: self.block_id, state: PhantomData::<_> } } /// Append a single field to `fields`. pub fn field<T>(mut self, text: T) -> SectionBuilder<'a, Set<method::text>> where T: Into<text::Text> { let mut fields = self.fields.take().unwrap_or_default(); fields.push(text.into()); self.fields(fields) } /// XML macro children, appends `fields` to the Section. /// /// To set `text`, use the `text` attribute. /// ``` /// use slack_blocks::{blocks::Section, blox::*, text, text::ToSlackPlaintext}; /// /// let xml = blox! { /// <section_block text={"Section".plaintext()}> /// <text kind=plain>"Foo"</text> /// <text kind=plain>"Bar"</text> /// </section_block> /// }; /// /// let equiv = Section::builder().text("Section".plaintext()) /// .field("Foo".plaintext()) /// .field("Bar".plaintext()) /// .build(); /// /// assert_eq!(xml, equiv); /// ``` #[cfg(feature = "blox")] #[cfg_attr(docsrs, doc(cfg(feature = "blox")))] pub fn child<T>(self, text: T) -> SectionBuilder<'a, Set<method::text>> where T: Into<text::Text> { self.field(text) } /// Set `block_id` (Optional) ///
ique identifier for a block. /// /// You can use this `block_id` when you receive an interaction payload /// to [identify the source of the action 🔗]. /// /// If not specified, a `block_id` will be generated. /// /// Maximum length for this field is 255 characters. /// /// [identify the source of the action 🔗]: https://api.slack.com/interactivity/handling#payloads pub fn block_id<S>(mut self, block_id: S) -> Self where S: Into<Cow<'a, str>> { self.block_id = Some(block_id.into()); self } } impl<'a> SectionBuilder<'a, Set<method::text>> { /// All done building, now give me a darn actions block! /// /// > `no method name 'build' found for struct 'SectionBuilder<...>'`? /// Make sure all required setter methods have been called. See docs for `SectionBuilder`. /// /// ```compile_fail /// use slack_blocks::blocks::Section; /// /// let foo = Section::builder().build(); // Won't compile! /// ``` /// /// ``` /// use slack_blocks::{blocks::Section, /// compose::text::ToSlackPlaintext, /// elems::Image}; /// /// let block = /// Section::builder().text("foo".plaintext()) /// .accessory(Image::builder().image_url("foo.png") /// .alt_text("pic of foo") /// .build()) /// .build(); /// ``` pub fn build(self) -> Section<'a> { Section { text: self.text, fields: self.fields.map(|fs| fs.into()), accessory: self.accessory, block_id: self.block_id } } } } #[cfg(feature = "validation")] mod validate { use super::*; use crate::{compose::text, val_helpr::{below_len, ValidatorResult}}; pub(super) fn text(text: &text::Text) -> ValidatorResult { below_len("Section.text", 3000, text.as_ref()) } pub(super) fn block_id(text: &Cow<str>) -> ValidatorResult { below_len("Section.block_id", 255, text.as_ref()) } pub(super) fn fields(texts: &Cow<[text::Text]>) -> ValidatorResult { below_len("Section.fields", 10, texts.as_ref()).and( texts.iter() .map(|text| { below_len( "Section.fields", 2000, text.as_ref()) }) .collect(), ) } }
/// A string acting as a un
identifier_body
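The `validate::fields` function in this row checks the field *count* first and then each field's *length*, combining the results with `Result::and`. A standalone sketch of that pattern; `below_len` here is a hypothetical stand-in for the crate's `val_helpr::below_len` (signature simplified), not the real helper:

```rust
// Stand-in for val_helpr::below_len: Err if len exceeds max (assumption).
fn below_len(field: &str, max: usize, len: usize) -> Result<(), String> {
    (len <= max)
        .then_some(())
        .ok_or_else(|| format!("{field}: length {len} exceeds max {max}"))
}

fn main() {
    // Mirrors Section.fields: at most 10 items, each at most 2000 chars.
    let fields = vec!["a".repeat(100), "b".repeat(2500)];
    let result = below_len("Section.fields", 10, fields.len()).and(
        fields
            .iter()
            .try_for_each(|f| below_len("Section.fields", 2000, f.len())),
    );
    assert!(result.is_err()); // the 2500-char field fails
}
```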
install.rs
https://bugzilla.redhat.com/show_bug.cgi?id=1905159 #[allow(clippy::match_bool, clippy::match_single_binding)] let sector_size = match is_dasd(device, None) .with_context(|| format!("checking whether {device} is an IBM DASD disk"))? { #[cfg(target_arch = "s390x")] true => s390x::dasd_try_get_sector_size(device).transpose(), _ => None, }; let sector_size = sector_size .unwrap_or_else(|| get_sector_size_for_path(Path::new(device))) .with_context(|| format!("getting sector size of {device}"))? .get(); // Set up DASD. We need to do this before initiating the download // because otherwise the download might time out while we're low-level // formatting the DASD. #[cfg(target_arch = "s390x")] { if is_dasd(device, None)? { if!save_partitions.is_empty() { // The user requested partition saving, but SavedPartitions // doesn't understand DASD VTOCs and won't find any partitions // to save. bail!("saving DASD partitions is not supported"); } s390x::prepare_dasd(device)?; } } // set up image source // create location let location: Box<dyn ImageLocation> = if let Some(image_file) = &config.image_file { Box::new(FileLocation::new(image_file)) } else if let Some(image_url) = &config.image_url { Box::new(UrlLocation::new(image_url, config.fetch_retries)) } else if config.offline { match OsmetLocation::new(config.architecture.as_str(), sector_size)? { Some(osmet) => Box::new(osmet), None => bail!("cannot perform offline install; metadata missing"), } } else { // For now, using --stream automatically will cause a download. In the future, we could // opportunistically use osmet if the version and stream match an osmet file/the live ISO. let maybe_osmet = match config.stream { Some(_) => None, None => OsmetLocation::new(config.architecture.as_str(), sector_size)?, }; if let Some(osmet) = maybe_osmet { Box::new(osmet) } else { let format = match sector_size { 4096 => "4k.raw.xz", 512 => "raw.xz", n => { // could bail on non-512, but let's be optimistic and just warn but try the regular // 512b image eprintln!( "Found non-standard sector size {n} for {device}, assuming 512b-compatible" ); "raw.xz" } }; Box::new(StreamLocation::new( config.stream.as_deref().unwrap_or("stable"), config.architecture.as_str(), "metal", format, config.stream_base_url.as_ref(), config.fetch_retries, )?) } }; // report it to the user eprintln!("{location}"); // we only support installing from a single artifact let mut sources = location.sources()?; let mut source = sources.pop().context("no artifacts found")?; if!sources.is_empty() { bail!("found multiple artifacts"); } if source.signature.is_none() && location.require_signature() { if config.insecure { eprintln!("Signature not found; skipping verification as requested"); } else { bail!("--insecure not specified and signature not found"); } } // open output; ensure it's a block device and we have exclusive access let mut dest = OpenOptions::new() .read(true) .write(true) .open(device) .with_context(|| format!("opening {device}"))?; if!dest .metadata() .with_context(|| format!("getting metadata for {device}"))? .file_type() .is_block_device() { bail!("{} is not a block device", device); } ensure_exclusive_access(device) .with_context(|| format!("checking for exclusive access to {device}"))?;
// save partitions that we plan to keep let saved = SavedPartitions::new_from_disk(&mut dest, &save_partitions) .with_context(|| format!("saving partitions from {device}"))?; // get reference to partition table // For kpartx partitioning, this will conditionally call kpartx -d // when dropped let mut table = Disk::new(device)? .get_partition_table() .with_context(|| format!("getting partition table for {device}"))?; // copy and postprocess disk image // On failure, clear and reread the partition table to prevent the disk // from accidentally being used. dest.rewind().with_context(|| format!("seeking {device}"))?; if let Err(err) = write_disk( &config, &mut source, &mut dest, &mut *table, &saved, ignition, network_config, ) { // log the error so the details aren't dropped if we encounter // another error during cleanup eprintln!("\nError: {err:?}\n"); // clean up if config.preserve_on_error { eprintln!("Preserving partition table as requested"); if saved.is_saved() { // The user asked to preserve the damaged partition table // for debugging. We also have saved partitions, and those // may or may not be in the damaged table depending where we // failed. Preserve the saved partitions by writing them to // a file in /tmp and telling the user about it. Hey, it's // a debug flag. stash_saved_partitions(&mut dest, &saved)?; } } else { reset_partition_table(&config, &mut dest, &mut *table, &saved)?; } // return a generic error so our exit status is right bail!("install failed"); } // Because grub picks /boot by label and the OS picks /boot, we can end up racing/flapping // between picking a /boot partition on startup. So check amount of filesystems labeled 'boot' // and warn user if it's not only one match get_filesystems_with_label("boot", true) { Ok(pts) => { if pts.len() > 1 { let rootdev = fs::canonicalize(device) .unwrap_or_else(|_| PathBuf::from(device)) .to_string_lossy() .to_string(); let pts = pts .iter() .filter(|pt|!pt.contains(&rootdev)) .collect::<Vec<_>>(); eprintln!("\nNote: detected other devices with a filesystem labeled `boot`:"); for pt in pts { eprintln!(" - {pt}"); } eprintln!("The installed OS may not work correctly if there are multiple boot filesystems. Before rebooting, investigate whether these filesystems are needed and consider wiping them with `wipefs -a`.\n" ); } } Err(e) => eprintln!("checking filesystems labeled 'boot': {e:?}"), } eprintln!("Install complete."); Ok(()) } fn parse_partition_filters(labels: &[&str], indexes: &[&str]) -> Result<Vec<PartitionFilter>> { use PartitionFilter::*; let mut filters: Vec<PartitionFilter> = Vec::new(); // partition label globs for glob in labels { let filter = Label( glob::Pattern::new(glob) .with_context(|| format!("couldn't parse label glob '{glob}'"))?, ); filters.push(filter); } // partition index ranges let parse_index = |i: &str| -> Result<Option<NonZeroU32>> { match i { "" => Ok(None), // open end of range _ => Ok(Some( NonZeroU32::new( i.parse() .with_context(|| format!("couldn't parse partition index '{i}'"))?, ) .context("partition index cannot be zero")?, )), } }; for range in indexes { let parts: Vec<&str> = range.split('-').collect(); let filter = match parts.len() { 1 => Index(parse_index(parts[0])?, parse_index(parts[0])?), 2 => Index(parse_index(parts[0])?,
parse_index(parts[1])?), _ => bail!("couldn't parse partition index range '{}'", range), }; match filter { Index(None, None) => bail!( "both ends of partition index range '{}' cannot be open", range ), Index(Some(x), Some(y)) if x > y => bail!( "start of partition index range '{}' cannot be greater than end", range ), _ => filters.push(filter), }; } Ok(filters) } fn ensure_exclusive_access(device: &str) -> Result<()> { let mut parts = Disk::new(device)?.get_busy_partitions()?; if parts.is_empty() { return Ok(()); } parts.sort_unstable_by_key(|p| p.path.to_string()); eprintln!("Partitions in use on {device}:"); for part in parts { if let Some(mountpoint) = part.mountpoint.as_ref() { eprintln!(" {} mounted on {}", part.path, mountpoint); } if part.swap { eprintln!(" {} is swap device", part.path); } for holder in part.get_holders()? { eprintln!(" {} in use by {}", part.path, holder); } } bail!("found busy partitions"); } /// Copy the image source to the target disk and do all post-processing. /// If this function fails, the caller should wipe the partition table /// to ensure the user doesn't boot from a partially-written disk. fn write_disk( config: &InstallConfig, source: &mut ImageSource, dest: &mut File, table: &mut dyn PartTable, saved: &SavedPartitions, ignition: Option<File>, network_config: Option<&str>, ) -> Result<()> { let device = config.dest_device.as_deref().expect("device missing"); // Get sector size of destination, for comparing with image let sector_size = get_sector_size(dest)?; // copy the image #[allow(clippy::match_bool, clippy::match_single_binding)] let image_copy = match is_dasd(device, Some(dest))? { #[cfg(target_arch = "s390x")] true => s390x::image_copy_s390x, _ => image_copy_default, }; write_image( source, dest, Path::new(device), image_copy, true, Some(saved), Some(sector_size), VerifyKeys::Production, )?; table.reread()?; // postprocess if ignition.is_some() || config.firstboot_args.is_some() ||!config.append_karg.is_empty() ||!config.delete_karg.is_empty() || config.platform.is_some() ||!config.console.is_empty() || network_config.is_some() || cfg!(target_arch = "s390x") { let mount = Disk::new(device)?.mount_partition_by_label("boot", mount::MsFlags::empty())?; if let Some(ignition) = ignition.as_ref() { write_ignition(mount.mountpoint(), &config.ignition_hash, ignition) .context("writing Ignition configuration")?; } if let Some(platform) = config.platform.as_ref() { write_platform(mount.mountpoint(), platform).context("writing platform ID")?; } if config.platform.is_some() ||!config.console.is_empty() { write_console( mount.mountpoint(), config.platform.as_deref(), &config.console, ) .context("configuring console")?; } if let Some(firstboot_args) = config.firstboot_args.as_ref() { write_firstboot_kargs(mount.mountpoint(), firstboot_args) .context("writing firstboot kargs")?; } if!config.append_karg.is_empty() ||!config.delete_karg.is_empty() { eprintln!("Modifying kernel arguments"); Console::maybe_warn_on_kargs(&config.append_karg, "--append-karg", "--console"); visit_bls_entry_options(mount.mountpoint(), |orig_options: &str| { KargsEditor::new() .append(config.append_karg.as_slice()) .delete(config.delete_karg.as_slice()) .maybe_apply_to(orig_options) }) .context("deleting and appending kargs")?;
} if let Some(network_config) = network_config.as_ref() { copy_network_config(mount.mountpoint(), network_config)?; } #[cfg(target_arch = "s390x")] { s390x::zipl( mount.mountpoint(), None, None, s390x::ZiplSecexMode::Disable, None, )?; s390x::chreipl(device)?; } } // detect any latent write errors dest.sync_all().context("syncing data to disk")?; Ok(()) } /// Write the Ignition config. fn write_ignition( mountpoint: &Path, digest_in: &Option<IgnitionHash>, mut config_in: &File, ) -> Result<()> { eprintln!("Writing Ignition config"); // Verify configuration digest, if any. if let Some(digest) = &digest_in { digest .validate(&mut config_in) .context("failed to validate Ignition configuration digest")?; config_in .rewind() .context("rewinding Ignition configuration file")?; }; // make parent directory let mut config_dest = mountpoint.to_path_buf(); config_dest.push("ignition"); if!config_dest.is_dir() { fs::create_dir_all(&config_dest).with_context(|| { format!( "creating Ignition config directory {}", config_dest.display() ) })?; // Ignition data may contain secrets; restrict to root fs::set_permissions(&config_dest, Permissions::from_mode(0o700)).with_context(|| { format!( "setting file mode for Ignition directory {}", config_dest.display() ) })?; } // do the copy config_dest.push("config.ign"); let mut config_out = OpenOptions::new() .write(true) .create_new(true) .open(&config_dest) .with_context(|| { format!( "opening destination Ignition config {}", config_dest.display() ) })?; // Ignition config may contain secrets; restrict to root fs::set_permissions(&config_dest, Permissions::from_mode(0o600)).with_context(|| { format!( "setting file mode for destination Ignition config {}", config_dest.display() ) })?; io::copy(&mut config_in, &mut config_out).context("writing Ignition config")?; Ok(()) } /// Write first-boot kernel arguments. fn
(mountpoint: &Path, args: &str) -> Result<()> { eprintln!("Writing first-boot kernel arguments"); // write the arguments let mut config_dest = mountpoint.to_path_buf(); config_dest.push("ignition.firstboot"); // if the file doesn't already exist, fail, since our assumptions // are wrong let mut config_out = OpenOptions::new() .append(true) .open(&config_dest) .with_context(|| format!("opening first-boot file {}", config_dest.display()))?; let contents = format!("set ignition_network_kcmdline=\"{args}\"\n"); config_out .write_all(contents.as_bytes()) .context("writing first-boot kernel arguments")?; Ok(()) } #[derive(Clone, Default, Deserialize)] struct PlatformSpec { #[serde(default)] grub_commands: Vec<String>, #[serde(default)] kernel_arguments: Vec<String>, } /// Override the platform ID. fn write_platform(mountpoint: &Path, platform: &str) -> Result<()> { // early return if setting the platform to the default value, since // otherwise we'll think we failed to set it if platform == "metal" { return Ok(()); } eprintln!("Setting platform to {platform}"); // We assume that we will only install from metal images and that the // bootloader configs will always set ignition.platform.id. visit_bls_entry_options(mountpoint, |orig_options: &str| { let new_options = KargsEditor::new() .replace(&[format!("ignition.platform.id=metal={platform}")]) .apply_to(orig_options) .context("setting platform ID argument")?; if orig_options == new_options { bail!("couldn't locate platform ID"); } Ok(Some(new_options)) })?; Ok(()) } /// Configure console kernel arguments and GRUB commands. fn write_console(mountpoint: &Path, platform: Option<&str>, consoles: &[Console]) -> Result<()> { // read platforms table let platforms = match fs::read_to_string(mountpoint.join("coreos/platforms.json")) { Ok(json) => serde_json::from_str::<HashMap<String, PlatformSpec>>(&json) .context("parsing platform table")?, // no table for this image? Err(e) if e.kind() == std::io::ErrorKind::NotFound => Default::default(), Err(e) => return Err(e).context("reading platform table"), }; let mut kargs = Vec::new(); let mut grub_commands = Vec::new(); if!consoles.is_empty() { // custom console settings completely override platform-specific // defaults let mut grub_terminals = Vec::new(); for console in consoles { kargs.push(console.karg()); if let Some(cmd) = console.grub_command() { grub_commands.push(cmd); } grub_terminals.push(console.grub_terminal()); } grub_terminals.sort_unstable(); grub_terminals.dedup(); for direction in ["input", "output"] { grub_commands.push(format!("terminal_{direction} {}", grub_terminals.join(" "))); } } else if let Some(platform) = platform { // platform-specific defaults if platform == "metal" { // we're just being asked to apply the defaults which are already // applied return Ok(()); } let spec = platforms.get(platform).cloned().unwrap_or_default(); kargs.extend(spec.kernel_arguments); grub_commands.extend(spec.grub_commands); } else { // nothing to do and the caller shouldn't have called us unreachable!(); } // set kargs, removing any metal-specific ones let metal_spec = platforms.get("metal").cloned().unwrap_or_default(); visit_bls_entry_options(mountpoint, |orig_options: &str| { KargsEditor::new() .append(&kargs) .delete(&metal_spec.kernel_arguments)
write_firstboot_kargs
identifier_name
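This row's middle is just the identifier `write_firstboot_kargs`; the suffix shows its body appending a `set ignition_network_kcmdline=...` line to an existing `ignition.firstboot` file. A self-contained sketch of that append step against a temp directory — the path and `args` value here are illustrative, and the format string is taken from the suffix above:

```rust
use std::fs::{self, OpenOptions};
use std::io::Write;

fn main() -> std::io::Result<()> {
    // Illustrative stand-in for the mounted /boot directory.
    let dir = std::env::temp_dir().join("firstboot-demo");
    fs::create_dir_all(&dir)?;
    let path = dir.join("ignition.firstboot");
    fs::write(&path, "")?; // the real code fails if this file doesn't already exist

    let args = "ip=dhcp";
    let mut out = OpenOptions::new().append(true).open(&path)?;
    out.write_all(format!("set ignition_network_kcmdline=\"{args}\"\n").as_bytes())?;

    assert!(fs::read_to_string(&path)?.contains("ignition_network_kcmdline"));
    Ok(())
}
```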
install.rs
// log the error so the details aren't dropped if we encounter // another error during cleanup eprintln!("\nError: {err:?}\n"); // clean up if config.preserve_on_error { eprintln!("Preserving partition table as requested"); if saved.is_saved() { // The user asked to preserve the damaged partition table // for debugging. We also have saved partitions, and those // may or may not be in the damaged table depending where we // failed. Preserve the saved partitions by writing them to // a file in /tmp and telling the user about it. Hey, it's // a debug flag. stash_saved_partitions(&mut dest, &saved)?; } } else { reset_partition_table(&config, &mut dest, &mut *table, &saved)?; } // return a generic error so our exit status is right bail!("install failed"); } // Because grub picks /boot by label and the OS picks /boot, we can end up racing/flapping // between picking a /boot partition on startup. So check amount of filesystems labeled 'boot' // and warn user if it's not only one match get_filesystems_with_label("boot", true) { Ok(pts) => { if pts.len() > 1 { let rootdev = fs::canonicalize(device) .unwrap_or_else(|_| PathBuf::from(device)) .to_string_lossy() .to_string(); let pts = pts .iter() .filter(|pt|!pt.contains(&rootdev)) .collect::<Vec<_>>(); eprintln!("\nNote: detected other devices with a filesystem labeled `boot`:"); for pt in pts { eprintln!(" - {pt}"); } eprintln!("The installed OS may not work correctly if there are multiple boot filesystems. Before rebooting, investigate whether these filesystems are needed and consider wiping them with `wipefs -a`.\n" ); } } Err(e) => eprintln!("checking filesystems labeled 'boot': {e:?}"), } eprintln!("Install complete."); Ok(()) } fn parse_partition_filters(labels: &[&str], indexes: &[&str]) -> Result<Vec<PartitionFilter>> { use PartitionFilter::*; let mut filters: Vec<PartitionFilter> = Vec::new(); // partition label globs for glob in labels { let filter = Label( glob::Pattern::new(glob) .with_context(|| format!("couldn't parse label glob '{glob}'"))?, ); filters.push(filter); } // partition index ranges let parse_index = |i: &str| -> Result<Option<NonZeroU32>> { match i { "" => Ok(None), // open end of range _ => Ok(Some( NonZeroU32::new( i.parse() .with_context(|| format!("couldn't parse partition index '{i}'"))?, ) .context("partition index cannot be zero")?, )), } }; for range in indexes { let parts: Vec<&str> = range.split('-').collect(); let filter = match parts.len() { 1 => Index(parse_index(parts[0])?, parse_index(parts[0])?), 2 => Index(parse_index(parts[0])?, parse_index(parts[1])?), _ => bail!("couldn't parse partition index range '{}'", range), }; match filter { Index(None, None) => bail!( "both ends of partition index range '{}' cannot be open", range ), Index(Some(x), Some(y)) if x > y => bail!( "start of partition index range '{}' cannot be greater than end", range ), _ => filters.push(filter), }; } Ok(filters) } fn ensure_exclusive_access(device: &str) -> Result<()> { let mut parts = Disk::new(device)?.get_busy_partitions()?; if parts.is_empty() { return Ok(()); } parts.sort_unstable_by_key(|p| p.path.to_string()); eprintln!("Partitions in use on {device}:"); for part in parts { if let Some(mountpoint) = part.mountpoint.as_ref() { eprintln!(" {} mounted on {}", part.path, mountpoint); } if part.swap { eprintln!(" {} is swap device", part.path); } for holder in part.get_holders()? 
{ eprintln!(" {} in use by {}", part.path, holder); } } bail!("found busy partitions"); } /// Copy the image source to the target disk and do all post-processing. /// If this function fails, the caller should wipe the partition table /// to ensure the user doesn't boot from a partially-written disk. fn write_disk( config: &InstallConfig, source: &mut ImageSource, dest: &mut File, table: &mut dyn PartTable, saved: &SavedPartitions, ignition: Option<File>, network_config: Option<&str>, ) -> Result<()> { let device = config.dest_device.as_deref().expect("device missing"); // Get sector size of destination, for comparing with image let sector_size = get_sector_size(dest)?; // copy the image #[allow(clippy::match_bool, clippy::match_single_binding)] let image_copy = match is_dasd(device, Some(dest))? { #[cfg(target_arch = "s390x")] true => s390x::image_copy_s390x, _ => image_copy_default, }; write_image( source, dest, Path::new(device), image_copy, true, Some(saved), Some(sector_size), VerifyKeys::Production, )?; table.reread()?; // postprocess if ignition.is_some() || config.firstboot_args.is_some() ||!config.append_karg.is_empty() ||!config.delete_karg.is_empty() || config.platform.is_some() ||!config.console.is_empty() || network_config.is_some() || cfg!(target_arch = "s390x") { let mount = Disk::new(device)?.mount_partition_by_label("boot", mount::MsFlags::empty())?; if let Some(ignition) = ignition.as_ref() { write_ignition(mount.mountpoint(), &config.ignition_hash, ignition) .context("writing Ignition configuration")?; } if let Some(platform) = config.platform.as_ref() { write_platform(mount.mountpoint(), platform).context("writing platform ID")?; } if config.platform.is_some() ||!config.console.is_empty() { write_console( mount.mountpoint(), config.platform.as_deref(), &config.console, ) .context("configuring console")?; } if let Some(firstboot_args) = config.firstboot_args.as_ref() { write_firstboot_kargs(mount.mountpoint(), firstboot_args) .context("writing firstboot kargs")?; } if!config.append_karg.is_empty() ||!config.delete_karg.is_empty() { eprintln!("Modifying kernel arguments"); Console::maybe_warn_on_kargs(&config.append_karg, "--append-karg", "--console"); visit_bls_entry_options(mount.mountpoint(), |orig_options: &str| { KargsEditor::new() .append(config.append_karg.as_slice()) .delete(config.delete_karg.as_slice()) .maybe_apply_to(orig_options) }) .context("deleting and appending kargs")?; } if let Some(network_config) = network_config.as_ref() { copy_network_config(mount.mountpoint(), network_config)?; } #[cfg(target_arch = "s390x")] { s390x::zipl( mount.mountpoint(), None, None, s390x::ZiplSecexMode::Disable, None, )?; s390x::chreipl(device)?; } } // detect any latent write errors dest.sync_all().context("syncing data to disk")?; Ok(()) } /// Write the Ignition config. fn write_ignition( mountpoint: &Path, digest_in: &Option<IgnitionHash>, mut config_in: &File, ) -> Result<()> { eprintln!("Writing Ignition config"); // Verify configuration digest, if any. 
if let Some(digest) = &digest_in { digest .validate(&mut config_in) .context("failed to validate Ignition configuration digest")?; config_in .rewind() .context("rewinding Ignition configuration file")?; }; // make parent directory let mut config_dest = mountpoint.to_path_buf(); config_dest.push("ignition"); if!config_dest.is_dir() { fs::create_dir_all(&config_dest).with_context(|| { format!( "creating Ignition config directory {}", config_dest.display() ) })?; // Ignition data may contain secrets; restrict to root fs::set_permissions(&config_dest, Permissions::from_mode(0o700)).with_context(|| { format!( "setting file mode for Ignition directory {}", config_dest.display() ) })?; } // do the copy config_dest.push("config.ign"); let mut config_out = OpenOptions::new() .write(true) .create_new(true) .open(&config_dest) .with_context(|| { format!( "opening destination Ignition config {}", config_dest.display() ) })?; // Ignition config may contain secrets; restrict to root fs::set_permissions(&config_dest, Permissions::from_mode(0o600)).with_context(|| { format!( "setting file mode for destination Ignition config {}", config_dest.display() ) })?; io::copy(&mut config_in, &mut config_out).context("writing Ignition config")?; Ok(()) } /// Write first-boot kernel arguments. fn write_firstboot_kargs(mountpoint: &Path, args: &str) -> Result<()> { eprintln!("Writing first-boot kernel arguments"); // write the arguments let mut config_dest = mountpoint.to_path_buf(); config_dest.push("ignition.firstboot"); // if the file doesn't already exist, fail, since our assumptions // are wrong let mut config_out = OpenOptions::new() .append(true) .open(&config_dest) .with_context(|| format!("opening first-boot file {}", config_dest.display()))?; let contents = format!("set ignition_network_kcmdline=\"{args}\"\n"); config_out .write_all(contents.as_bytes()) .context("writing first-boot kernel arguments")?; Ok(()) } #[derive(Clone, Default, Deserialize)] struct PlatformSpec { #[serde(default)] grub_commands: Vec<String>, #[serde(default)] kernel_arguments: Vec<String>, } /// Override the platform ID. fn write_platform(mountpoint: &Path, platform: &str) -> Result<()> { // early return if setting the platform to the default value, since // otherwise we'll think we failed to set it if platform == "metal" { return Ok(()); } eprintln!("Setting platform to {platform}"); // We assume that we will only install from metal images and that the // bootloader configs will always set ignition.platform.id. visit_bls_entry_options(mountpoint, |orig_options: &str| { let new_options = KargsEditor::new() .replace(&[format!("ignition.platform.id=metal={platform}")]) .apply_to(orig_options) .context("setting platform ID argument")?; if orig_options == new_options { bail!("couldn't locate platform ID"); } Ok(Some(new_options)) })?; Ok(()) } /// Configure console kernel arguments and GRUB commands. fn write_console(mountpoint: &Path, platform: Option<&str>, consoles: &[Console]) -> Result<()> { // read platforms table let platforms = match fs::read_to_string(mountpoint.join("coreos/platforms.json")) { Ok(json) => serde_json::from_str::<HashMap<String, PlatformSpec>>(&json) .context("parsing platform table")?, // no table for this image? 
Err(e) if e.kind() == std::io::ErrorKind::NotFound => Default::default(), Err(e) => return Err(e).context("reading platform table"), }; let mut kargs = Vec::new(); let mut grub_commands = Vec::new(); if!consoles.is_empty() { // custom console settings completely override platform-specific // defaults let mut grub_terminals = Vec::new(); for console in consoles { kargs.push(console.karg()); if let Some(cmd) = console.grub_command() { grub_commands.push(cmd); } grub_terminals.push(console.grub_terminal()); } grub_terminals.sort_unstable(); grub_terminals.dedup(); for direction in ["input", "output"] { grub_commands.push(format!("terminal_{direction} {}", grub_terminals.join(" "))); } } else if let Some(platform) = platform { // platform-specific defaults if platform == "metal" { // we're just being asked to apply the defaults which are already // applied return Ok(()); } let spec = platforms.get(platform).cloned().unwrap_or_default(); kargs.extend(spec.kernel_arguments); grub_commands.extend(spec.grub_commands); } else { // nothing to do and the caller shouldn't have called us unreachable!(); } // set kargs, removing any metal-specific ones let metal_spec = platforms.get("metal").cloned().unwrap_or_default(); visit_bls_entry_options(mountpoint, |orig_options: &str| { KargsEditor::new() .append(&kargs) .delete(&metal_spec.kernel_arguments) .maybe_apply_to(orig_options) .context("setting platform kernel arguments") })?; // set grub commands if grub_commands!= metal_spec.grub_commands { let path = mountpoint.join("grub2/grub.cfg"); let grub_cfg = fs::read_to_string(&path).context("reading grub.cfg")?; let new_grub_cfg = update_grub_cfg_console_settings(&grub_cfg, &grub_commands) .context("updating grub.cfg")?; fs::write(&path, new_grub_cfg).context("writing grub.cfg")?; } Ok(()) } /// Rewrite the grub.cfg CONSOLE-SETTINGS block to use the specified GRUB /// commands, and return the result. fn update_grub_cfg_console_settings(grub_cfg: &str, commands: &[String]) -> Result<String> { let mut new_commands = commands.join("\n"); if!new_commands.is_empty() { new_commands.push('\n'); } let re = Regex::new(GRUB_CFG_CONSOLE_SETTINGS_RE).unwrap(); if!re.is_match(grub_cfg) { bail!("missing substitution marker in grub.cfg"); } Ok(re .replace(grub_cfg, |caps: &Captures| { format!( "{}{}{}", caps.name("prefix").expect("didn't match prefix").as_str(), new_commands, caps.name("suffix").expect("didn't match suffix").as_str() ) }) .into_owned()) } /// Copy networking config if asked to do so fn copy_network_config(mountpoint: &Path, net_config_src: &str) -> Result<()> { eprintln!("Copying networking configuration from {net_config_src}"); // get the path to the destination directory let net_config_dest = mountpoint.join("coreos-firstboot-network"); // make the directory if it doesn't exist fs::create_dir_all(&net_config_dest).with_context(|| { format!( "creating destination networking config directory {}", net_config_dest.display() ) })?; // copy files from source to destination directories for entry in fs::read_dir(net_config_src) .with_context(|| format!("reading directory {net_config_src}"))? { let entry = entry.with_context(|| format!("reading directory {net_config_src}"))?; let srcpath = entry.path(); let destpath = net_config_dest.join(entry.file_name()); if srcpath.is_file() { eprintln!("Copying {} to installed system", srcpath.display()); fs::copy(&srcpath, destpath).context("Copying networking config")?; } } Ok(()) } /// Clear the partition table and restore saved partitions. For use after /// a failure. 
fn reset_partition_table( config: &InstallConfig, dest: &mut File, table: &mut dyn PartTable, saved: &SavedPartitions, ) -> Result<()> { eprintln!("Resetting partition table"); let device = config.dest_device.as_deref().expect("device missing"); if is_dasd(device, Some(dest))? { // Don't write out a GPT, since the backup GPT may overwrite // something we're not allowed to touch. Just clear the first MiB // of disk. dest.rewind().context("seeking to start of disk")?; let zeroes = [0u8; 1024 * 1024]; dest.write_all(&zeroes) .context("clearing primary partition table")?; } else { // Write a new GPT including any saved partitions. saved .overwrite(dest) .context("restoring saved partitions")?; } // Finish writeback and reread the partition table. dest.sync_all().context("syncing partition table to disk")?; table.reread()?; Ok(()) } // Preserve saved partitions by writing them to a file in /tmp and reporting // the path. fn stash_saved_partitions(disk: &mut File, saved: &SavedPartitions) -> Result<()> { let mut stash = tempfile::Builder::new() .prefix("coreos-installer-partitions.") .tempfile() .context("creating partition stash file")?; let path = stash.path().to_owned(); eprintln!("Storing saved partition entries to {}", path.display()); let len = disk.seek(SeekFrom::End(0)).context("seeking disk")?; stash .as_file() .set_len(len) .with_context(|| format!("extending partition stash file {}", path.display()))?; saved .overwrite(stash.as_file_mut()) .with_context(|| format!("stashing saved partitions to {}", path.display()))?; stash .keep() .with_context(|| format!("retaining saved partition stash in {}", path.display()))?; Ok(()) } #[cfg(test)] mod tests { use super::*; #[test] fn test_parse_partition_filters()
{ use PartitionFilter::*; let g = |v| Label(glob::Pattern::new(v).unwrap()); let i = |v| Some(NonZeroU32::new(v).unwrap()); assert_eq!( parse_partition_filters(&["foo", "z*b?", ""], &["1", "7-7", "2-4", "-3", "4-"]) .unwrap(), vec![ g("foo"), g("z*b?"), g(""), Index(i(1), i(1)), Index(i(7), i(7)), Index(i(2), i(4)), Index(None, i(3)), Index(i(4), None) ] );
identifier_body
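The CONSOLE-SETTINGS substitution in update_grub_cfg_console_settings above boils down to one technique: a regex with named prefix/suffix capture groups wrapping a replaceable body. Below is a minimal standalone sketch of that technique, assuming the regex crate and a hypothetical marker pattern (the real GRUB_CFG_CONSOLE_SETTINGS_RE is defined elsewhere in the crate and is not shown here):

use regex::{Captures, Regex};

/// Replace the text between two marker lines, keeping the markers themselves.
/// Returns None if the markers are missing, mirroring the bail! above.
fn replace_marked_block(cfg: &str, body: &str) -> Option<String> {
    // Hypothetical marker pattern; (?s) lets `.` span newlines and `.*?` is lazy.
    let re = Regex::new(r"(?s)(?P<prefix># BEGIN console\n)(?:.*?)(?P<suffix># END console)").unwrap();
    if !re.is_match(cfg) {
        return None;
    }
    Some(re
        .replace(cfg, |caps: &Captures| {
            format!("{}{}{}", &caps["prefix"], body, &caps["suffix"])
        })
        .into_owned())
}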
install.rs
.collect::<Vec<&str>>(), &config .save_partindex .iter() .map(|s| s.as_str()) .collect::<Vec<&str>>(), )?; // compute sector size // Uninitialized ECKD DASD's blocksize is 512, but after formatting // it changes to the recommended 4096 // https://bugzilla.redhat.com/show_bug.cgi?id=1905159 #[allow(clippy::match_bool, clippy::match_single_binding)] let sector_size = match is_dasd(device, None) .with_context(|| format!("checking whether {device} is an IBM DASD disk"))? { #[cfg(target_arch = "s390x")] true => s390x::dasd_try_get_sector_size(device).transpose(), _ => None, }; let sector_size = sector_size .unwrap_or_else(|| get_sector_size_for_path(Path::new(device))) .with_context(|| format!("getting sector size of {device}"))? .get(); // Set up DASD. We need to do this before initiating the download // because otherwise the download might time out while we're low-level // formatting the DASD. #[cfg(target_arch = "s390x")] { if is_dasd(device, None)? { if !save_partitions.is_empty() { // The user requested partition saving, but SavedPartitions // doesn't understand DASD VTOCs and won't find any partitions // to save. bail!("saving DASD partitions is not supported"); } s390x::prepare_dasd(device)?; } } // set up image source // create location let location: Box<dyn ImageLocation> = if let Some(image_file) = &config.image_file { Box::new(FileLocation::new(image_file)) } else if let Some(image_url) = &config.image_url { Box::new(UrlLocation::new(image_url, config.fetch_retries)) } else if config.offline { match OsmetLocation::new(config.architecture.as_str(), sector_size)? { Some(osmet) => Box::new(osmet), None => bail!("cannot perform offline install; metadata missing"), } } else { // For now, using --stream automatically will cause a download. In the future, we could // opportunistically use osmet if the version and stream match an osmet file/the live ISO. let maybe_osmet = match config.stream { Some(_) => None, None => OsmetLocation::new(config.architecture.as_str(), sector_size)?, }; if let Some(osmet) = maybe_osmet { Box::new(osmet) } else { let format = match sector_size { 4096 => "4k.raw.xz", 512 => "raw.xz", n => { // could bail on non-512, but let's be optimistic: just warn and try the regular // 512b image eprintln!( "Found non-standard sector size {n} for {device}, assuming 512b-compatible" ); "raw.xz" } }; Box::new(StreamLocation::new( config.stream.as_deref().unwrap_or("stable"), config.architecture.as_str(), "metal", format, config.stream_base_url.as_ref(), config.fetch_retries, )?) } }; // report it to the user eprintln!("{location}"); // we only support installing from a single artifact let mut sources = location.sources()?; let mut source = sources.pop().context("no artifacts found")?; if !sources.is_empty() { bail!("found multiple artifacts"); } if source.signature.is_none() && location.require_signature() { if config.insecure { eprintln!("Signature not found; skipping verification as requested"); } else { bail!("--insecure not specified and signature not found"); } } // open output; ensure it's a block device and we have exclusive access let mut dest = OpenOptions::new() .read(true) .write(true) .open(device) .with_context(|| format!("opening {device}"))?; if !dest .metadata() .with_context(|| format!("getting metadata for {device}"))?
.file_type() .is_block_device() { bail!("{} is not a block device", device); } ensure_exclusive_access(device) .with_context(|| format!("checking for exclusive access to {device}"))?; // save partitions that we plan to keep let saved = SavedPartitions::new_from_disk(&mut dest, &save_partitions) .with_context(|| format!("saving partitions from {device}"))?; // get reference to partition table // For kpartx partitioning, this will conditionally call kpartx -d // when dropped let mut table = Disk::new(device)? .get_partition_table() .with_context(|| format!("getting partition table for {device}"))?; // copy and postprocess disk image // On failure, clear and reread the partition table to prevent the disk // from accidentally being used. dest.rewind().with_context(|| format!("seeking {device}"))?; if let Err(err) = write_disk( &config, &mut source, &mut dest, &mut *table, &saved, ignition, network_config, ) { // log the error so the details aren't dropped if we encounter // another error during cleanup eprintln!("\nError: {err:?}\n"); // clean up if config.preserve_on_error { eprintln!("Preserving partition table as requested"); if saved.is_saved() { // The user asked to preserve the damaged partition table // for debugging. We also have saved partitions, and those // may or may not be in the damaged table depending on where we // failed. Preserve the saved partitions by writing them to // a file in /tmp and telling the user about it. Hey, it's // a debug flag. stash_saved_partitions(&mut dest, &saved)?; } } else { reset_partition_table(&config, &mut dest, &mut *table, &saved)?; } // return a generic error so our exit status is right bail!("install failed"); } // Because grub picks /boot by label and the OS picks /boot, we can end up racing/flapping // between /boot partitions on startup. So check the number of filesystems labeled 'boot' // and warn the user if there is more than one match get_filesystems_with_label("boot", true) { Ok(pts) => { if pts.len() > 1 { let rootdev = fs::canonicalize(device) .unwrap_or_else(|_| PathBuf::from(device)) .to_string_lossy() .to_string(); let pts = pts .iter() .filter(|pt| !pt.contains(&rootdev)) .collect::<Vec<_>>(); eprintln!("\nNote: detected other devices with a filesystem labeled `boot`:"); for pt in pts { eprintln!(" - {pt}"); } eprintln!("The installed OS may not work correctly if there are multiple boot filesystems.
Before rebooting, investigate whether these filesystems are needed and consider wiping them with `wipefs -a`.\n" ); } } Err(e) => eprintln!("checking filesystems labeled 'boot': {e:?}"), } eprintln!("Install complete."); Ok(()) } fn parse_partition_filters(labels: &[&str], indexes: &[&str]) -> Result<Vec<PartitionFilter>> { use PartitionFilter::*; let mut filters: Vec<PartitionFilter> = Vec::new(); // partition label globs for glob in labels { let filter = Label( glob::Pattern::new(glob) .with_context(|| format!("couldn't parse label glob '{glob}'"))?, ); filters.push(filter); } // partition index ranges let parse_index = |i: &str| -> Result<Option<NonZeroU32>> { match i { "" => Ok(None), // open end of range _ => Ok(Some( NonZeroU32::new( i.parse() .with_context(|| format!("couldn't parse partition index '{i}'"))?, ) .context("partition index cannot be zero")?, )), } }; for range in indexes { let parts: Vec<&str> = range.split('-').collect(); let filter = match parts.len() { 1 => Index(parse_index(parts[0])?, parse_index(parts[0])?), 2 => Index(parse_index(parts[0])?, parse_index(parts[1])?), _ => bail!("couldn't parse partition index range '{}'", range), }; match filter { Index(None, None) => bail!( "both ends of partition index range '{}' cannot be open", range ), Index(Some(x), Some(y)) if x > y => bail!( "start of partition index range '{}' cannot be greater than end", range ), _ => filters.push(filter), }; } Ok(filters) } fn ensure_exclusive_access(device: &str) -> Result<()> { let mut parts = Disk::new(device)?.get_busy_partitions()?; if parts.is_empty() { return Ok(()); } parts.sort_unstable_by_key(|p| p.path.to_string()); eprintln!("Partitions in use on {device}:"); for part in parts { if let Some(mountpoint) = part.mountpoint.as_ref() { eprintln!(" {} mounted on {}", part.path, mountpoint); } if part.swap { eprintln!(" {} is swap device", part.path); } for holder in part.get_holders()? { eprintln!(" {} in use by {}", part.path, holder); } } bail!("found busy partitions"); } /// Copy the image source to the target disk and do all post-processing. /// If this function fails, the caller should wipe the partition table /// to ensure the user doesn't boot from a partially-written disk. fn write_disk( config: &InstallConfig, source: &mut ImageSource, dest: &mut File, table: &mut dyn PartTable, saved: &SavedPartitions, ignition: Option<File>, network_config: Option<&str>, ) -> Result<()> { let device = config.dest_device.as_deref().expect("device missing"); // Get sector size of destination, for comparing with image let sector_size = get_sector_size(dest)?; // copy the image #[allow(clippy::match_bool, clippy::match_single_binding)] let image_copy = match is_dasd(device, Some(dest))? 
{ #[cfg(target_arch = "s390x")] true => s390x::image_copy_s390x, _ => image_copy_default, }; write_image( source, dest, Path::new(device), image_copy, true, Some(saved), Some(sector_size), VerifyKeys::Production, )?; table.reread()?; // postprocess if ignition.is_some() || config.firstboot_args.is_some() || !config.append_karg.is_empty() || !config.delete_karg.is_empty() || config.platform.is_some() || !config.console.is_empty() || network_config.is_some() || cfg!(target_arch = "s390x") { let mount = Disk::new(device)?.mount_partition_by_label("boot", mount::MsFlags::empty())?; if let Some(ignition) = ignition.as_ref() { write_ignition(mount.mountpoint(), &config.ignition_hash, ignition) .context("writing Ignition configuration")?; } if let Some(platform) = config.platform.as_ref() { write_platform(mount.mountpoint(), platform).context("writing platform ID")?; } if config.platform.is_some() || !config.console.is_empty() { write_console( mount.mountpoint(), config.platform.as_deref(), &config.console, ) .context("configuring console")?; } if let Some(firstboot_args) = config.firstboot_args.as_ref() { write_firstboot_kargs(mount.mountpoint(), firstboot_args) .context("writing firstboot kargs")?; } if !config.append_karg.is_empty() || !config.delete_karg.is_empty() { eprintln!("Modifying kernel arguments"); Console::maybe_warn_on_kargs(&config.append_karg, "--append-karg", "--console"); visit_bls_entry_options(mount.mountpoint(), |orig_options: &str| { KargsEditor::new() .append(config.append_karg.as_slice()) .delete(config.delete_karg.as_slice()) .maybe_apply_to(orig_options) }) .context("deleting and appending kargs")?; } if let Some(network_config) = network_config.as_ref() { copy_network_config(mount.mountpoint(), network_config)?; } #[cfg(target_arch = "s390x")] { s390x::zipl( mount.mountpoint(), None, None, s390x::ZiplSecexMode::Disable, None, )?; s390x::chreipl(device)?; } } // detect any latent write errors dest.sync_all().context("syncing data to disk")?; Ok(()) } /// Write the Ignition config. fn write_ignition( mountpoint: &Path, digest_in: &Option<IgnitionHash>, mut config_in: &File, ) -> Result<()> { eprintln!("Writing Ignition config"); // Verify configuration digest, if any. if let Some(digest) = &digest_in { digest .validate(&mut config_in) .context("failed to validate Ignition configuration digest")?; config_in .rewind() .context("rewinding Ignition configuration file")?; }; // make parent directory let mut config_dest = mountpoint.to_path_buf(); config_dest.push("ignition"); if !config_dest.is_dir() { fs::create_dir_all(&config_dest).with_context(|| { format!( "creating Ignition config directory {}", config_dest.display() ) })?; // Ignition data may contain secrets; restrict to root fs::set_permissions(&config_dest, Permissions::from_mode(0o700)).with_context(|| { format!( "setting file mode for Ignition directory {}", config_dest.display() ) })?; } // do the copy config_dest.push("config.ign"); let mut config_out = OpenOptions::new() .write(true) .create_new(true) .open(&config_dest) .with_context(|| { format!( "opening destination Ignition config {}", config_dest.display() ) })?; // Ignition config may contain secrets; restrict to root fs::set_permissions(&config_dest, Permissions::from_mode(0o600)).with_context(|| { format!( "setting file mode for destination Ignition config {}", config_dest.display() ) })?; io::copy(&mut config_in, &mut config_out).context("writing Ignition config")?; Ok(()) } /// Write first-boot kernel arguments.
fn write_firstboot_kargs(mountpoint: &Path, args: &str) -> Result<()> { eprintln!("Writing first-boot kernel arguments"); // write the arguments let mut config_dest = mountpoint.to_path_buf(); config_dest.push("ignition.firstboot"); // if the file doesn't already exist, fail, since our assumptions // are wrong let mut config_out = OpenOptions::new() .append(true) .open(&config_dest) .with_context(|| format!("opening first-boot file {}", config_dest.display()))?; let contents = format!("set ignition_network_kcmdline=\"{args}\"\n"); config_out .write_all(contents.as_bytes()) .context("writing first-boot kernel arguments")?; Ok(()) } #[derive(Clone, Default, Deserialize)] struct PlatformSpec { #[serde(default)] grub_commands: Vec<String>, #[serde(default)] kernel_arguments: Vec<String>, } /// Override the platform ID. fn write_platform(mountpoint: &Path, platform: &str) -> Result<()> { // early return if setting the platform to the default value, since // otherwise we'll think we failed to set it if platform == "metal" { return Ok(()); } eprintln!("Setting platform to {platform}"); // We assume that we will only install from metal images and that the // bootloader configs will always set ignition.platform.id. visit_bls_entry_options(mountpoint, |orig_options: &str| { let new_options = KargsEditor::new() .replace(&[format!("ignition.platform.id=metal={platform}")]) .apply_to(orig_options) .context("setting platform ID argument")?; if orig_options == new_options { bail!("couldn't locate platform ID"); } Ok(Some(new_options)) })?; Ok(()) } /// Configure console kernel arguments and GRUB commands. fn write_console(mountpoint: &Path, platform: Option<&str>, consoles: &[Console]) -> Result<()> { // read platforms table let platforms = match fs::read_to_string(mountpoint.join("coreos/platforms.json")) { Ok(json) => serde_json::from_str::<HashMap<String, PlatformSpec>>(&json) .context("parsing platform table")?, // no table for this image? Err(e) if e.kind() == std::io::ErrorKind::NotFound => Default::default(), Err(e) => return Err(e).context("reading platform table"), }; let mut kargs = Vec::new(); let mut grub_commands = Vec::new(); if !consoles.is_empty() { // custom console settings completely override platform-specific // defaults let mut grub_terminals = Vec::new(); for console in consoles { kargs.push(console.karg()); if let Some(cmd) = console.grub_command() { grub_commands.push(cmd); } grub_terminals.push(console.grub_terminal()); } grub_terminals.sort_unstable(); grub_terminals.dedup(); for direction in ["input", "output"] { grub_commands.push(format!("terminal_{direction} {}", grub_terminals.join(" "))); } } else if let Some(platform) = platform { // platform-specific defaults if platform == "metal" { // we're just being asked to apply the defaults which are already // applied return Ok(()); } let spec = platforms.get(platform).cloned().unwrap_or_default(); kargs.extend(spec.kernel_arguments);
.map(|s| s.as_str())
random_line_split
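The index-range grammar accepted by parse_partition_filters above ("N", "N-", "-N", "N-M", with 1-based indexes and at most one open end) is compact enough to sketch on its own. This is a simplified standalone version under those assumptions:

use std::num::NonZeroU32;

fn parse_range(s: &str) -> Result<(Option<NonZeroU32>, Option<NonZeroU32>), String> {
    // An empty side means an open end of the range; index zero is rejected.
    let end = |t: &str| -> Result<Option<NonZeroU32>, String> {
        if t.is_empty() {
            return Ok(None);
        }
        let n: u32 = t.parse().map_err(|_| format!("couldn't parse index '{t}'"))?;
        NonZeroU32::new(n).map(Some).ok_or_else(|| "index cannot be zero".to_string())
    };
    let parts: Vec<&str> = s.split('-').collect();
    let (lo, hi) = match parts.as_slice() {
        [a] => (end(a)?, end(a)?),    // "7" is shorthand for 7-7
        [a, b] => (end(a)?, end(b)?), // "2-4", "-3", "4-"
        _ => return Err(format!("couldn't parse range '{s}'")),
    };
    match (lo, hi) {
        (None, None) => Err("both ends of the range cannot be open".into()),
        (Some(x), Some(y)) if x > y => Err("start cannot be greater than end".into()),
        pair => Ok(pair),
    }
}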
lib.rs
//! The Starling JavaScript runtime. //! // `error_chain!` can recurse deeply #![recursion_limit = "1024"] #![deny(missing_docs)] #![deny(missing_debug_implementations)] // Annoying warning emitted from the `error_chain!` macro. #![allow(unused_doc_comment)] #[macro_use] extern crate derive_error_chain; #[macro_use] extern crate futures; extern crate futures_cpupool; #[macro_use] extern crate js; #[macro_use] extern crate lazy_static; extern crate num_cpus; #[macro_use] extern crate state_machine_future; extern crate tokio_core; extern crate tokio_timer; extern crate void; #[macro_use] pub mod js_native; mod error; mod future_ext; pub mod gc_roots; pub(crate) mod js_global; pub mod promise_future_glue; pub(crate) mod promise_tracker; pub(crate) mod task; pub use error::*; use futures::{Sink, Stream}; use futures::sync::mpsc; use futures_cpupool::CpuPool; use std::cmp; use std::collections::HashMap; use std::fmt; use std::mem; use std::path; use std::sync::Arc; use std::thread; /// Configuration options for building a Starling event loop. /// /// ``` /// extern crate starling; /// /// # fn foo() -> starling::Result<()> { /// // Construct a new `Options` builder, providing the file containing /// // the main JavaScript task. /// starling::Options::new("path/to/main.js") /// // Finish configuring the `Options` builder and run the event /// // loop! /// .run()?; /// # Ok(()) /// # } /// ``` #[derive(Clone, Debug)] pub struct Options { main: path::PathBuf, sync_io_pool_threads: usize, cpu_pool_threads: usize, channel_buffer_size: usize, } const DEFAULT_SYNC_IO_POOL_THREADS: usize = 8; const DEFAULT_CHANNEL_BUFFER_SIZE: usize = 4096; impl Options { /// Construct a new `Options` object for configuring the Starling event /// loop. /// /// The given `main` JavaScript file will be evaluated as the main task. pub fn new<P>(main: P) -> Options where P: Into<path::PathBuf>, { Options { main: main.into(), sync_io_pool_threads: DEFAULT_SYNC_IO_POOL_THREADS, cpu_pool_threads: num_cpus::get(), channel_buffer_size: DEFAULT_CHANNEL_BUFFER_SIZE, } } /// Configure the number of threads to reserve for the synchronous IO pool. /// /// The synchronous IO pool is a collection of threads for adapting /// synchronous IO libraries into the (otherwise completely asynchronous) /// Starling system. /// /// ### Panics /// /// Panics if `threads` is 0. pub fn sync_io_pool_threads(mut self, threads: usize) -> Self { assert!(threads > 0); self.sync_io_pool_threads = threads; self } /// Configure the number of threads to reserve for the CPU pool. /// /// The CPU pool is a collection of worker threads for CPU-bound native Rust /// tasks. /// /// Defaults to the number of logical CPUs on the machine. /// /// ### Panics /// /// Panics if `threads` is 0. pub fn cpu_pool_threads(mut self, threads: usize) -> Self { assert!(threads > 0); self.cpu_pool_threads = threads; self } /// Configure the size of mpsc buffers in the system. /// /// ### Panics /// /// Panics if `size` is 0. pub fn channel_buffer_size(mut self, size: usize) -> Self { assert!(size > 0); self.channel_buffer_size = size; self } /// Finish this `Options` builder and run the Starling event loop with its /// specified configuration. pub fn run(self) -> Result<()> { Starling::new(self)?.run() } } impl Options { // Get the number of `T`s that should be buffered in an mpsc channel for the // current configuration. 
fn buffer_capacity_for<T>(&self) -> usize { let size_of_t = cmp::max(1, mem::size_of::<T>()); let capacity = self.channel_buffer_size / size_of_t; cmp::max(1, capacity) } } /// The Starling supervisory thread. /// /// The supervisory thread doesn't do much other than supervise other threads: the IO /// event loop thread, various utility thread pools, and JavaScript task /// threads. Its primary responsibility is ensuring clean system shutdown and /// joining thread handles. pub(crate) struct Starling { handle: StarlingHandle, receiver: mpsc::Receiver<StarlingMessage>, // Currently there is a 1:1 mapping between JS tasks and native // threads. That is expected to change in the future, hence the // distinction between `self.tasks` and `self.threads`. tasks: HashMap<task::TaskId, task::TaskHandle>, threads: HashMap<thread::ThreadId, thread::JoinHandle<()>>, } impl fmt::Debug for Starling { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Starling {{.. }}") } } impl Starling { /// Construct a Starling system from the given options. pub fn new(opts: Options) -> Result<Starling> { let tasks = HashMap::new(); let threads = HashMap::new(); let (sender, receiver) = mpsc::channel(opts.buffer_capacity_for::<StarlingMessage>()); let Options { sync_io_pool_threads, cpu_pool_threads, .. } = opts; let handle = StarlingHandle { options: Arc::new(opts), sync_io_pool: CpuPool::new(sync_io_pool_threads), cpu_pool: CpuPool::new(cpu_pool_threads), sender, }; Ok(Starling { handle, receiver, tasks, threads, }) } /// Run the main Starling event loop with the specified options. pub fn run(mut self) -> Result<()> { let (main, thread) = task::Task::spawn_main(self.handle.clone(), self.handle.options().main.clone())?; self.tasks.insert(main.id(), main.clone()); self.threads.insert(thread.thread().id(), thread); for msg in self.receiver.wait() { let msg = msg.map_err(|_| Error::from_kind(ErrorKind::CouldNotReadValueFromChannel))?; match msg { StarlingMessage::TaskFinished(id) => { assert!(self.tasks.remove(&id).is_some()); let thread_id = id.into(); let join_handle = self.threads .remove(&thread_id) .expect("should have a thread join handle for the finished task"); join_handle .join() .expect("should join finished task's thread OK"); if id == main.id() { // TODO: notification of shutdown and joining other threads and things. return Ok(()); } } StarlingMessage::TaskErrored(id, error) => { assert!(self.tasks.remove(&id).is_some()); let thread_id = id.into(); let join_handle = self.threads .remove(&thread_id) .expect("should have a thread join handle for the errored task"); join_handle .join() .expect("should join errored task's thread OK"); if id == main.id()
} StarlingMessage::NewTask(task, join_handle) => { self.tasks.insert(task.id(), task); self.threads.insert(join_handle.thread().id(), join_handle); } } } Ok(()) } } /// Messages that threads can send to the Starling supervisory thread. /// /// This needs to be `pub` because it is used in a trait implementation; don't /// actually use it! #[derive(Debug)] #[doc(hidden)] pub enum StarlingMessage { /// The task on the given thread completed successfully. TaskFinished(task::TaskId), /// The task on the given thread failed with the given error. TaskErrored(task::TaskId, Error), /// A new child task was created. NewTask(task::TaskHandle, thread::JoinHandle<()>), } /// A handle to the Starling system. /// /// A `StarlingHandle` is a capability to schedule IO on the event loop, spawn /// work in one of the utility thread pools, and communicate with the Starling /// supervisory thread. Handles can be cloned and sent across threads, /// propagating these capabilities. #[derive(Clone)] pub(crate) struct StarlingHandle { options: Arc<Options>, sync_io_pool: CpuPool, cpu_pool: CpuPool, sender: mpsc::Sender<StarlingMessage>, } impl fmt::Debug for StarlingHandle { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "StarlingHandle {{.. }}") } } impl StarlingHandle { /// Get the `Options` that this Starling system was configured with. pub fn options(&self) -> &Arc<Options> { &self.options } /// Get a handle to the thread pool for adapting synchronous IO (perhaps /// from a library that wasn't written to be async) into the system. pub fn sync_io_pool(&self) -> &CpuPool { &self.sync_io_pool } /// Get a handle to the thread pool for performing CPU-bound native Rust /// tasks. pub fn cpu_pool(&self) -> &CpuPool { &self.cpu_pool } /// Send a message to the Starling supervisory thread. pub fn send(&self, msg: StarlingMessage) -> futures::sink::Send<mpsc::Sender<StarlingMessage>> { self.sender.clone().send(msg) } } #[cfg(test)] mod tests { use super::*; use task::{TaskHandle, TaskMessage}; fn assert_clone<T: Clone>() {} fn assert_send<T: Send>() {} #[test] fn error_is_send() { assert_send::<Error>(); } #[test] fn options_is_send_clone() { assert_clone::<Options>(); assert_send::<Options>(); } #[test] fn starling_handle_is_send_clone() { assert_clone::<StarlingHandle>(); assert_send::<StarlingHandle>(); } #[test] fn task_handle_is_send_clone() { assert_clone::<TaskHandle>(); assert_send::<TaskHandle>(); } #[test] fn starling_message_is_send() { assert_send::<StarlingMessage>(); } #[test] fn task_message_is_send() { assert_send::<TaskMessage>(); } }
{ // TODO: notification of shutdown and joining other threads and things. return Err(error); }
conditional_block
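Putting the builder methods documented above together, a typical embedding of the Starling runtime might look like the following sketch (the path and sizes are illustrative, not recommendations):

fn main() -> starling::Result<()> {
    starling::Options::new("path/to/main.js")
        .sync_io_pool_threads(4)   // threads for adapting blocking IO
        .cpu_pool_threads(2)       // threads for CPU-bound native work
        .channel_buffer_size(1024) // byte budget, divided by message size per channel
        .run()
}

Each setter panics on 0, per the Panics sections above.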
lib.rs
//! The Starling JavaScript runtime. //! // `error_chain!` can recurse deeply #![recursion_limit = "1024"] #![deny(missing_docs)] #![deny(missing_debug_implementations)] // Annoying warning emitted from the `error_chain!` macro. #![allow(unused_doc_comment)] #[macro_use] extern crate derive_error_chain; #[macro_use] extern crate futures; extern crate futures_cpupool; #[macro_use] extern crate js; #[macro_use] extern crate lazy_static; extern crate num_cpus; #[macro_use] extern crate state_machine_future; extern crate tokio_core; extern crate tokio_timer; extern crate void; #[macro_use] pub mod js_native; mod error; mod future_ext; pub mod gc_roots; pub(crate) mod js_global; pub mod promise_future_glue; pub(crate) mod promise_tracker; pub(crate) mod task; pub use error::*; use futures::{Sink, Stream}; use futures::sync::mpsc; use futures_cpupool::CpuPool; use std::cmp; use std::collections::HashMap; use std::fmt; use std::mem; use std::path; use std::sync::Arc; use std::thread; /// Configuration options for building a Starling event loop. /// /// ``` /// extern crate starling; /// /// # fn foo() -> starling::Result<()> { /// // Construct a new `Options` builder, providing the file containing /// // the main JavaScript task. /// starling::Options::new("path/to/main.js") /// // Finish configuring the `Options` builder and run the event /// // loop! /// .run()?; /// # Ok(()) /// # } /// ``` #[derive(Clone, Debug)] pub struct Options { main: path::PathBuf, sync_io_pool_threads: usize, cpu_pool_threads: usize, channel_buffer_size: usize, } const DEFAULT_SYNC_IO_POOL_THREADS: usize = 8; const DEFAULT_CHANNEL_BUFFER_SIZE: usize = 4096; impl Options { /// Construct a new `Options` object for configuring the Starling event /// loop. /// /// The given `main` JavaScript file will be evaluated as the main task. pub fn new<P>(main: P) -> Options where P: Into<path::PathBuf>, { Options { main: main.into(), sync_io_pool_threads: DEFAULT_SYNC_IO_POOL_THREADS, cpu_pool_threads: num_cpus::get(), channel_buffer_size: DEFAULT_CHANNEL_BUFFER_SIZE, } } /// Configure the number of threads to reserve for the synchronous IO pool. /// /// The synchronous IO pool is a collection of threads for adapting /// synchronous IO libraries into the (otherwise completely asynchronous) /// Starling system. /// /// ### Panics /// /// Panics if `threads` is 0. pub fn sync_io_pool_threads(mut self, threads: usize) -> Self { assert!(threads > 0); self.sync_io_pool_threads = threads; self } /// Configure the number of threads to reserve for the CPU pool. /// /// The CPU pool is a collection of worker threads for CPU-bound native Rust /// tasks. /// /// Defaults to the number of logical CPUs on the machine. /// /// ### Panics /// /// Panics if `threads` is 0. pub fn cpu_pool_threads(mut self, threads: usize) -> Self { assert!(threads > 0); self.cpu_pool_threads = threads; self } /// Configure the size of mpsc buffers in the system. /// /// ### Panics /// /// Panics if `size` is 0. pub fn channel_buffer_size(mut self, size: usize) -> Self { assert!(size > 0); self.channel_buffer_size = size; self } /// Finish this `Options` builder and run the Starling event loop with its /// specified configuration. pub fn run(self) -> Result<()> { Starling::new(self)?.run() } } impl Options { // Get the number of `T`s that should be buffered in an mpsc channel for the // current configuration. 
fn buffer_capacity_for<T>(&self) -> usize { let size_of_t = cmp::max(1, mem::size_of::<T>()); let capacity = self.channel_buffer_size / size_of_t; cmp::max(1, capacity) } } /// The Starling supervisory thread. /// /// The supervisory thread doesn't do much other than supervise other threads: the IO /// event loop thread, various utility thread pools, and JavaScript task /// threads. Its primary responsibility is ensuring clean system shutdown and /// joining thread handles. pub(crate) struct Starling { handle: StarlingHandle, receiver: mpsc::Receiver<StarlingMessage>, // Currently there is a 1:1 mapping between JS tasks and native // threads. That is expected to change in the future, hence the // distinction between `self.tasks` and `self.threads`. tasks: HashMap<task::TaskId, task::TaskHandle>, threads: HashMap<thread::ThreadId, thread::JoinHandle<()>>, } impl fmt::Debug for Starling { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Starling {{.. }}") } } impl Starling { /// Construct a Starling system from the given options. pub fn new(opts: Options) -> Result<Starling> { let tasks = HashMap::new(); let threads = HashMap::new(); let (sender, receiver) = mpsc::channel(opts.buffer_capacity_for::<StarlingMessage>()); let Options { sync_io_pool_threads, cpu_pool_threads, .. } = opts; let handle = StarlingHandle { options: Arc::new(opts), sync_io_pool: CpuPool::new(sync_io_pool_threads), cpu_pool: CpuPool::new(cpu_pool_threads), sender, }; Ok(Starling { handle, receiver, tasks, threads, }) } /// Run the main Starling event loop with the specified options. pub fn run(mut self) -> Result<()> { let (main, thread) = task::Task::spawn_main(self.handle.clone(), self.handle.options().main.clone())?; self.tasks.insert(main.id(), main.clone()); self.threads.insert(thread.thread().id(), thread); for msg in self.receiver.wait() { let msg = msg.map_err(|_| Error::from_kind(ErrorKind::CouldNotReadValueFromChannel))?; match msg { StarlingMessage::TaskFinished(id) => { assert!(self.tasks.remove(&id).is_some()); let thread_id = id.into(); let join_handle = self.threads .remove(&thread_id) .expect("should have a thread join handle for the finished task"); join_handle .join() .expect("should join finished task's thread OK"); if id == main.id() { // TODO: notification of shutdown and joining other threads and things. return Ok(()); } } StarlingMessage::TaskErrored(id, error) => { assert!(self.tasks.remove(&id).is_some()); let thread_id = id.into(); let join_handle = self.threads .remove(&thread_id) .expect("should have a thread join handle for the errored task"); join_handle .join() .expect("should join errored task's thread OK"); if id == main.id() { // TODO: notification of shutdown and joining other threads and things. return Err(error); } } StarlingMessage::NewTask(task, join_handle) => { self.tasks.insert(task.id(), task); self.threads.insert(join_handle.thread().id(), join_handle); } } } Ok(()) } } /// Messages that threads can send to the Starling supervisory thread. /// /// This needs to be `pub` because it is used in a trait implementation; don't /// actually use it! #[derive(Debug)] #[doc(hidden)] pub enum StarlingMessage { /// The task on the given thread completed successfully. TaskFinished(task::TaskId), /// The task on the given thread failed with the given error. TaskErrored(task::TaskId, Error), /// A new child task was created. NewTask(task::TaskHandle, thread::JoinHandle<()>), } /// A handle to the Starling system. 
/// /// A `StarlingHandle` is a capability to schedule IO on the event loop, spawn /// work in one of the utility thread pools, and communicate with the Starling /// supervisory thread. Handles can be cloned and sent across threads, /// propagating these capabilities. #[derive(Clone)] pub(crate) struct StarlingHandle { options: Arc<Options>, sync_io_pool: CpuPool, cpu_pool: CpuPool, sender: mpsc::Sender<StarlingMessage>, } impl fmt::Debug for StarlingHandle { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "StarlingHandle {{.. }}") } } impl StarlingHandle { /// Get the `Options` that this Starling system was configured with. pub fn options(&self) -> &Arc<Options> { &self.options } /// Get a handle to the thread pool for adapting synchronous IO (perhaps /// from a library that wasn't written to be async) into the system. pub fn sync_io_pool(&self) -> &CpuPool { &self.sync_io_pool } /// Get a handle to the thread pool for performing CPU-bound native Rust /// tasks. pub fn cpu_pool(&self) -> &CpuPool { &self.cpu_pool } /// Send a message to the Starling supervisory thread. pub fn send(&self, msg: StarlingMessage) -> futures::sink::Send<mpsc::Sender<StarlingMessage>> { self.sender.clone().send(msg) } } #[cfg(test)] mod tests { use super::*; use task::{TaskHandle, TaskMessage}; fn assert_clone<T: Clone>() {} fn assert_send<T: Send>() {} #[test] fn error_is_send() { assert_send::<Error>(); } #[test] fn options_is_send_clone() { assert_clone::<Options>(); assert_send::<Options>(); } #[test] fn starling_handle_is_send_clone() { assert_clone::<StarlingHandle>(); assert_send::<StarlingHandle>(); } #[test] fn task_handle_is_send_clone() { assert_clone::<TaskHandle>(); assert_send::<TaskHandle>(); } #[test] fn starling_message_is_send()
#[test] fn task_message_is_send() { assert_send::<TaskMessage>(); } }
{ assert_send::<StarlingMessage>(); }
identifier_body
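The sizing rule in buffer_capacity_for is worth making concrete: the configured byte budget is divided by the message size, zero-sized messages are treated as one byte, and the result is clamped to at least one slot. A standalone sketch of the same arithmetic:

use std::{cmp, mem};

fn buffer_capacity(channel_buffer_size: usize, size_of_t: usize) -> usize {
    let size_of_t = cmp::max(1, size_of_t); // zero-sized types count as 1 byte
    cmp::max(1, channel_buffer_size / size_of_t) // never a zero-capacity channel
}

fn main() {
    assert_eq!(buffer_capacity(4096, mem::size_of::<u64>()), 512);
    assert_eq!(buffer_capacity(4096, 0), 4096); // ZST case
    assert_eq!(buffer_capacity(16, 64), 1);     // clamped to one slot
}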
lib.rs
//! The Starling JavaScript runtime. //! // `error_chain!` can recurse deeply #![recursion_limit = "1024"] #![deny(missing_docs)] #![deny(missing_debug_implementations)] // Annoying warning emitted from the `error_chain!` macro. #![allow(unused_doc_comment)] #[macro_use] extern crate derive_error_chain; #[macro_use] extern crate futures; extern crate futures_cpupool; #[macro_use] extern crate js; #[macro_use] extern crate lazy_static; extern crate num_cpus; #[macro_use] extern crate state_machine_future; extern crate tokio_core; extern crate tokio_timer; extern crate void; #[macro_use] pub mod js_native; mod error; mod future_ext; pub mod gc_roots; pub(crate) mod js_global; pub mod promise_future_glue; pub(crate) mod promise_tracker; pub(crate) mod task; pub use error::*; use futures::{Sink, Stream}; use futures::sync::mpsc; use futures_cpupool::CpuPool; use std::cmp; use std::collections::HashMap; use std::fmt; use std::mem; use std::path; use std::sync::Arc; use std::thread; /// Configuration options for building a Starling event loop. /// /// ``` /// extern crate starling; /// /// # fn foo() -> starling::Result<()> { /// // Construct a new `Options` builder, providing the file containing /// // the main JavaScript task. /// starling::Options::new("path/to/main.js") /// // Finish configuring the `Options` builder and run the event /// // loop! /// .run()?; /// # Ok(()) /// # } /// ``` #[derive(Clone, Debug)] pub struct Options { main: path::PathBuf, sync_io_pool_threads: usize, cpu_pool_threads: usize, channel_buffer_size: usize, } const DEFAULT_SYNC_IO_POOL_THREADS: usize = 8; const DEFAULT_CHANNEL_BUFFER_SIZE: usize = 4096; impl Options { /// Construct a new `Options` object for configuring the Starling event /// loop. /// /// The given `main` JavaScript file will be evaluated as the main task. pub fn new<P>(main: P) -> Options where P: Into<path::PathBuf>, { Options { main: main.into(), sync_io_pool_threads: DEFAULT_SYNC_IO_POOL_THREADS, cpu_pool_threads: num_cpus::get(), channel_buffer_size: DEFAULT_CHANNEL_BUFFER_SIZE, } } /// Configure the number of threads to reserve for the synchronous IO pool. /// /// The synchronous IO pool is a collection of threads for adapting /// synchronous IO libraries into the (otherwise completely asynchronous) /// Starling system. /// /// ### Panics /// /// Panics if `threads` is 0. pub fn sync_io_pool_threads(mut self, threads: usize) -> Self { assert!(threads > 0); self.sync_io_pool_threads = threads; self } /// Configure the number of threads to reserve for the CPU pool. /// /// The CPU pool is a collection of worker threads for CPU-bound native Rust /// tasks. /// /// Defaults to the number of logical CPUs on the machine. /// /// ### Panics /// /// Panics if `threads` is 0. pub fn cpu_pool_threads(mut self, threads: usize) -> Self { assert!(threads > 0); self.cpu_pool_threads = threads; self } /// Configure the size of mpsc buffers in the system. /// /// ### Panics /// /// Panics if `size` is 0. pub fn channel_buffer_size(mut self, size: usize) -> Self { assert!(size > 0); self.channel_buffer_size = size; self } /// Finish this `Options` builder and run the Starling event loop with its /// specified configuration. pub fn run(self) -> Result<()> { Starling::new(self)?.run() } } impl Options { // Get the number of `T`s that should be buffered in an mpsc channel for the // current configuration. 
fn buffer_capacity_for<T>(&self) -> usize { let size_of_t = cmp::max(1, mem::size_of::<T>()); let capacity = self.channel_buffer_size / size_of_t; cmp::max(1, capacity) } } /// The Starling supervisory thread. /// /// The supervisory thread doesn't do much other than supervise other threads: the IO /// event loop thread, various utility thread pools, and JavaScript task /// threads. Its primary responsibility is ensuring clean system shutdown and /// joining thread handles. pub(crate) struct Starling { handle: StarlingHandle, receiver: mpsc::Receiver<StarlingMessage>, // Currently there is a 1:1 mapping between JS tasks and native // threads. That is expected to change in the future, hence the // distinction between `self.tasks` and `self.threads`. tasks: HashMap<task::TaskId, task::TaskHandle>, threads: HashMap<thread::ThreadId, thread::JoinHandle<()>>, } impl fmt::Debug for Starling { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Starling {{.. }}") } } impl Starling { /// Construct a Starling system from the given options. pub fn new(opts: Options) -> Result<Starling> { let tasks = HashMap::new(); let threads = HashMap::new(); let (sender, receiver) = mpsc::channel(opts.buffer_capacity_for::<StarlingMessage>()); let Options { sync_io_pool_threads, cpu_pool_threads, .. } = opts; let handle = StarlingHandle {
cpu_pool: CpuPool::new(cpu_pool_threads), sender, }; Ok(Starling { handle, receiver, tasks, threads, }) } /// Run the main Starling event loop with the specified options. pub fn run(mut self) -> Result<()> { let (main, thread) = task::Task::spawn_main(self.handle.clone(), self.handle.options().main.clone())?; self.tasks.insert(main.id(), main.clone()); self.threads.insert(thread.thread().id(), thread); for msg in self.receiver.wait() { let msg = msg.map_err(|_| Error::from_kind(ErrorKind::CouldNotReadValueFromChannel))?; match msg { StarlingMessage::TaskFinished(id) => { assert!(self.tasks.remove(&id).is_some()); let thread_id = id.into(); let join_handle = self.threads .remove(&thread_id) .expect("should have a thread join handle for the finished task"); join_handle .join() .expect("should join finished task's thread OK"); if id == main.id() { // TODO: notification of shutdown and joining other threads and things. return Ok(()); } } StarlingMessage::TaskErrored(id, error) => { assert!(self.tasks.remove(&id).is_some()); let thread_id = id.into(); let join_handle = self.threads .remove(&thread_id) .expect("should have a thread join handle for the errored task"); join_handle .join() .expect("should join errored task's thread OK"); if id == main.id() { // TODO: notification of shutdown and joining other threads and things. return Err(error); } } StarlingMessage::NewTask(task, join_handle) => { self.tasks.insert(task.id(), task); self.threads.insert(join_handle.thread().id(), join_handle); } } } Ok(()) } } /// Messages that threads can send to the Starling supervisory thread. /// /// This needs to be `pub` because it is used in a trait implementation; don't /// actually use it! #[derive(Debug)] #[doc(hidden)] pub enum StarlingMessage { /// The task on the given thread completed successfully. TaskFinished(task::TaskId), /// The task on the given thread failed with the given error. TaskErrored(task::TaskId, Error), /// A new child task was created. NewTask(task::TaskHandle, thread::JoinHandle<()>), } /// A handle to the Starling system. /// /// A `StarlingHandle` is a capability to schedule IO on the event loop, spawn /// work in one of the utility thread pools, and communicate with the Starling /// supervisory thread. Handles can be cloned and sent across threads, /// propagating these capabilities. #[derive(Clone)] pub(crate) struct StarlingHandle { options: Arc<Options>, sync_io_pool: CpuPool, cpu_pool: CpuPool, sender: mpsc::Sender<StarlingMessage>, } impl fmt::Debug for StarlingHandle { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "StarlingHandle {{.. }}") } } impl StarlingHandle { /// Get the `Options` that this Starling system was configured with. pub fn options(&self) -> &Arc<Options> { &self.options } /// Get a handle to the thread pool for adapting synchronous IO (perhaps /// from a library that wasn't written to be async) into the system. pub fn sync_io_pool(&self) -> &CpuPool { &self.sync_io_pool } /// Get a handle to the thread pool for performing CPU-bound native Rust /// tasks. pub fn cpu_pool(&self) -> &CpuPool { &self.cpu_pool } /// Send a message to the Starling supervisory thread. 
pub fn send(&self, msg: StarlingMessage) -> futures::sink::Send<mpsc::Sender<StarlingMessage>> { self.sender.clone().send(msg) } } #[cfg(test)] mod tests { use super::*; use task::{TaskHandle, TaskMessage}; fn assert_clone<T: Clone>() {} fn assert_send<T: Send>() {} #[test] fn error_is_send() { assert_send::<Error>(); } #[test] fn options_is_send_clone() { assert_clone::<Options>(); assert_send::<Options>(); } #[test] fn starling_handle_is_send_clone() { assert_clone::<StarlingHandle>(); assert_send::<StarlingHandle>(); } #[test] fn task_handle_is_send_clone() { assert_clone::<TaskHandle>(); assert_send::<TaskHandle>(); } #[test] fn starling_message_is_send() { assert_send::<StarlingMessage>(); } #[test] fn task_message_is_send() { assert_send::<TaskMessage>(); } }
options: Arc::new(opts), sync_io_pool: CpuPool::new(sync_io_pool_threads),
random_line_split
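The bookkeeping in Starling::run — map each worker's ThreadId to its JoinHandle, then remove and join as completion messages arrive — can be illustrated with plain std threads and channels. This is a simplified sketch of the pattern, not Starling's actual futures-based loop:

use std::collections::HashMap;
use std::sync::mpsc;
use std::thread;

fn main() {
    let (tx, rx) = mpsc::channel();
    let mut threads: HashMap<thread::ThreadId, thread::JoinHandle<()>> = HashMap::new();

    for _ in 0..4 {
        let tx = tx.clone();
        let handle = thread::spawn(move || {
            // ... do some work, then report completion with our own id
            tx.send(thread::current().id()).unwrap();
        });
        threads.insert(handle.thread().id(), handle);
    }
    drop(tx); // rx disconnects once every worker has finished

    // Supervisor loop: join each worker as it reports completion.
    for id in rx {
        let handle = threads.remove(&id).expect("unknown worker");
        handle.join().expect("worker panicked");
    }
    assert!(threads.is_empty());
}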
lib.rs
//! The Starling JavaScript runtime. //! // `error_chain!` can recurse deeply #![recursion_limit = "1024"] #![deny(missing_docs)] #![deny(missing_debug_implementations)] // Annoying warning emitted from the `error_chain!` macro. #![allow(unused_doc_comment)] #[macro_use] extern crate derive_error_chain; #[macro_use] extern crate futures; extern crate futures_cpupool; #[macro_use] extern crate js; #[macro_use] extern crate lazy_static; extern crate num_cpus; #[macro_use] extern crate state_machine_future; extern crate tokio_core; extern crate tokio_timer; extern crate void; #[macro_use] pub mod js_native; mod error; mod future_ext; pub mod gc_roots; pub(crate) mod js_global; pub mod promise_future_glue; pub(crate) mod promise_tracker; pub(crate) mod task; pub use error::*; use futures::{Sink, Stream}; use futures::sync::mpsc; use futures_cpupool::CpuPool; use std::cmp; use std::collections::HashMap; use std::fmt; use std::mem; use std::path; use std::sync::Arc; use std::thread; /// Configuration options for building a Starling event loop. /// /// ``` /// extern crate starling; /// /// # fn foo() -> starling::Result<()> { /// // Construct a new `Options` builder, providing the file containing /// // the main JavaScript task. /// starling::Options::new("path/to/main.js") /// // Finish configuring the `Options` builder and run the event /// // loop! /// .run()?; /// # Ok(()) /// # } /// ``` #[derive(Clone, Debug)] pub struct Options { main: path::PathBuf, sync_io_pool_threads: usize, cpu_pool_threads: usize, channel_buffer_size: usize, } const DEFAULT_SYNC_IO_POOL_THREADS: usize = 8; const DEFAULT_CHANNEL_BUFFER_SIZE: usize = 4096; impl Options { /// Construct a new `Options` object for configuring the Starling event /// loop. /// /// The given `main` JavaScript file will be evaluated as the main task. pub fn new<P>(main: P) -> Options where P: Into<path::PathBuf>, { Options { main: main.into(), sync_io_pool_threads: DEFAULT_SYNC_IO_POOL_THREADS, cpu_pool_threads: num_cpus::get(), channel_buffer_size: DEFAULT_CHANNEL_BUFFER_SIZE, } } /// Configure the number of threads to reserve for the synchronous IO pool. /// /// The synchronous IO pool is a collection of threads for adapting /// synchronous IO libraries into the (otherwise completely asynchronous) /// Starling system. /// /// ### Panics /// /// Panics if `threads` is 0. pub fn sync_io_pool_threads(mut self, threads: usize) -> Self { assert!(threads > 0); self.sync_io_pool_threads = threads; self } /// Configure the number of threads to reserve for the CPU pool. /// /// The CPU pool is a collection of worker threads for CPU-bound native Rust /// tasks. /// /// Defaults to the number of logical CPUs on the machine. /// /// ### Panics /// /// Panics if `threads` is 0. pub fn cpu_pool_threads(mut self, threads: usize) -> Self { assert!(threads > 0); self.cpu_pool_threads = threads; self } /// Configure the size of mpsc buffers in the system. /// /// ### Panics /// /// Panics if `size` is 0. pub fn channel_buffer_size(mut self, size: usize) -> Self { assert!(size > 0); self.channel_buffer_size = size; self } /// Finish this `Options` builder and run the Starling event loop with its /// specified configuration. pub fn run(self) -> Result<()> { Starling::new(self)?.run() } } impl Options { // Get the number of `T`s that should be buffered in an mpsc channel for the // current configuration. fn
<T>(&self) -> usize { let size_of_t = cmp::max(1, mem::size_of::<T>()); let capacity = self.channel_buffer_size / size_of_t; cmp::max(1, capacity) } } /// The Starling supervisory thread. /// /// The supervisory thread doesn't do much other than supervise other threads: the IO /// event loop thread, various utility thread pools, and JavaScript task /// threads. Its primary responsibility is ensuring clean system shutdown and /// joining thread handles. pub(crate) struct Starling { handle: StarlingHandle, receiver: mpsc::Receiver<StarlingMessage>, // Currently there is a 1:1 mapping between JS tasks and native // threads. That is expected to change in the future, hence the // distinction between `self.tasks` and `self.threads`. tasks: HashMap<task::TaskId, task::TaskHandle>, threads: HashMap<thread::ThreadId, thread::JoinHandle<()>>, } impl fmt::Debug for Starling { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Starling {{.. }}") } } impl Starling { /// Construct a Starling system from the given options. pub fn new(opts: Options) -> Result<Starling> { let tasks = HashMap::new(); let threads = HashMap::new(); let (sender, receiver) = mpsc::channel(opts.buffer_capacity_for::<StarlingMessage>()); let Options { sync_io_pool_threads, cpu_pool_threads, .. } = opts; let handle = StarlingHandle { options: Arc::new(opts), sync_io_pool: CpuPool::new(sync_io_pool_threads), cpu_pool: CpuPool::new(cpu_pool_threads), sender, }; Ok(Starling { handle, receiver, tasks, threads, }) } /// Run the main Starling event loop with the specified options. pub fn run(mut self) -> Result<()> { let (main, thread) = task::Task::spawn_main(self.handle.clone(), self.handle.options().main.clone())?; self.tasks.insert(main.id(), main.clone()); self.threads.insert(thread.thread().id(), thread); for msg in self.receiver.wait() { let msg = msg.map_err(|_| Error::from_kind(ErrorKind::CouldNotReadValueFromChannel))?; match msg { StarlingMessage::TaskFinished(id) => { assert!(self.tasks.remove(&id).is_some()); let thread_id = id.into(); let join_handle = self.threads .remove(&thread_id) .expect("should have a thread join handle for the finished task"); join_handle .join() .expect("should join finished task's thread OK"); if id == main.id() { // TODO: notification of shutdown and joining other threads and things. return Ok(()); } } StarlingMessage::TaskErrored(id, error) => { assert!(self.tasks.remove(&id).is_some()); let thread_id = id.into(); let join_handle = self.threads .remove(&thread_id) .expect("should have a thread join handle for the errored task"); join_handle .join() .expect("should join errored task's thread OK"); if id == main.id() { // TODO: notification of shutdown and joining other threads and things. return Err(error); } } StarlingMessage::NewTask(task, join_handle) => { self.tasks.insert(task.id(), task); self.threads.insert(join_handle.thread().id(), join_handle); } } } Ok(()) } } /// Messages that threads can send to the Starling supervisory thread. /// /// This needs to be `pub` because it is used in a trait implementation; don't /// actually use it! #[derive(Debug)] #[doc(hidden)] pub enum StarlingMessage { /// The task on the given thread completed successfully. TaskFinished(task::TaskId), /// The task on the given thread failed with the given error. TaskErrored(task::TaskId, Error), /// A new child task was created. NewTask(task::TaskHandle, thread::JoinHandle<()>), } /// A handle to the Starling system. 
/// /// A `StarlingHandle` is a capability to schedule IO on the event loop, spawn /// work in one of the utility thread pools, and communicate with the Starling /// supervisory thread. Handles can be cloned and sent across threads, /// propagating these capabilities. #[derive(Clone)] pub(crate) struct StarlingHandle { options: Arc<Options>, sync_io_pool: CpuPool, cpu_pool: CpuPool, sender: mpsc::Sender<StarlingMessage>, } impl fmt::Debug for StarlingHandle { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "StarlingHandle {{.. }}") } } impl StarlingHandle { /// Get the `Options` that this Starling system was configured with. pub fn options(&self) -> &Arc<Options> { &self.options } /// Get a handle to the thread pool for adapting synchronous IO (perhaps /// from a library that wasn't written to be async) into the system. pub fn sync_io_pool(&self) -> &CpuPool { &self.sync_io_pool } /// Get a handle to the thread pool for performing CPU-bound native Rust /// tasks. pub fn cpu_pool(&self) -> &CpuPool { &self.cpu_pool } /// Send a message to the Starling supervisory thread. pub fn send(&self, msg: StarlingMessage) -> futures::sink::Send<mpsc::Sender<StarlingMessage>> { self.sender.clone().send(msg) } } #[cfg(test)] mod tests { use super::*; use task::{TaskHandle, TaskMessage}; fn assert_clone<T: Clone>() {} fn assert_send<T: Send>() {} #[test] fn error_is_send() { assert_send::<Error>(); } #[test] fn options_is_send_clone() { assert_clone::<Options>(); assert_send::<Options>(); } #[test] fn starling_handle_is_send_clone() { assert_clone::<StarlingHandle>(); assert_send::<StarlingHandle>(); } #[test] fn task_handle_is_send_clone() { assert_clone::<TaskHandle>(); assert_send::<TaskHandle>(); } #[test] fn starling_message_is_send() { assert_send::<StarlingMessage>(); } #[test] fn task_message_is_send() { assert_send::<TaskMessage>(); } }
buffer_capacity_for
identifier_name
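The assert_send/assert_clone helpers in the test module above rely on a small trick: a generic function with an empty body type-checks only if its trait bounds hold, so merely instantiating it with a concrete type is a compile-time capability test. A minimal sketch with a hypothetical Handle type:

fn assert_send<T: Send>() {}
fn assert_clone<T: Clone>() {}

#[derive(Clone)]
struct Handle; // hypothetical type under test

fn main() {
    assert_send::<Handle>();  // fails to compile if Handle is not Send
    assert_clone::<Handle>(); // fails to compile if Handle is not Clone
    // assert_send::<std::rc::Rc<()>>(); // would not compile: Rc is !Send
}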
random_state.rs
use core::hash::Hash; cfg_if::cfg_if! { if #[cfg(any( all(any(target_arch = "x86", target_arch = "x86_64"), target_feature = "aes", not(miri)), all(any(target_arch = "arm", target_arch = "aarch64"), any(target_feature = "aes", target_feature = "crypto"), not(miri), feature = "stdsimd") ))] { use crate::aes_hash::*; } else { use crate::fallback_hash::*; } } cfg_if::cfg_if! { if #[cfg(feature = "specialize")] { use crate::BuildHasherExt; } } cfg_if::cfg_if! { if #[cfg(feature = "std")] { extern crate std as alloc; } else { extern crate alloc; } } #[cfg(feature = "atomic-polyfill")] use atomic_polyfill as atomic; #[cfg(not(feature = "atomic-polyfill"))] use core::sync::atomic; use alloc::boxed::Box; use atomic::{AtomicUsize, Ordering}; use core::any::{Any, TypeId}; use core::fmt; use core::hash::BuildHasher; use core::hash::Hasher; pub(crate) const PI: [u64; 4] = [ 0x243f_6a88_85a3_08d3, 0x1319_8a2e_0370_7344, 0xa409_3822_299f_31d0, 0x082e_fa98_ec4e_6c89, ]; pub(crate) const PI2: [u64; 4] = [ 0x4528_21e6_38d0_1377, 0xbe54_66cf_34e9_0c6c, 0xc0ac_29b7_c97c_50dd, 0x3f84_d5b5_b547_0917, ]; cfg_if::cfg_if! { if #[cfg(all(feature = "compile-time-rng", any(test, fuzzing)))] { #[inline] fn get_fixed_seeds() -> &'static [[u64; 4]; 2] { use const_random::const_random; const RAND: [[u64; 4]; 2] = [ [ const_random!(u64), const_random!(u64), const_random!(u64), const_random!(u64), ], [ const_random!(u64), const_random!(u64), const_random!(u64), const_random!(u64), ] ]; &RAND } } else if #[cfg(all(feature = "runtime-rng", not(fuzzing)))] { #[inline] fn get_fixed_seeds() -> &'static [[u64; 4]; 2] { use crate::convert::Convert; static SEEDS: OnceBox<[[u64; 4]; 2]> = OnceBox::new(); SEEDS.get_or_init(|| { let mut result: [u8; 64] = [0; 64]; getrandom::getrandom(&mut result).expect("getrandom::getrandom() failed."); Box::new(result.convert()) }) } } else if #[cfg(feature = "compile-time-rng")] { #[inline] fn get_fixed_seeds() -> &'static [[u64; 4]; 2] { use const_random::const_random; const RAND: [[u64; 4]; 2] = [ [ const_random!(u64), const_random!(u64), const_random!(u64), const_random!(u64), ], [ const_random!(u64), const_random!(u64), const_random!(u64), const_random!(u64), ] ]; &RAND } } else { #[inline] fn get_fixed_seeds() -> &'static [[u64; 4]; 2] { &[PI, PI2] } } } cfg_if::cfg_if! { if #[cfg(not(all(target_arch = "arm", target_os = "none")))] { use once_cell::race::OnceBox; static RAND_SOURCE: OnceBox<Box<dyn RandomSource + Send + Sync>> = OnceBox::new(); } } /// A supplier of Randomness used for different hashers. /// See [set_random_source]. /// /// If [set_random_source] is not called, aHash will default to the best available source of randomness. /// In order, these are: /// 1. OS-provided random number generator (available if the `runtime-rng` flag is enabled, which it is by default) - This should be very strong. /// 2. Strong compile time random numbers used to permute a static "counter". (available if `compile-time-rng` is enabled. /// __Enabling this is recommended if `runtime-rng` is not possible__) /// 3. A static counter that adds the memory address of each [RandomState] created permuted with fixed constants. /// (Similar to above but with fixed keys) - This is the weakest option. The strength of this heavily depends on whether or not ASLR is enabled.
/// (Rust enables ASLR by default) pub trait RandomSource { fn gen_hasher_seed(&self) -> usize; } struct DefaultRandomSource { counter: AtomicUsize, } impl DefaultRandomSource { fn new() -> DefaultRandomSource { DefaultRandomSource { counter: AtomicUsize::new(&PI as *const _ as usize), } } #[cfg(all(target_arch = "arm", target_os = "none"))] const fn default() -> DefaultRandomSource { DefaultRandomSource { counter: AtomicUsize::new(PI[3] as usize), } } } impl RandomSource for DefaultRandomSource { cfg_if::cfg_if! { if #[cfg(all(target_arch = "arm", target_os = "none"))] { fn gen_hasher_seed(&self) -> usize { let stack = self as *const _ as usize; let previous = self.counter.load(Ordering::Relaxed); let new = previous.wrapping_add(stack); self.counter.store(new, Ordering::Relaxed); new } } else { fn gen_hasher_seed(&self) -> usize { let stack = self as *const _ as usize; self.counter.fetch_add(stack, Ordering::Relaxed) } } } } cfg_if::cfg_if! { if #[cfg(all(target_arch = "arm", target_os = "none"))] { #[inline] fn get_src() -> &'static dyn RandomSource { static RAND_SOURCE: DefaultRandomSource = DefaultRandomSource::default(); &RAND_SOURCE } } else { /// Provides an optional way to manually supply a source of randomness for Hasher keys. /// /// The provided [RandomSource] will be used as a source of randomness by [RandomState] to generate new states. /// If this method is not invoked, the standard source of randomness is used as described in the Readme. /// /// The source of randomness can only be set once, and must be set before the first RandomState is created. /// If the source has already been specified, `Err` is returned with a `bool` indicating whether the set failed because /// this method was previously invoked (true) or because the default source is already being used (false). #[cfg(not(all(target_arch = "arm", target_os = "none")))] pub fn set_random_source(source: impl RandomSource + Send + Sync + 'static) -> Result<(), bool> { RAND_SOURCE.set(Box::new(Box::new(source))).map_err(|s| s.as_ref().type_id() != TypeId::of::<&DefaultRandomSource>()) } #[inline] fn get_src() -> &'static dyn RandomSource { RAND_SOURCE.get_or_init(|| Box::new(Box::new(DefaultRandomSource::new()))).as_ref() } } } /// Provides a [Hasher] factory. This is typically used (e.g. by [HashMap]) to create /// [AHasher]s in order to hash the keys of the map. See `build_hasher` below. /// /// [build_hasher]: ahash:: /// [Hasher]: std::hash::Hasher /// [BuildHasher]: std::hash::BuildHasher /// [HashMap]: std::collections::HashMap /// /// There are multiple constructors; each is documented in more detail below: /// /// | Constructor | Dynamically random? | Seed | /// |---------------|---------------------|------| /// |`new` | Each instance unique|_[RandomSource]_| /// |`generate_with`| Each instance unique|`u64` x 4 + [RandomSource]| /// |`with_seed` | Fixed per process |`u64` + static random number| /// |`with_seeds` | Fixed |`u64` x 4| /// #[derive(Clone)] pub struct RandomState { pub(crate) k0: u64, pub(crate) k1: u64, pub(crate) k2: u64, pub(crate) k3: u64, } impl fmt::Debug for RandomState { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.pad("RandomState { .. }") } } impl RandomState { /// Create a new `RandomState` `BuildHasher` using random keys. /// /// Each instance will have a unique set of keys derived from [RandomSource].
    ///
    #[inline]
    pub fn new() -> RandomState {
        let src = get_src();
        let fixed = get_fixed_seeds();
        Self::from_keys(&fixed[0], &fixed[1], src.gen_hasher_seed())
    }

    /// Create a new `RandomState` `BuildHasher` based on the provided seeds, but in such a way
    /// that each time it is called the resulting state will be different and of high quality.
    /// This allows fixed constant or poor quality seeds to be provided without the problem of different
    /// `BuildHasher`s being identical or weak.
    ///
    /// This is done by permuting the provided values with the value of a static counter and a memory address.
    /// (This makes this method somewhat more expensive than `with_seeds` below, which does not do this.)
    ///
    /// The provided values (k0-k3) do not need to be of high quality, but they should not all be the same value.
    #[inline]
    pub fn generate_with(k0: u64, k1: u64, k2: u64, k3: u64) -> RandomState {
        let src = get_src();
        let fixed = get_fixed_seeds();
        RandomState::from_keys(&fixed[0], &[k0, k1, k2, k3], src.gen_hasher_seed())
    }

    fn from_keys(a: &[u64; 4], b: &[u64; 4], c: usize) -> RandomState {
        let &[k0, k1, k2, k3] = a;
        let mut hasher = AHasher::from_random_state(&RandomState { k0, k1, k2, k3 });
        hasher.write_usize(c);
        let mix = |l: u64, r: u64| {
            let mut h = hasher.clone();
            h.write_u64(l);
            h.write_u64(r);
            h.finish()
        };
        RandomState {
            k0: mix(b[0], b[2]),
            k1: mix(b[1], b[3]),
            k2: mix(b[2], b[1]),
            k3: mix(b[3], b[0]),
        }
    }

    /// Internal. Used by Default.
    #[inline]
    pub(crate) fn with_fixed_keys() -> RandomState {
        let [k0, k1, k2, k3] = get_fixed_seeds()[0];
        RandomState { k0, k1, k2, k3 }
    }

    /// Build a `RandomState` from a single key. The provided key does not need to be of high quality,
    /// but all `RandomState`s created from the same key will produce identical hashers.
    /// (In contrast to `generate_with` above)
    ///
    /// This allows for explicitly setting the seed to be used.
    ///
    /// Note: This method does not require the provided seed to be strong.
    #[inline]
    pub fn with_seed(key: usize) -> RandomState {
        let fixed = get_fixed_seeds();
        RandomState::from_keys(&fixed[0], &fixed[1], key)
    }

    /// Allows for explicitly setting the seeds to be used.
    /// All `RandomState`s created with the same set of keys will produce identical hashers.
    /// (In contrast to `generate_with` above)
    ///
    /// Note: If DOS resistance is desired, one of these should be a decent quality random number.
    /// If 4 high quality random numbers are not cheaply available, this method is robust against 0s being passed for
    /// one or more of the parameters or the same value being passed for more than one parameter.
    /// It is recommended to pass numbers in order from highest to lowest quality (if there is any difference).
    #[inline]
    pub const fn with_seeds(k0: u64, k1: u64, k2: u64, k3: u64) -> RandomState {
        RandomState {
            k0: k0 ^ PI2[0],
            k1: k1 ^ PI2[1],
            k2: k2 ^ PI2[2],
            k3: k3 ^ PI2[3],
        }
    }

    /// Calculates the hash of a single value.
    /// This provides a more convenient (and faster) way to obtain a hash:
    /// For example:
    #[cfg_attr(
        feature = "std",
        doc = r##"
# Examples
```
use std::hash::BuildHasher;
use ahash::RandomState;

let hash_builder = RandomState::new();
let hash = hash_builder.hash_one("Some Data");
```
"##
    )]
    /// This is similar to:
    #[cfg_attr(
        feature = "std",
        doc = r##"
# Examples
```
use std::hash::{BuildHasher, Hash, Hasher};
use ahash::RandomState;

let hash_builder = RandomState::new();
let mut hasher = hash_builder.build_hasher();
"Some Data".hash(&mut hasher);
let hash = hasher.finish();
```
"##
    )]
    /// (Note that these two ways to get a hash may not produce the same value for the same data.)
    ///
    /// This is intended as a convenience for code which *consumes* hashes, such
    /// as the implementation of a hash table or in unit tests that check
    /// whether a custom [`Hash`] implementation behaves as expected.
    ///
    /// This must not be used in any code which *creates* hashes, such as in an
    /// implementation of [`Hash`]. The way to create a combined hash of
    /// multiple values is to call [`Hash::hash`] multiple times using the same
    /// [`Hasher`], not to call this method repeatedly and combine the results.
    #[inline]
    pub fn hash_one<T: Hash>(&self, x: T) -> u64
    where
        Self: Sized,
    {
        use crate::specialize::CallHasher;
        T::get_hash(&x, self)
    }
}

/// Creates an instance of RandomState using keys obtained from the random number generator.
/// Each instance created in this way will have a unique set of keys. (But the resulting instance
/// can be used to create many hashers, each of which will have the same keys.)
///
/// This is the same as [RandomState::new()].
///
/// NOTE: For safety this trait impl is only available if either of the flags `runtime-rng` (on by default) or
/// `compile-time-rng` are enabled. This is to prevent weakly keyed maps from being accidentally created. Instead one of the
/// constructors for [RandomState] must be used.
#[cfg(any(feature = "compile-time-rng", feature = "runtime-rng", feature = "no-rng"))]
impl Default for RandomState {
    #[inline]
    fn default() -> Self {
        Self::new()
    }
}

impl BuildHasher for RandomState {
    type Hasher = AHasher;

    /// Constructs a new [AHasher] with keys based on this [RandomState] object.
    /// This means that two different [RandomState]s will generate
    /// [AHasher]s that will return different hashcodes, but [Hasher]s created from the same [BuildHasher]
    /// will generate the same hashes for the same input data.
    ///
    #[cfg_attr(
        feature = "std",
        doc = r##"
# Examples
```
use ahash::{AHasher, RandomState};
use std::hash::{Hasher, BuildHasher};

let build_hasher = RandomState::new();
let mut hasher_1 = build_hasher.build_hasher();
let mut hasher_2 = build_hasher.build_hasher();

hasher_1.write_u32(1234);
hasher_2.write_u32(1234);
assert_eq!(hasher_1.finish(), hasher_2.finish());

let other_build_hasher = RandomState::new();
let mut different_hasher = other_build_hasher.build_hasher();
different_hasher.write_u32(1234);
assert_ne!(different_hasher.finish(), hasher_1.finish());
```
"##
    )]
    /// [Hasher]: std::hash::Hasher
    /// [BuildHasher]: std::hash::BuildHasher
    /// [HashMap]: std::collections::HashMap
    #[inline]
    fn build_hasher(&self) -> AHasher {
        AHasher::from_random_state(self)
    }

    /// Calculates the hash of a single value.
    /// This provides a more convenient (and faster) way to obtain a hash:
    /// For example:
    #[cfg_attr(
        feature = "std",
        doc = r##"
# Examples
```
use std::hash::BuildHasher;
use ahash::RandomState;

let hash_builder = RandomState::new();
let hash = hash_builder.hash_one("Some Data");
```
"##
    )]
    /// This is similar to:
    #[cfg_attr(
        feature = "std",
        doc = r##"
# Examples
```
use std::hash::{BuildHasher, Hash, Hasher};
use ahash::RandomState;

let hash_builder = RandomState::new();
let mut hasher = hash_builder.build_hasher();
"Some Data".hash(&mut hasher);
let hash = hasher.finish();
```
"##
    )]
    /// (Note that these two ways to get a hash may not produce the same value for the same data.)
    ///
    /// This is intended as a convenience for code which *consumes* hashes, such
    /// as the implementation of a hash table or in unit tests that check
    /// whether a custom [`Hash`] implementation behaves as expected.
    ///
    /// This must not be used in any code which *creates* hashes, such as in an
    /// implementation of [`Hash`]. The way to create a combined hash of
    /// multiple values is to call [`Hash::hash`] multiple times using the same
    /// [`Hasher`], not to call this method repeatedly and combine the results.
    #[cfg(feature = "specialize")]
    #[inline]
    fn hash_one<T: Hash>(&self, x: T) -> u64 {
        RandomState::hash_one(self, x)
    }
}

#[cfg(feature = "specialize")]
impl BuildHasherExt for RandomState {
    #[inline]
    fn hash_as_u64<T: Hash + ?Sized>(&self, value: &T) -> u64 {
        let mut hasher = AHasherU64 {
            buffer: self.k0,
            pad: self.k1,
        };
        value.hash(&mut hasher);
        hasher.finish()
    }

    #[inline]
    fn hash_as_fixed_length<T: Hash + ?Sized>(&self, value: &T) -> u64 {
        let mut hasher = AHasherFixed(self.build_hasher());
        value.hash(&mut hasher);
        hasher.finish()
    }

    #[inline]
    fn hash_as_str<T: Hash + ?Sized>(&self, value: &T) -> u64 {
        let mut hasher = AHasherStr(self.build_hasher());
        value.hash(&mut hasher);
        hasher.finish()
    }
}

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn test_unique() {
    #[cfg(all(feature = "runtime-rng", not(all(feature = "compile-time-rng", test))))]
    #[test]
    fn test_not_pi() {
        assert_ne!(PI, get_fixed_seeds()[0]);
    }

    #[cfg(all(feature = "compile-time-rng", any(not(feature = "runtime-rng"), test)))]
    #[test]
    fn test_not_pi_const() {
        assert_ne!(PI, get_fixed_seeds()[0]);
    }

    #[cfg(all(not(feature = "runtime-rng"), not(feature = "compile-time-rng")))]
    #[test]
    fn test_pi() {
        assert_eq!(PI, get_fixed_seeds()[0]);
    }

    #[test]
    fn test_with_seeds_const() {
        const _CONST_RANDOM_STATE: RandomState = RandomState::with_seeds(17, 19, 21, 23);
    }
}
        let a = RandomState::generate_with(1, 2, 3, 4);
        let b = RandomState::generate_with(1, 2, 3, 4);
        assert_ne!(a.build_hasher().finish(), b.build_hasher().finish());
    }
random_line_split
random_state.rs
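A short usage sketch may help tie the constructors in the row above together. This is editorial, not part of the dataset row; it assumes only the public `ahash::RandomState` API that the doc examples above already use.

use ahash::RandomState;
use std::collections::HashMap;

fn main() {
    // `new` draws per-instance randomness from the configured RandomSource,
    // so two states will almost surely hash the same input differently.
    let dynamic = RandomState::new();
    let _h = dynamic.hash_one("Some Data");

    // `with_seeds` is const and fully deterministic: the same four keys
    // always yield the same hasher (DOS-resistant only if at least one
    // key is itself a good random number).
    let a = RandomState::with_seeds(1, 2, 3, 4);
    let b = RandomState::with_seeds(1, 2, 3, 4);
    assert_eq!(a.hash_one("Some Data"), b.hash_one("Some Data"));

    // A RandomState is a BuildHasher, so it can key a HashMap directly.
    let mut map: HashMap<&str, u32, RandomState> = HashMap::with_hasher(RandomState::new());
    map.insert("key", 1);
}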
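The `set_random_source` hook defined above can be exercised with a custom source. A minimal sketch, framed as if it lived inside this module, since the exact public re-export path for `RandomSource` and `set_random_source` is not shown in this file:

// `FixedSource` is a hypothetical, deliberately deterministic source: handy
// for reproducible tests, but it gives up the DOS resistance of the default.
struct FixedSource;

impl RandomSource for FixedSource {
    fn gen_hasher_seed(&self) -> usize {
        4 // any constant works for a reproducibility experiment
    }
}

fn use_fixed_source() {
    // Must run before the first RandomState is created. On failure, Err(true)
    // means a custom source was already set and Err(false) means the default
    // source had already been initialized.
    set_random_source(FixedSource).expect("randomness source already initialized");

    // Every call now mixes the same counter value into the process-fixed
    // seeds, so these two states end up with identical keys.
    let a = RandomState::new();
    let b = RandomState::new();
    let _ = (a, b);
}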
    pub fn flush(&mut self) -> Result<(), Error> {
        if self.bit_count > 0 {
            self.buffer <<= 8 - self.bit_count;
            let mut buffer = 0;
            for i in 0..8 {
                buffer <<= 1;
                buffer |= (self.buffer >> i) & 1;
            }
            self.output_vector.push(buffer);
            if PRINT_DEBUG {
                println!("push data: {:08b}", self.buffer);
                for i in 0..(self.output_vector.len()) {
                    print!("{:08b}", self.output_vector[i]);
                }
                println!();
                println!("{:02x?}", self.output_vector);
            }
        }
        Ok(())
    }

    /* Executed once `buffer` has accumulated 8 bits (1 byte). */
    fn flush_to_output(&mut self) -> Result<(), Error> {
        let mut buffer = 0;
        for i in 0..8 {
            buffer <<= 1;
            buffer |= (self.buffer >> i) & 1;
        }
        self.output_vector.push(buffer);
        if PRINT_DEBUG {
            println!("push data: {:08b}", buffer);
            for i in 0..(self.output_vector.len()) {
                print!("{:08b}", self.output_vector[i]);
            }
            println!();
        }
        self.buffer = 0;
        self.bit_count = 0;
        Ok(())
    }
}

/*
    Holds the input as bytes.
    buffer:    array that holds up to MAX_BUFFER_SIZE bytes of input at a time.
    buf_count: how far into `buffer` we have currently read.
    buf_size:  how much of `buffer` actually contains data.
    flag:      set to false once there is no more data to read.
    file_size: records the size of the input file.
    input:     the input being read.
*/
struct ByteReader<'a, T: Read> {
    buffer: [u8; MAX_BUFFER_SIZE],
    buf_count: usize,
    buf_size: usize,
    flag: bool,
    file_size: u32,
    input: &'a mut T,
}

impl<'a, T: Read> ByteReader<'a, T> {
    pub fn new(input: &'a mut T) -> Self {
        let mut reader = ByteReader {
            buffer: [0; MAX_BUFFER_SIZE],
            buf_count: 0,
            buf_size: 0,
            flag: true,
            file_size: 0,
            input,
        };
        let _ = reader.load_next_byte();
        reader
    }

    /* Runs on the first read and whenever the buffer has been read to the end. */
    fn load_next_byte(&mut self) -> Result<(), std::io::Error> {
        match self.input.read(&mut self.buffer)? {
            0 => {
                self.flag = false;
                self.buf_size = 0;
            }
            n => {
                self.file_size += n as u32;
                self.buf_size = n;
                self.flag = true;
            }
        };
        Ok(())
    }

    /* Returns the byte at buf_count without advancing. */
    pub fn seek_byte(&mut self) -> u8 {
        self.buffer[self.buf_count]
    }

    /* Advances buf_count. When the end of the buffer has been reached,
       loads the next block with load_next_byte. */
    pub fn next_byte(&mut self) {
        if self.buf_count + 1 < self.buf_size {
            self.buf_count += 1;
        } else {
            let _ = self.load_next_byte();
            self.buf_count = 0;
        }
    }

    /* Returns the byte at buf_count, then advances with next_byte. */
    pub fn get_byte(&mut self) -> u8 {
        let buffer = self.buffer[self.buf_count];
        self.next_byte();
        buffer
    }
}

/*
    Structure for computing CRC-32.
    The CRC-32 implementation follows the explanation at
    https://www.slideshare.net/7shi/crc32
    divisor:     the bit pattern used as the divisor.
    non_divisor: the data being divided.
    buffer:      temporary storage for incoming data.
    buf_count:   how many bits of `buffer` have been processed.
    first_count: counts the first 4 bytes, which must be inverted.
*/
struct Crc32 {
    divisor: u32,
    non_divisor: u32,
    buffer: u8,
    buf_count: u8,
    first_count: u8,
}

impl Crc32 {
    pub fn new() -> Self {
        Crc32 {
            divisor: 0b100110000010001110110110111,
            non_divisor: 0,
            buffer: 0,
            buf_count: 0,
            first_count: 0,
        }
    }

    /* Feeds data into non_divisor (for the first 4 bytes) or into buffer.
       The incoming byte is bit-reversed first. */
    pub fn push_buf(&mut self, buf: u8) {
        let mut buffer: u8 = 0;
        for i in 0..8 {
            buffer <<= 1;
            buffer |= (buf >> i) & 1;
        }
        if self.first_count < 4 {
            self.non_divisor <<= 8;
            self.non_divisor += !buffer as u32;
            self.first_count += 1;
        } else {
            self.buffer = buffer;
            self.buf_count = 8;
            self.bit_shift();
        }
    }

    /* When the top bit is set, performs the division; otherwise just shifts
       the next bit of buffer in from the front. */
    fn bit_shift(&mut self) {
        for i in 0..self.buf_count {
            if self.non_divisor >= 2147483648 {
                self.non_divisor <<= 1;
                self.non_divisor |= (((self.buffer as u16) >> (self.buf_count - i - 1)) & 1) as u32;
                self.xor();
            } else {
                self.non_divisor <<= 1;
                self.non_divisor |= (((self.buffer as u16) >> (self.buf_count - i - 1)) & 1) as u32;
            }
        }
        self.buf_count = 0
    }

    /* Performs the division (in practice, an XOR). */
    fn xor(&mut self) {
        let buffer = self.non_divisor ^ self.divisor;
        self.non_divisor = buffer;
    }

    /* Computes the CRC-32 from the current non_divisor and returns it. */
    fn get_crc32(&mut self) -> u32 {
        self.push_buf(0);
        self.push_buf(0);
        self.push_buf(0);
        self.push_buf(0);
        let mut buffer: u32 = 0;
        for i in 0..32 {
            buffer <<= 1;
            buffer |= (self.non_divisor >> i) & 1;
        }
        if PRINT_DEBUG {
            println!("crc32: {:08x?}", !buffer);
        }
        !buffer
    }
}

/*
    Structure holding the zip local file header, central directory header,
    end-of-central-directory record, and related data.
    buffer:      holds the header bytes.
    before_size: size before compression.
    after_size:  size after compression.
    filename:    name of the file.
    crc32:       the CRC-32 value.
    hms:         hour/minute/second data.
    ymd:         year/month/day data.
*/
struct Header {
    buffer: Vec<u8>,
    before_size: u32,
    after_size: u32,
    filename: String,
    crc32: u32,
    hms: u16,
    ymd: u16,
}

impl Header {
    pub fn new(
        before_size: u32,
        after_size: u32,
        filename: impl Into<String>,
        crc32: u32,
        hms: u16,
        ymd: u16,
    ) -> Self {
        Header {
            buffer: Vec::new(),
            before_size,
            after_size,
            filename: filename.into(),
            crc32,
            hms,
            ymd,
        }
    }

    /* Appends a 32-bit value to buffer in little-endian byte order. */
    fn push32(&mut self, num: u32) {
        let a = num & 0b11111111;
        let b = (num >> 8) & 0b11111111;
        let c = (num >> 16) & 0b11111111;
        let d = (num >> 24) & 0b11111111;
        self.buffer.push(a as u8);
        self.buffer.push(b as u8);
        self.buffer.push(c as u8);
        self.buffer.push(d as u8);
    }

    /* Appends a 16-bit value to buffer in little-endian byte order. */
    fn push16(&mut self, num: u16) {
        let a = num & 0b11111111;
        let b = (num >> 8) & 0b11111111;
        self.buffer.push(a as u8);
        self.buffer.push(b as u8);
    }

    /* Appends the signature marking a PK0506 (end of central directory) record. */
    fn push_pk0506(&mut self) {
        self.buffer.push(0x50);
        self.buffer.push(0x4b);
        self.buffer.push(0x05);
        self.buffer.push(0x06);
    }

    /* Appends the signature marking a PK0304 (local file header) record. */
    fn push_pk0304(&mut self) {
        self.buffer.push(0x50);
        self.buffer.push(0x4b);
        self.buffer.push(0x03);
        self.buffer.push(0x04);
    }

    /* Appends the signature marking a PK0102 (central directory) header. */
    fn push_pk0102(&mut self) {
        self.buffer.push(0x50);
        self.buffer.push(0x4b);
        self.buffer.push(0x01);
        self.buffer.push(0x02);
    }

    /* Appends the bytes of the file name. */
    fn push_filename(&mut self) {
        let bytes: &[u8] = self.filename.as_bytes();
        for i in 0..bytes.len() {
            self.buffer.push(bytes[i]);
        }
    }

    /*
        Builds the local file header from the given information.
        Layout (byte counts follow what push16/push32 actually emit):
        4 bytes: PK0304 signature
        2 bytes: version needed to extract (2.0)
        2 bytes: general purpose flags (not set here)
        2 bytes: compression method (deflate: 0008)
        2 bytes: modification time
        2 bytes: modification date
        4 bytes: CRC-32
        4 bytes: compressed size (call it m)
        4 bytes: uncompressed size
        2 bytes: file name length (call it n)
        2 bytes: extra field length (none here)
        n bytes: file name
        m bytes: compressed data (not pushed here; appended when writing the file)
    */
    pub fn local_header(mut self) -> Vec<u8> {
        self.push_pk0304();
        self.push16(0x0014);
        self.push16(0x0000);
        self.push16(0x0008);
        self.push16(self.hms);
        self.push16(self.ymd);
        self.push32(self.crc32);
        self.push32(self.after_size);
        self.push32(self.before_size);
        self.push16((self.filename.len()) as u16);
        self.push16(0x0000);
        self.push_filename();
        self.buffer
    }

    /*
        Builds the central directory header from the given information.
        4 bytes: PK0102 signature
        2 bytes: version made by (2.0 here)
        2 bytes: version needed to extract (2.0)
        2 bytes: general purpose flags (not set here)
        2 bytes: compression method (deflate)
        2 bytes: modification time
        2 bytes: modification date
        4 bytes: CRC-32
        4 bytes: compressed size
        4 bytes: uncompressed size
        2 bytes: file name length (call it n)
        2 bytes: extra field length (unused, so 0)
        2 bytes: comment length (none here)
        2 bytes: number of the split part holding the matching PK0304 header
                 (0, since the archive is not split)
        2 bytes: internal attributes of the file stored in the matching PK0304 (0 here)
        4 bytes: external (OS) attributes of the file (0 here)
        4 bytes: offset of the matching PK0304 header
                 (0, since only a single file is stored)
        n bytes: file name
    */
    pub fn central_header(mut self) -> Vec<u8> {
        self.push_pk0102();
        self.push16(0x0314);
        self.push16(0x0014);
        self.push16(0x0000);
        self.push16(0x0008);
        self.push16(self.hms);
        self.push16(self.ymd);
        self.push32(self.crc32);
        self.push32(self.after_size);
        self.push32(self.before_size);
        self.push16((self.filename.len()) as u16);
        self.push16(0x0000);
        self.push16(0x0000);
        self.push16(0x0000);
        self.push16(0x0000);
        self.push32(0x00000000);
        self.push32(0x00000000);
        self.push_filename();
        self.buffer
    }

    /*
        Builds the end-of-central-directory record from the given information.
        4 bytes: PK0506 signature
        2 bytes: number of this part when the archive is split (0, not split)
        2 bytes: number of the part where the first PK0304 is stored (0, not split)
        2 bytes: number of files stored in this part (same as below, since not split)
        2 bytes: total number of compressed files (1 here)
        4 bytes: total size of the PK0102 headers
        4 bytes: offset where the PK0102 headers start
        2 bytes: comment length (none here)
    */
    pub fn end_header(mut self, header_size: u32, header_start: u32) -> Vec<u8> {
        self.push_pk0506();
        self.push16(0x0000);
        self.push16(0x0000);
        self.push16(0x0001);
        self.push16(0x0001);
        self.push32(header_size);
        self.push32(header_start);
        self.push16(0x00);
        self.buffer
    }

    /* A manual clone implementation. */
    pub fn clone(&self) -> Self {
        Header::new(
            self.before_size,
            self.after_size,
            self.filename.clone(),
            self.crc32,
            self.hms,
            self.ymd,
        )
    }
}

/*
    Gets the file's last-modified time and converts it into the two forms the
    zip header needs. The header layout comes from
    https://hgotoh.jp/wiki/doku.php/documents/other/other-017
*/
fn time_data(filename: &str) -> (u16, u16) {
    let times;
    if let Ok(metadata) = metadata(filename) {
        if let Ok(time) = metadata.modified() {
            if let Ok(epoch) = time.duration_since(std::time::SystemTime::UNIX_EPOCH) {
                times = epoch.as_secs();
            } else {
                times = 0;
            }
        } else {
            let now = std::time::SystemTime::now();
            if let Ok(epoch) = now.duration_since(std::time::SystemTime::UNIX_EPOCH) {
                times = epoch.as_secs();
            } else {
                times = 0;
            }
        }
    } else {
        times = 0;
    }
    let data = Local.timestamp(times as i64, 0);
    let mut hms = 0;
    hms += (data.hour() as u32) << 11;
    hms += (data.minute() as u32) << 5;
    hms += (data.second() as u32) / 2;
    let mut ymd = 0;
    ymd += ((data.year() - 1980) as u32) << 9;
    ymd += (data.month() as u32) << 5;
    ymd += data.day() as u32;
    (hms as u16, ymd as u16)
}

/*
    Checks whether `window` contains the same sequence as `check`.
    If it does, returns the distance; otherwise returns -1.
*/
fn match_check<T: Eq>(window: &[T], check: &[T]) -> isize {
    if window.len() < check.len() {
        return -1;
    }
    'outer: for i in 0..(window.len() - check.len() + 1) {
        for j in 0..(check.len()) {
            if window[i + j] != check[j] {
                continue 'outer;
            }
        }
        if PRINT_DEBUG {
            println!("{} {} {}", window.len(), check.len(), i);
        }
        return (window.len() - check.len() - i + 1) as isize;
    }
    -1
}

/* Converts a literal/length value to its fixed Huffman (bit length, code) pair. */
fn changer(num: usize) -> (u8, u16) {
    let (len, re) = match num {
        0..=143 => (8, num + 0x30),
        144..=255 => (9, num + 0x91),
        256..=279 => (7, num - 0x100),
        280..=287 => (8, num - 0x58),
        _ => (0, 512),
    };
    (len, re as u16)
}

/* Looks up the length code and the extra bits for a match length. */
fn length_extra(data: u16) -> (u16, u8, u16) {
    let (num, len, extra) = match data {
        3..=10 => (data + 254, 0, 0),
        11..=12 => (265, 1, (data - 3) & 0b1),
        13..=14 => (266, 1, (data - 3) & 0b1),
        15..=16 => (267, 1, (data - 3) & 0b1),
        17..=18 => (268, 1, (data - 3) & 0b1),
        19..=22 => (269, 2, (data - 3) & 0b11),
        23..=26 => (270, 2, (data - 3) & 0b11),
        27..=30 => (271, 2, (data - 3) & 0b11),
        31..=34 => (272, 2, (data - 3) & 0b11),
        35..=42 => (273, 3, (data - 3) & 0b111),
        43..=50 => (274, 3, (data - 3) & 0b111),
        51..=58 => (275, 3, (data - 3) & 0b111),
        59..=66 => (276, 3, (data - 3) & 0b111),
        67..=82 => (277, 4, (data - 3) & 0b1111),
        83..=98 => (278, 4, (data - 3) & 0b1111),
        99..=114 => (279, 4, (data - 3) & 0b1111),
        115..=130 => (280, 4, (data - 3) & 0b1111),
        131..=162 => (281, 5, (data - 3) & 0b11111),
        163..=194 => (282, 5, (data - 3) & 0b11111),
        195..=226 => (283, 5, (data - 3) & 0b11111),
        227..=257 => (284, 5, (data - 3) & 0b11111),
        _ => (286, 6, 0),
    };
    (num as u16, len as u8, extra as u16)
}

/* Looks up the distance code and the extra bits for a match distance. */
fn distance_extra(data: u32) -> (u8, u8, u16) {
    let (num, dis, extra) = match data {
        1..=4 => (data - 1, 0, 0),
        5..=6 => (4, 1, (data - 1) & 0b1),
        7..=8 => (5, 1, (data - 1) & 0b1),
        9..=12 => (6, 2, (data - 1) & 0b11),
        13..=16 => (7, 2, (data - 1) & 0b11),
        17..=24 => (8, 3, (data - 1) & 0b111),
        25..=32 => (9, 3, (data - 1) & 0b111),
        33..=48 => (10, 4, (data - 1) & 0b1111),
        49..=64 => (11, 4, (data - 1) & 0b1111),
*/ pu
identifier_name
lib.rs
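As an aside on the `time_data` function above: the two u16 values it returns are MS-DOS date/time fields, which is what the zip local and central headers expect. The packing can be checked in isolation; this is an editorial sketch with hypothetical helper names that mirror the shifts used in `time_data`:

// hms: 5 bits hour | 6 bits minute | 5 bits second/2 (2-second resolution)
fn pack_dos_time(hour: u16, minute: u16, second: u16) -> u16 {
    (hour << 11) | (minute << 5) | (second / 2)
}

// ymd: 7 bits (year - 1980) | 4 bits month | 5 bits day
fn pack_dos_date(year: u16, month: u16, day: u16) -> u16 {
    ((year - 1980) << 9) | (month << 5) | day
}

fn main() {
    let hms = pack_dos_time(12, 34, 56);
    let ymd = pack_dos_date(2020, 6, 15);
    assert_eq!(hms >> 11, 12);           // hours occupy the top 5 bits
    assert_eq!((hms >> 5) & 0x3f, 34);   // minutes the middle 6 bits
    assert_eq!((hms & 0x1f) * 2, 56);    // seconds are halved, so *2 restores them
    assert_eq!((ymd >> 9) + 1980, 2020); // years count up from 1980
    assert_eq!((ymd >> 5) & 0x0f, 6);
    assert_eq!(ymd & 0x1f, 15);
}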
;
        for i in 0..8 {
            buffer <<= 1;
            buffer |= (self.buffer >> i) & 1;
        }
        self.output_vector.push(buffer);
        if PRINT_DEBUG {
            println!("push data: {:08b}", self.buffer);
            for i in 0..self.output_vector.len() {
                print!("{:08b}", self.output_vector[i]);
            }
            println!();
            println!("{:02x?}", self.output_vector);
        }
    }
    Ok(())
}

/* Runs when `buffer` has accumulated 8 bits (one byte). */
fn flush_to_output(&mut self) -> Result<(), Error> {
    let mut buffer = 0;
    for i in 0..8 {
        buffer <<= 1;
        buffer |= (self.buffer >> i) & 1;
    }
    self.output_vector.push(buffer);
    if PRINT_DEBUG {
        println!("push data: {:08b}", buffer);
        for i in 0..self.output_vector.len() {
            print!("{:08b}", self.output_vector[i]);
        }
        println!();
    }
    self.buffer = 0;
    self.bit_count = 0;
    Ok(())
}
}

/*
 Byte-level reader over the input.
 buffer:    array that takes in up to MAX_BUFFER_SIZE bytes of input at a time.
 buf_count: index of the byte in `buffer` currently being read.
 buf_size:  how far into `buffer` valid data extends.
 flag:      becomes false once there is no more data to read.
 file_size: records the size of the input file.
 input:     the input source.
*/
struct ByteReader<'a, T: Read> {
    buffer: [u8; MAX_BUFFER_SIZE],
    buf_count: usize,
    buf_size: usize,
    flag: bool,
    file_size: u32,
    input: &'a mut T,
}

impl<'a, T: Read> ByteReader<'a, T> {
    pub fn new(input: &'a mut T) -> Self {
        let mut reader = ByteReader {
            buffer: [0; MAX_BUFFER_SIZE],
            buf_count: 0,
            buf_size: 0,
            flag: true,
            file_size: 0,
            input,
        };
        let _ = reader.load_next_byte();
        reader
    }

    /* Runs on the first read and whenever `buffer` has been read to its end. */
    fn load_next_byte(&mut self) -> Result<(), std::io::Error> {
        match self.input.read(&mut self.buffer)? {
            0 => {
                self.flag = false;
                self.buf_size = 0;
            },
            n => {
                self.file_size += n as u32;
                self.buf_size = n;
                self.flag = true;
            }
        };
        Ok(())
    }

    /* Returns the byte at buf_count without advancing. */
    pub fn seek_byte(&mut self) -> u8 {
        self.buffer[self.buf_count]
    }

    /* Advances buf_count; when the end of `buffer` has been reached,
       loads the next block with load_next_byte. */
    pub fn next_byte(&mut self) {
        if self.buf_count + 1 < self.buf_size {
            self.buf_count += 1;
        } else {
            let _ = self.load_next_byte();
            self.buf_count = 0;
        }
    }

    /* Returns the byte at buf_count, then advances with next_byte. */
    pub fn get_byte(&mut self) -> u8 {
        let buffer = self.buffer[self.buf_count];
        self.next_byte();
        buffer
    }
}

/*
 Computes CRC-32.
 The implementation follows the explanation at
 https://www.slideshare.net/7shi/crc32
 divisor:     the bit pattern used as the divisor.
 non_divisor: the data being divided.
 buffer:      scratch byte currently being fed in.
 buf_count:   how many bits of `buffer` are still to be processed.
 first_count: counts the first four bytes, which must be inverted.
*/
struct Crc32 {
    divisor: u32,
    non_divisor: u32,
    buffer: u8,
    buf_count: u8,
    first_count: u8,
}

impl Crc32 {
    pub fn new() -> Self {
        Crc32 {
            divisor: 0b100110000010001110110110111,
            non_divisor: 0,
            buffer: 0,
            buf_count: 0,
            first_count: 0,
        }
    }

    /* Stores incoming data into non_divisor (first four bytes) or buffer. */
    pub fn push_buf(&mut self, buf: u8) {
        let mut buffer: u8 = 0;
        for i in 0..8 {
            buffer <<= 1;
            buffer |= (buf >> i) & 1;
        }
        if self.first_count < 4 {
            self.non_divisor <<= 8;
            self.non_divisor += !buffer as u32;
            self.first_count += 1;
        } else {
            self.buffer = buffer;
            self.buf_count = 8;
            self.bit_shift();
        }
    }

    /* When the leading bit is set, performs the division; otherwise just
       shifts the next bit of `buffer` in from the front. */
    fn bit_shift(&mut self) {
        for i in 0..self.buf_count {
            if self.non_divisor >= 2147483648 {
                self.non_divisor <<= 1;
                self.non_divisor |= (((self.buffer as u16) >> (self.buf_count - i - 1)) & 1) as u32;
                self.xor();
            } else {
                self.non_divisor <<= 1;
                self.non_divisor |= (((self.buffer as u16) >> (self.buf_count - i - 1)) & 1) as u32;
            }
        }
        self.buf_count = 0
    }

    /* Performs the division; in practice this is an xor. */
    fn xor(&mut self) {
        let buffer = self.non_divisor ^ self.divisor;
        self.non_divisor = buffer;
    }

    /* Computes the crc32 from the current non_divisor and returns it. */
    fn get_crc32(&mut self) -> u32 {
        self.push_buf(0);
        self.push_buf(0);
        self.push_buf(0);
        self.push_buf(0);
        let mut buffer: u32 = 0;
        for i in 0..32 {
            buffer <<= 1;
            buffer |= (self.non_divisor >> i) & 1;
        }
        if PRINT_DEBUG {
            println!("crc32: {:08x?}", !buffer);
        }
        !buffer
    }
}

/*
 Holds the zip local header, central directory header,
 end-of-central-directory header, and so on.
 buffer:      holds the header bytes
 before_size: size before compression
 after_size:  size after compression
 filename:    the file's name
 crc32:       the crc32 value
 hms:         packed hour/minute/second
 ymd:         packed year/month/day
*/
struct Header {
    buffer: Vec<u8>,
    before_size: u32,
    after_size: u32,
    filename: String,
    crc32: u32,
    hms: u16,
    ymd: u16,
}

impl Header {
    pub fn new(before_size: u32, after_size: u32, filename: impl Into<String>,
               crc32: u32, hms: u16, ymd: u16) -> Self {
        Header {
            buffer: Vec::new(),
            before_size,
            after_size,
            filename: filename.into(),
            crc32,
            hms,
            ymd,
        }
    }

    /* Appends a 32-bit value to the buffer, little-endian. */
    fn push32(&mut self, num: u32) {
        let a = num & 0b11111111;
        let b = (num >> 8) & 0b11111111;
        let c = (num >> 16) & 0b11111111;
        let d = (num >> 24) & 0b11111111;
        self.buffer.push(a as u8);
        self.buffer.push(b as u8);
        self.buffer.push(c as u8);
        self.buffer.push(d as u8);
    }

    /* Appends a 16-bit value to the buffer, little-endian. */
    fn push16(&mut self, num: u16) {
        let a = num & 0b11111111;
        let b = (num >> 8) & 0b11111111;
        self.buffer.push(a as u8);
        self.buffer.push(b as u8);
    }

    /* Appends the signature marking a PK0506 header. */
    fn push_pk0506(&mut self) {
        self.buffer.push(0x50);
        self.buffer.push(0x4b);
        self.buffer.push(0x05);
        self.buffer.push(0x06);
    }

    /* Appends the signature marking a PK0304 header. */
    fn push_pk0304(&mut self) {
        self.buffer.push(0x50);
        self.buffer.push(0x4b);
        self.buffer.push(0x03);
        self.buffer.push(0x04);
    }

    /* Appends the signature marking a PK0102 header. */
    fn push_pk0102(&mut self) {
        self.buffer.push(0x50);
        self.buffer.push(0x4b);
        self.buffer.push(0x01);
        self.buffer.push(0x02);
    }

    /* Appends the file name. */
    fn push_filename(&mut self) {
        let bytes: &[u8] = self.filename.as_bytes();
        for i in 0..bytes.len() {
            self.buffer.push(bytes[i]);
        }
    }

    /*
     Builds the local header from the stored fields.
     Layout (sizes in bytes):
      4: PK0304 signature
      2: version needed to extract (2.0)
      2: general-purpose flags (not used here)
      2: compression method (deflate: 0008)
      2: modification time
      2: modification date
      4: crc32
      4: compressed size (call it m)
      4: uncompressed size
      2: file-name length (call it n)
      2: extra-field length (none here)
      n: file name
      m: compressed data (not pushed here; appended when the file is written)
    */
    pub fn local_header(mut self) -> Vec<u8> {
        self.push_pk0304();
        self.push16(0x0014);
        self.push16(0x0000);
        self.push16(0x0008);
        self.push16(self.hms);
        self.push16(self.ymd);
        self.push32(self.crc32);
        self.push32(self.after_size);
        self.push32(self.before_size);
        self.push16(self.filename.len() as u16);
        self.push16(0x0000);
        self.push_filename();
        self.buffer
    }

    /*
     Builds the central directory header from the stored fields.
      4: PK0102 signature
      2: version made by (2.0 here)
      2: version needed to extract (2.0)
      2: general-purpose flags (not used here)
      2: compression method (deflate)
      2: modification time
      2: modification date
      4: crc32
      4: compressed size
      4: uncompressed size
      2: file-name length (call it n)
      2: extra-field length (unused, so 0)
      2: comment length (none here)
      2: if the archive is split, the number of the part holding the matching
         PK0304 header (not split, so 0)
      2: internal attributes of the file stored by the matching PK0304 (0 here)
      4: OS-level attributes of the file (0 here)
      4: offset of the matching PK0304 header
         (this archive holds a single file, so 0)
      n: file name
    */
    pub fn central_header(mut self) -> Vec<u8> {
        self.push_pk0102();
        self.push16(0x0314);
        self.push16(0x0014);
        self.push16(0x0000);
        self.push16(0x0008);
        self.push16(self.hms);
        self.push16(self.ymd);
        self.push32(self.crc32);
        self.push32(self.after_size);
        self.push32(self.before_size);
        self.push16(self.filename.len() as u16);
        self.push16(0x0000);
        self.push16(0x0000);
        self.push16(0x0000);
        self.push16(0x0000);
        self.push32(0x00000000);
        self.push32(0x00000000);
        self.push_filename();
        self.buffer
    }

    /*
     Builds the end-of-central-directory header from the given values.
      4: PK0506 signature
      2: if split, the number of this part (not split, so 0)
      2: if split, the number of the part holding the first PK0304 (0)
      2: if split, the number of files stored in this part (same as below)
      2: total number of compressed files (1 here)
      4: total size of the PK0102 headers
      4: offset at which the PK0102 headers start
      2: comment length (none here)
    */
    pub fn end_header(mut self, header_size: u32, header_start: u32) -> Vec
f.crc32, self.hms, self.ymd)
    }
}

/*
 Gets the file's last-modified date and time and returns each value in the
 form the zip header needs. The format comes from the header-structure
 section of the url below.
 https://hgotoh.jp/wiki/doku.php/documents/other/other-017
*/
fn time_data(filename: &str) -> (u16, u16) {
    let times;
    if let Ok(metadata) = metadata(filename) {
        if let Ok(time) = metadata.modified() {
            if let Ok(epoch) = time.duration_since(std::time::SystemTime::UNIX_EPOCH) {
                times = epoch.as_secs();
            } else {
                times = 0;
            }
        } else {
            let now = std::time::SystemTime::now();
            if let Ok(epoch) = now.duration_since(std::time::SystemTime::UNIX_EPOCH) {
                times = epoch.as_secs();
            } else {
                times = 0;
            }
        }
    } else {
        times = 0;
    }
    let data = Local.timestamp(times as i64, 0);
    let mut hms = 0;
    hms += (data.hour() as u32) << 11;
    hms += (data.minute() as u32) << 5;
    hms += (data.second() as u32) / 2;
    let mut ymd = 0;
    ymd += ((data.year() - 1980) as u32) << 9;
    ymd += (data.month() as u32) << 5;
    ymd += data.day() as u32;
    (hms as u16, ymd as u16)
}

/*
 Checks whether `window` contains a run equal to `check`.
 If it does, returns the distance; otherwise returns -1.
*/
fn match_check<T: Eq>(window: &[T], check: &[T]) -> isize {
    if window.len() < check.len() {
        return -1;
    }
    'outer: for i in 0..(window.len() - check.len() + 1) {
        for j in 0..check.len() {
            if window[i + j] != check[j] {
                continue 'outer;
            }
        }
        if PRINT_DEBUG {
            println!("{} {} {}", window.len(), check.len(), i);
        }
        return (window.len() - check.len() - i + 1) as isize;
    }
    -1
}

/* Converts a literal/length value to its fixed-Huffman code. */
fn changer(num: usize) -> (u8, u16) {
    let (len, re) = match num {
        0  ..= 143 => (8, num + 0x30 ),
        144..= 255 => (9, num + 0x91 ),
        256..= 279 => (7, num - 0x100),
        280..= 287 => (8, num - 0x58 ),
        _ => (0, 512),
    };
    (len, re as u16)
}

/* Looks up the length code and extra bits for a match length. */
fn length_extra(data: u16) -> (u16, u8, u16) {
    let (num, len, extra) = match data {
        3  ..= 10  => (data + 254, 0, 0),
        11 ..= 12  => (265, 1, (data - 3) & 0b1),
        13 ..= 14  => (266, 1, (data - 3) & 0b1),
        15 ..= 16  => (267, 1, (data - 3) & 0b1),
        17 ..= 18  => (268, 1, (data - 3) & 0b1),
        19 ..= 22  => (269, 2, (data - 3) & 0b11),
        23 ..= 26  => (270, 2, (data - 3) & 0b11),
        27 ..= 30  => (271, 2, (data - 3) & 0b11),
        31 ..= 34  => (272, 2, (data - 3) & 0b11),
        35 ..= 42  => (273, 3, (data - 3) & 0b111),
        43 ..= 50  => (274, 3, (data - 3) & 0b111),
        51 ..= 58  => (275, 3, (data - 3) & 0b111),
        59 ..= 66  => (276, 3, (data - 3) & 0b111),
        67 ..= 82  => (277, 4, (data - 3) & 0b1111),
        83 ..= 98  => (278, 4, (data - 3) & 0b1111),
        99 ..= 114 => (279, 4, (data - 3) & 0b1111),
        115..= 130 => (280, 4, (data - 3) & 0b1111),
        131..= 162 => (281, 5, (data - 3) & 0b11111),
        163..= 194 => (282, 5, (data - 3) & 0b11111),
        195..= 226 => (283, 5, (data - 3) & 0b11111),
        227..= 257 => (284, 5, (data - 3) & 0b11111),
        _ => (286, 6, 0)
    };
    (num as u16, len as u8, extra as u16)
}

/* Looks up the distance code and extra bits for a distance. */
fn distance_extra(data: u32) -> (u8, u8, u16) {
    let (num, dis, extra) = match data {
        1  ..= 4  => (data - 1, 0, 0),
        5  ..= 6  => (4, 1, (data - 1) & 0b1),
        7  ..= 8  => (5, 1, (data - 1) & 0b1),
        9  ..= 12 => (6, 2, (data - 1) & 0b11),
        13 ..= 16 => (7, 2, (data - 1) & 0b11),
        17 ..= 24 => (8, 3, (data - 1) & 0b111),
        25 ..= 32 => (9, 3, (data - 1) & 0b111),
        33 ..= 48 => (10, 4, (data - 1) & 0b1111),
        49 ..= 64 => (11, 4, (data - 1) & 0b1111),
<u8>{
        self.push_pk0506();
        self.push16(0x0000);
        self.push16(0x0000);
        self.push16(0x0001);
        self.push16(0x0001);
        self.push32(header_size);
        self.push32(header_start);
        self.push16(0x00);
        self.buffer
    }

    /* Implements clone. */
    pub fn clone(&self) -> Self {
        Header::new(self.before_size, self.after_size, self.filename.clone(), sel
identifier_body
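// A minimal sketch (not from the source above) of the common table-driven
// form of the same checksum the bitwise long division in `Crc32` computes:
// standard CRC-32 as ZIP stores it. Useful as a cross-check; all names here
// are illustrative.
fn crc32_table() -> [u32; 256] {
    let mut table = [0u32; 256];
    for i in 0..256u32 {
        let mut c = i;
        for _ in 0..8 {
            // 0xEDB88320 is the reflected CRC-32 polynomial.
            c = if c & 1 != 0 { 0xEDB8_8320 ^ (c >> 1) } else { c >> 1 };
        }
        table[i as usize] = c;
    }
    table
}

fn crc32_check(data: &[u8]) -> u32 {
    let table = crc32_table();
    let mut crc = 0xFFFF_FFFFu32; // initial value: all ones
    for &b in data {
        crc = table[((crc ^ b as u32) & 0xFF) as usize] ^ (crc >> 8);
    }
    !crc // final inversion, mirroring the `!buffer` in get_crc32
}

fn main() {
    // Standard CRC-32 check value.
    assert_eq!(crc32_check(b"123456789"), 0xCBF4_3926);
}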
lib.rs
0;
        for i in 0..8 {
            buffer <<= 1;
            buffer |= (self.buffer >> i) & 1;
        }
        self.output_vector.push(buffer);
        if PRINT_DEBUG {
            println!("push data: {:08b}", self.buffer);
            for i in 0..self.output_vector.len() {
                print!("{:08b}", self.output_vector[i]);
            }
            println!();
            println!("{:02x?}", self.output_vector);
        }
    }
    Ok(())
}

/* Runs when `buffer` has accumulated 8 bits (one byte). */
fn flush_to_output(&mut self) -> Result<(), Error> {
    let mut buffer = 0;
    for i in 0..8 {
        buffer <<= 1;
        buffer |= (self.buffer >> i) & 1;
    }
    self.output_vector.push(buffer);
    if PRINT_DEBUG {
        println!("push data: {:08b}", buffer);
        for i in 0..self.output_vector.len() {
            print!("{:08b}", self.output_vector[i]);
        }
        println!();
    }
    self.buffer = 0;
    self.bit_count = 0;
    Ok(())
}
}

/*
 Byte-level reader over the input.
 buffer:    array that takes in up to MAX_BUFFER_SIZE bytes of input at a time.
 buf_count: index of the byte in `buffer` currently being read.
 buf_size:  how far into `buffer` valid data extends.
 flag:      becomes false once there is no more data to read.
 file_size: records the size of the input file.
 input:     the input source.
*/
struct ByteReader<'a, T: Read> {
    buffer: [u8; MAX_BUFFER_SIZE],
    buf_count: usize,
    buf_size: usize,
    flag: bool,
    file_size: u32,
    input: &'a mut T,
}

impl<'a, T: Read> ByteReader<'a, T> {
    pub fn new(input: &'a mut T) -> Self {
        let mut reader = ByteReader {
            buffer: [0; MAX_BUFFER_SIZE],
            buf_count: 0,
            buf_size: 0,
            flag: true,
            file_size: 0,
            input,
        };
        let _ = reader.load_next_byte();
        reader
    }

    /* Runs on the first read and whenever `buffer` has been read to its end. */
    fn load_next_byte(&mut self) -> Result<(), std::io::Error> {
        match self.input.read(&mut self.buffer)? {
            0 => {
                self.flag = false;
                self.buf_size = 0;
            },
            n => {
                self.file_size += n as u32;
                self.buf_size = n;
                self.flag = true;
            }
        };
        Ok(())
    }

    /* Returns the byte at buf_count without advancing.
    pub fn seek_byte(&mut self) -> u8 {
        self.buffer[self.buf_count]
    }

    /* Advances buf_count; when the end of `buffer` has been reached,
       loads the next block with load_next_byte. */
    pub fn next_byte(&mut self) {
        if self.buf_count + 1 < self.buf_size {
            self.buf_count += 1;
        } else {
            let _ = self.load_next_byte();
            self.buf_count = 0;
        }
    }

    /* Returns the byte at buf_count, then advances with next_byte. */
    pub fn get_byte(&mut self) -> u8 {
        let buffer = self.buffer[self.buf_count];
        self.next_byte();
        buffer
    }
}

/*
 Computes CRC-32.
 The implementation follows the explanation at
 https://www.slideshare.net/7shi/crc32
 divisor:     the bit pattern used as the divisor.
 non_divisor: the data being divided.
 buffer:      scratch byte currently being fed in.
 buf_count:   how many bits of `buffer` are still to be processed.
 first_count: counts the first four bytes, which must be inverted.
*/
struct Crc32 {
    divisor: u32,
    non_divisor: u32,
    buffer: u8,
    buf_count: u8,
    first_count: u8,
}

impl Crc32 {
    pub fn new() -> Self {
        Crc32 {
            divisor: 0b100110000010001110110110111,
            non_divisor: 0,
            buffer: 0,
            buf_count: 0,
            first_count: 0,
        }
    }

    /* Stores incoming data into non_divisor (first four bytes) or buffer. */
    pub fn push_buf(&mut self, buf: u8) {
        let mut buffer: u8 = 0;
        for i in 0..8 {
            buffer <<= 1;
            buffer |= (buf >> i) & 1;
        }
        if self.first_count < 4 {
            self.non_divisor <<= 8;
            self.non_divisor += !buffer as u32;
            self.first_count += 1;
        } else {
            self.buffer = buffer;
            self.buf_count = 8;
            self.bit_shift();
        }
    }

    /* When the leading bit is set, performs the division; otherwise just
       shifts the next bit of `buffer` in from the front. */
    fn bit_shift(&mut self) {
        for i in 0..self.buf_count {
            if self.non_divisor >= 2147483648 {
                self.non_divisor <<= 1;
                self.non_divisor |= (((self.buffer as u16) >> (self.buf_count - i - 1)) & 1) as u32;
                self.xor();
            } else {
                self.non_divisor <<= 1;
                self.non_divisor |= (((self.buffer as u16) >> (self.buf_count - i - 1)) & 1) as u32;
            }
        }
        self.buf_count = 0
    }

    /* Performs the division; in practice this is an xor. */
    fn xor(&mut self) {
        let buffer = self.non_divisor ^ self.divisor;
        self.non_divisor = buffer;
    }

    /* Computes the crc32 from the current non_divisor and returns it. */
    fn get_crc32(&mut self) -> u32 {
        self.push_buf(0);
        self.push_buf(0);
        self.push_buf(0);
        self.push_buf(0);
        let mut buffer: u32 = 0;
        for i in 0..32 {
            buffer <<= 1;
            buffer |= (self.non_divisor >> i) & 1;
        }
        if PRINT_DEBUG {
            println!("crc32: {:08x?}", !buffer);
        }
        !buffer
    }
}

/*
 Holds the zip local header, central directory header,
 end-of-central-directory header, and so on.
 buffer:      holds the header bytes
 before_size: size before compression
 after_size:  size after compression
 filename:    the file's name
 crc32:       the crc32 value
 hms:         packed hour/minute/second
 ymd:         packed year/month/day
*/
struct Header {
    buffer: Vec<u8>,
    before_size: u32,
    after_size: u32,
    filename: String,
    crc32: u32,
    hms: u16,
    ymd: u16,
}

impl Header {
    pub fn new(before_size: u32, after_size: u32, filename: impl Into<String>,
               crc32: u32, hms: u16, ymd: u16) -> Self {
        Header {
            buffer: Vec::new(),
            before_size,
            after_size,
            filename: filename.into(),
            crc32,
            hms,
            ymd,
        }
    }

    /* Appends a 32-bit value to the buffer, little-endian. */
    fn push32(&mut self, num: u32) {
        let a = num & 0b11111111;
        let b = (num >> 8) & 0b11111111;
        let c = (num >> 16) & 0b11111111;
        let d = (num >> 24) & 0b11111111;
        self.buffer.push(a as u8);
        self.buffer.push(b as u8);
        self.buffer.push(c as u8);
        self.buffer.push(d as u8);
    }

    /* Appends a 16-bit value to the buffer, little-endian. */
    fn push16(&mut self, num: u16) {
        let a = num & 0b11111111;
        let b = (num >> 8) & 0b11111111;
        self.buffer.push(a as u8);
        self.buffer.push(b as u8);
    }

    /* Appends the signature marking a PK0506 header. */
    fn push_pk0506(&mut self) {
        self.buffer.push(0x50);
        self.buffer.push(0x4b);
        self.buffer.push(0x05);
        self.buffer.push(0x06);
    }

    /* Appends the signature marking a PK0304 header. */
    fn push_pk0304(&mut self) {
        self.buffer.push(0x50);
        self.buffer.push(0x4b);
        self.buffer.push(0x03);
        self.buffer.push(0x04);
    }

    /* Appends the signature marking a PK0102 header. */
    fn push_pk0102(&mut self) {
        self.buffer.push(0x50);
        self.buffer.push(0x4b);
        self.buffer.push(0x01);
        self.buffer.push(0x02);
    }

    /* Appends the file name. */
    fn push_filename(&mut self) {
        let bytes: &[u8] = self.filename.as_bytes();
        for i in 0..bytes.len() {
            self.buffer.push(bytes[i]);
        }
    }

    /*
     Builds the local header from the stored fields.
     Layout (sizes in bytes):
      4: PK0304 signature
      2: version needed to extract (2.0)
      2: general-purpose flags (not used here)
      2: compression method (deflate: 0008)
      2: modification time
      2: modification date
      4: crc32
      4: compressed size (call it m)
      4: uncompressed size
      2: file-name length (call it n)
      2: extra-field length (none here)
      n: file name
      m: compressed data (not pushed here; appended when the file is written)
    */
    pub fn local_header(mut self) -> Vec<u8> {
        self.push_pk0304();
        self.push16(0x0014);
        self.push16(0x0000);
        self.push16(0x0008);
        self.push16(self.hms);
        self.push16(self.ymd);
        self.push32(self.crc32);
        self.push32(self.after_size);
        self.push32(self.before_size);
        self.push16(self.filename.len() as u16);
        self.push16(0x0000);
        self.push_filename();
        self.buffer
    }

    /*
     Builds the central directory header from the stored fields.
      4: PK0102 signature
      2: version made by (2.0 here)
      2: version needed to extract (2.0)
      2: general-purpose flags (not used here)
      2: compression method (deflate)
      2: modification time
      2: modification date
      4: crc32
      4: compressed size
      4: uncompressed size
      2: file-name length (call it n)
      2: extra-field length (unused, so 0)
      2: comment length (none here)
      2: if the archive is split, the number of the part holding the matching
         PK0304 header (not split, so 0)
      2: internal attributes of the file stored by the matching PK0304 (0 here)
      4: OS-level attributes of the file (0 here)
      4: offset of the matching PK0304 header
         (this archive holds a single file, so 0)
      n: file name
    */
    pub fn central_header(mut self) -> Vec<u8> {
        self.push_pk0102();
        self.push16(0x0314);
        self.push16(0x0014);
        self.push16(0x0000);
        self.push16(0x0008);
        self.push16(self.hms);
        self.push16(self.ymd);
        self.push32(self.crc32);
        self.push32(self.after_size);
        self.push32(self.before_size);
        self.push16(self.filename.len() as u16);
        self.push16(0x0000);
        self.push16(0x0000);
        self.push16(0x0000);
        self.push16(0x0000);
        self.push32(0x00000000);
        self.push32(0x00000000);
        self.push_filename();
        self.buffer
    }

    /*
     Builds the end-of-central-directory header from the given values.
      4: PK0506 signature
      2: if split, the number of this part (not split, so 0)
      2: if split, the number of the part holding the first PK0304 (0)
      2: if split, the number of files stored in this part (same as below)
      2: total number of compressed files (1 here)
      4: total size of the PK0102 headers
      4: offset at which the PK0102 headers start
      2: comment length (none here)
    */
    pub fn end_header(mut self, header_size: u32, header_start: u32) -> Vec<u8> {
        self.push_pk0506();
        self.push16(0x0000);
        self.push16(0x0000);
        self.push16(0x0001);
        self.push16(0x0001);
        self.push32(header_size);
        self.push32(header_start);
        self.push16(0x00);
        self.buffer
    }

    /* Implements clone. */
    pub fn clone(&self) -> Self {
        Header::new(self.before_size, self.after_size, self.filename.clone(),
                    self.crc32, self.hms, self.ymd)
    }
}

/*
 Gets the file's last-modified date and time and returns each value in the
 form the zip header needs. The format comes from the header-structure
 section of the url below.
 https://hgotoh.jp/wiki/doku.php/documents/other/other-017
*/
fn time_data(filename: &str) -> (u16, u16) {
    let times;
    if let Ok(metadata) = metadata(filename) {
        if let Ok(time) = metadata.modified() {
            if let Ok(epoch) = time.duration_since(std::time::SystemTime::UNIX_EPOCH) {
                times = epoch.as_secs();
            } else {
                times = 0;
            }
        } else {
            let now = std::time::SystemTime::now();
            if let Ok(epoch) = now.duration_since(std::time::SystemTime::UNIX_EPOCH) {
                times = epoch.as_secs();
            } else {
                times = 0;
            }
        }
    } else {
        times = 0;
    }
    let data = Local.timestamp(times as i64, 0);
    let mut hms = 0;
    hms += (data.hour() as u32) << 11;
    hms += (data.minute() as u32) << 5;
    hms += (data.second() as u32) / 2;
    let mut ymd = 0;
    ymd += ((data.year() - 1980) as u32) << 9;
    ymd += (data.month() as u32) << 5;
    ymd += data.day() as u32;
    (hms as u16, ymd as u16)
}

/*
 Checks whether `window` contains a run equal to `check`.
 If it does, returns the distance; otherwise returns -1.
*/
fn match_check<T: Eq>(window: &[T], check: &[T]) -> isize {
    if window.len() < check.len() {
        return -1;
    }
    'outer: for i in 0..(window.len() - check.len() + 1) {
        for j in 0..check.len() {
            if window[i + j] != check[j] {
                continue 'outer;
            }
        }
        if PRINT_DEBUG {
            println!("{} {} {}", window.len(), check.len(), i);
        }
        return (window.len() - check.len() - i + 1) as isize;
    }
    -1
}

/* Converts a literal/length value to its fixed-Huffman code. */
fn changer(num: usize) -> (u8, u16) {
    let (len, re) = match num {
        0  ..= 143 => (8, num + 0x30 ),
        144..= 255 => (9, num + 0x91 ),
        256..= 279 => (7, num - 0x100),
        280..= 287 => (8, num - 0x58 ),
        _ => (0, 512),
    };
    (len, re as u16)
}

/* Looks up the length code and extra bits for a match length. */
fn length_extra(data: u16) -> (u16, u8, u16) {
    let (num, len, extra) = match data {
        3  ..= 10  => (data + 254, 0, 0),
        11 ..= 12  => (265, 1, (data - 3) & 0b1),
        13 ..= 14  => (266, 1, (data - 3) & 0b1),
        15 ..= 16  => (267, 1, (data - 3) & 0b1),
        17 ..= 18  => (268, 1, (data - 3) & 0b1),
        19 ..= 22  => (269, 2, (data - 3) & 0b11),
        23 ..= 26  => (270, 2, (data - 3) & 0b11),
        27 ..= 30  => (271, 2, (data - 3) & 0b11),
        31 ..= 34  => (272, 2, (data - 3) & 0b11),
        35 ..= 42  => (273, 3, (data - 3) & 0b111),
        43 ..= 50  => (274, 3, (data - 3) & 0b111),
        51 ..= 58  => (275, 3, (data - 3) & 0b111),
        59 ..= 66  => (276, 3, (data - 3) & 0b111),
        67 ..= 82  => (277, 4, (data - 3) & 0b1111),
        83 ..= 98  => (278, 4, (data - 3) & 0b1111),
        99 ..= 114 => (279, 4, (data - 3) & 0b1111),
        115..= 130 => (280, 4, (data - 3) & 0b1111),
        131..= 162 => (281, 5, (data - 3) & 0b11111),
        163..= 194 => (282, 5, (data - 3) & 0b11111),
        195..= 226 => (283, 5, (data - 3) & 0b11111),
        227..= 257 => (284, 5, (data - 3) & 0b11111),
        _ => (286, 6, 0)
    };
    (num as u16, len as u8, extra as u16)
}

/* Looks up the distance code and extra bits for a distance. */
fn distance_extra(data: u32) -> (u8, u8, u16) {
    let (num, dis, extra) = match data {
        1  ..= 4  => (data - 1, 0, 0),
        5  ..= 6  => (4, 1, (data - 1) & 0b1),
        7  ..= 8  => (5, 1, (data - 1) & 0b1),
        9  ..= 12 => (6, 2, (data - 1) & 0b11),
        13 ..= 16 => (7, 2, (data - 1) & 0b11),
        17 ..= 24 => (8, 3, (data - 1) & 0b111),
        25 ..= 32 => (9, 3, (data - 1) & 0b111),
        33 ..= 48 => (10, 4, (data - 1) & 0b1111),
        49 ..= 64 => (11, 4, (data - 1) & 0b1111),
*/
random_line_split
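// A standalone sketch (not from the source above) of the bit packing that
// `time_data` performs: ZIP headers carry modification times in the two
// 16-bit MS-DOS fields, where seconds are stored in 2-second units and years
// are offset from 1980. Names here are illustrative.
fn dos_time(hour: u16, minute: u16, second: u16) -> u16 {
    (hour << 11) | (minute << 5) | (second / 2)
}

fn dos_date(year: u16, month: u16, day: u16) -> u16 {
    ((year - 1980) << 9) | (month << 5) | day
}

fn main() {
    // 2019-06-15 12:34:56 packed into the two little-endian u16 fields.
    assert_eq!(dos_time(12, 34, 56), (12 << 11) | (34 << 5) | 28);
    assert_eq!(dos_date(2019, 6, 15), (39 << 9) | (6 << 5) | 15);
}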
main.rs
// Copyright 2018-2019 Peter Williams <[email protected]> // Licensed under the MIT License. //! The main CLI driver logic. #![deny(missing_docs)] #![allow(proc_macro_derive_resolution_fallback)] extern crate app_dirs; extern crate chrono; #[macro_use] extern crate clap; // for arg_enum! #[macro_use] extern crate diesel; #[macro_use] extern crate failure; extern crate google_drive3; extern crate humansize; extern crate hyper; extern crate hyper_native_tls; extern crate petgraph; extern crate serde; #[macro_use] extern crate serde_derive; extern crate serde_json; extern crate structopt; #[macro_use] extern crate tcprint; extern crate tempfile; extern crate timeago; extern crate url; extern crate yup_oauth2; use diesel::prelude::*; use std::collections::hash_map::Entry; use std::ffi::OsStr; use std::process; use std::result::Result as StdResult; use structopt::StructOpt; use tcprint::ColorPrintState; mod accounts; mod app; mod colors; mod database; mod errors; mod google_apis; mod schema; mod token_storage; use app::Application; use colors::Colors; use errors::Result; /// Information used to find out app-specific config files, e.g. the /// application secret. const APP_INFO: app_dirs::AppInfo = app_dirs::AppInfo { name: "drorg", author: "drorg", }; /// Open a URL in a browser. /// /// HACK: I'm sure there's a nice cross-platform crate to do this, but /// I customize it to use my Google-specific Firefox profile. fn open_url<S: AsRef<OsStr>>(url: S) -> Result<()> { use std::process::Command; let status = Command::new("firefox-wayland") .args(&["-P", "google", "--new-window"]) .arg(url) .status()?; if status.success() { Ok(()) } else { Err(format_err!("browser command exited with an error code")) } } /// Show detailed information about one or more documents. #[derive(Debug, StructOpt)] pub struct DrorgInfoOptions { #[structopt(help = "A document specifier (name, ID,...)")] spec: String, } impl DrorgInfoOptions { fn cli(self, app: &mut Application) -> Result<i32> { use std::collections::HashMap; app.maybe_sync_all_accounts()?; let mut linkages = HashMap::new(); let results = app.get_docs().process(&self.spec)?; // note: avoid name clash with db table let mut first = true; for doc in results { if first { first = false; } else { tcprintln!(app.ps, ("")); } tcprintln!(app.ps, [hl: "Name:"], (" "), [green: "{}", doc.name]); tcprintln!(app.ps, [hl: "MIME-type:"], (" {}", doc.mime_type)); tcprintln!(app.ps, [hl: "Size:"], (" {}", doc.human_size().unwrap_or_else(|| "N/A".to_owned()))); tcprintln!(app.ps, [hl: "Modified:"], (" {}", doc.utc_mod_time().to_rfc3339())); tcprintln!(app.ps, [hl: "ID:"], (" {}", doc.id)); tcprintln!(app.ps, [hl: "Starred?:"], (" {}", if doc.starred { "yes" } else { "no" })); tcprintln!(app.ps, [hl: "Trashed?:"], (" {}", if doc.trashed { "yes" } else { "no" })); let accounts = doc.accounts(app)?; let mut path_reprs = Vec::new(); for acct in &accounts { if let Entry::Vacant(e) = linkages.entry(acct.id) { let table = app.load_linkage_table(acct.id, true)?; e.insert(table); } let link_table = linkages.get(&acct.id).unwrap(); for p in link_table.find_parent_paths(&doc.id).iter().map(|id_path| { // This is not efficient, and it's panicky, but meh. let names: Vec<_> = id_path .iter() .map(|docid| { use schema::docs::dsl::*; let elem = docs .filter(id.eq(&docid)) .first::<database::Doc>(&app.conn) .unwrap(); elem.name }) .collect(); names.join(" > ") }) { path_reprs.push(format!("{}: {}", acct.email, p)); }
_n => { tcprintln!(app.ps, [hl: "Paths::"]); for p in path_reprs { tcprintln!(app.ps, (" {}", p)); } } } tcprintln!(app.ps, [hl: "Open-URL:"], (" {}", doc.open_url())); } Ok(0) } } /// List documents. #[derive(Debug, StructOpt)] pub struct DrorgListOptions { #[structopt(help = "A document specifier (name, ID,...)", required_unless = "all")] spec: Option<String>, #[structopt( long = "all", help = "List all documents in the database", conflicts_with = "spec" )] all: bool, } impl DrorgListOptions { fn cli(self, app: &mut Application) -> Result<i32> { app.maybe_sync_all_accounts()?; let results = if self.all { app.get_docs().all() } else { app.get_docs().process(&self.spec.unwrap()) }?; app.print_doc_list(results)?; Ok(0) } } /// The command-line action to add a login to the credentials DB. /// /// Note that "email" doesn't really have to be an email address -- it can be /// any random string; the user chooses which account to login-to /// interactively during the login process. But I think it makes sense from a /// UI perspective to just call it "email" and let the user figure out for /// themselves that they can give it some other value if they feel like it. #[derive(Debug, StructOpt)] pub struct DrorgLoginOptions {} impl DrorgLoginOptions { /// The auth flow here will print out a message on the console, asking the /// user to go to a URL, following instructions, and paste a string back /// into the client. /// /// We want to allow the user to login to multiple accounts /// simultaneously. Therefore we set up the authenticator flow with a null /// storage, and then add the resulting token to the disk storage. fn cli(self, app: &mut Application) -> Result<i32> { let mut account = accounts::Account::default(); // First we need to get authorization. account.authorize_interactively(&app.secret)?; // Now, for bookkeeping, we look up the email address associated with // it. We could just have the user specify an identifier, but I went // to the trouble to figure out how to do this right, so... let email_addr = account.fetch_email_address(&app.secret)?; tcprintln!(app.ps, ("Successfully logged in to "), [hl: "{}", email_addr], (".")); // We might need to add this account to the database. To have sensible // foreign key relations, the email address is not the primary key of // the accounts table, so we need to see whether there's already an // existing row for this account (which could happen if the user // re-logs-in, etc.) If we add a new row, we have to do this awkward // bit where we insert and then immediately query for the row we just // added (cf https://github.com/diesel-rs/diesel/issues/771 ). { use diesel::prelude::*; use schema::accounts::dsl::*; let maybe_row = accounts .filter(email.eq(&email_addr)) .first::<database::Account>(&app.conn) .optional()?; let row_id = if let Some(row) = maybe_row { row.id } else { let new_account = database::NewAccount::new(&email_addr); diesel::replace_into(accounts) .values(&new_account) .execute(&app.conn)?; let row = accounts .filter(email.eq(&email_addr)) .first::<database::Account>(&app.conn)?; row.id }; account.data.db_id = row_id; // JSON will be rewritten in acquire_change_page_token below. } // Initialize our token for checking for changes to the documents. We // do this *before* scanning the complete listing; there's going to be // a race condition either way, but the one that happens with this // ordering seems like it would be more benign. account.acquire_change_page_token(&app.secret)?; // OK, now actually slurp in the list of documents. 
tcprintln!(app.ps, ("Scanning documents...")); app.import_documents(&mut account)?; // All done. tcprintln!(app.ps, ("Done.")); Ok(0) } } /// List the files in a folder. /// /// TODO: this name is going to be super confusing compared to `list`. #[derive(Debug, StructOpt)] pub struct DrorgLsOptions { #[structopt(help = "A folder specifier (name, ID,...)")] spec: String, } impl DrorgLsOptions { fn cli(self, app: &mut Application) -> Result<i32> { use std::collections::HashSet; app.maybe_sync_all_accounts()?; let doc = app.get_docs().process_one(self.spec)?; // We *could* just proceed and see if there's anything that Drive // thinks is a child of this doc, but it seems like the more sensible // UX is to make this a hard failure. You could imagine adding a CLI // option to override this behavior. if!doc.is_folder() { return Err(format_err!("the selected document is not a folder")); } // This is another operation which can be surprising when you think // about the behavior when a doc belongs to more than one account. We // find children for each account separately and merge the results. let accounts = doc.accounts(app)?; let mut child_ids = HashSet::new(); if accounts.len() > 1 { tcreport!(app.ps, warning: "folder belongs to multiple accounts; \ their listings will be merged"); } for acct in &accounts { let table = app.load_linkage_table(acct.id, false)?; let node = match table.nodes.get(&doc.id) { Some(n) => *n, None => continue, }; for child_idx in table.graph.neighbors(node) { child_ids.insert(table.graph[child_idx].clone()); } } // Is this the best ordering? let mut docs = app.ids_to_docs(&child_ids); docs.sort_by_key(|d| d.utc_mod_time()); docs.reverse(); app.print_doc_list(docs)?; app.set_cwd(&doc)?; Ok(0) } } /// Open a document. #[derive(Debug, StructOpt)] pub struct DrorgOpenOptions { #[structopt(help = "A document specifier (name, ID,...)")] spec: String, } impl DrorgOpenOptions { fn cli(self, app: &mut Application) -> Result<i32> { app.maybe_sync_all_accounts()?; let doc = app.get_docs().process_one(self.spec)?; open_url(doc.open_url())?; Ok(0) } } /// List recently-used documents. #[derive(Debug, StructOpt)] pub struct DrorgRecentOptions { #[structopt( short = "n", help = "Limit output to this many documents", default_value = "10" )] limit: i64, } impl DrorgRecentOptions { fn cli(self, app: &mut Application) -> Result<i32> { use schema::docs::dsl::*; app.maybe_sync_all_accounts()?; let listing = docs .order(modified_time.desc()) .limit(self.limit) .load::<database::Doc>(&app.conn)?; app.print_doc_list(listing)?; Ok(0) } } /// Synchronize with the cloud. #[derive(Debug, StructOpt)] pub struct DrorgSyncOptions { #[structopt(long = "rebuild", help = "Rebuild all account data from scratch")] rebuild: bool, } impl DrorgSyncOptions { fn cli(self, app: &mut Application) -> Result<i32> { if!self.rebuild { // Lightweight sync app.options.sync = app::SyncOption::Yes; app.maybe_sync_all_accounts()?; } else { // Heavyweight -- rebuild account data from scratch. for maybe_info in accounts::get_accounts()? { let (email, mut account) = maybe_info?; // TODO: delete all links involving documents from this account. // To be safest, perhaps we should destroy all database rows // associated with this account? // Redo the initialization rigamarole from the "login" command. tcprintln!(app.ps, ("Rebuilding "), [hl: "{}", email], ("...")); account.acquire_change_page_token(&app.secret)?; app.import_documents(&mut account)?; } } Ok(0) } } /// Print the URL of a document. 
#[derive(Debug, StructOpt)] pub struct DrorgUrlOptions { #[structopt(help = "A document specifier (name, ID,...)")] spec: String, } impl DrorgUrlOptions { fn cli(self, app: &mut Application) -> Result<i32> { app.maybe_sync_all_accounts()?; let doc = app.get_docs().process_one(self.spec)?; println!("{}", doc.open_url()); Ok(0) } } /// The main StructOpt type for dispatching subcommands. #[derive(Debug, StructOpt)] pub enum DrorgSubcommand { #[structopt(name = "info")] /// Show detailed information about one or more documents Info(DrorgInfoOptions), #[structopt(name = "list")] /// List documents in a compact format (note: `ls` is different) List(DrorgListOptions), #[structopt(name = "login")] /// Add a Google account to be monitored Login(DrorgLoginOptions), #[structopt(name = "ls")] /// List files in a folder (note: `list` is different) Ls(DrorgLsOptions), #[structopt(name = "open")] /// Open a document in a web browser Open(DrorgOpenOptions), #[structopt(name = "recent")] /// List recently-used documents Recent(DrorgRecentOptions), #[structopt(name = "sync")] /// Synchronize with the cloud Sync(DrorgSyncOptions), #[structopt(name = "url")] /// Print the URL to open a document Url(DrorgUrlOptions), } /// The main StructOpt argument dispatcher. #[derive(Debug, StructOpt)] #[structopt(name = "drorg", about = "Organize documents on Google Drive.")] pub struct DrorgCli { #[structopt(subcommand)] command: DrorgSubcommand, #[structopt(flatten)] app_opts: app::ApplicationOptions, } impl DrorgCli { fn cli(self) -> StdResult<i32, (failure::Error, Option<ColorPrintState<Colors>>)> { let mut app = match Application::initialize(self.app_opts) { Ok(a) => a, Err(e) => return Err((e, None)), // no colors :-( }; let result = match self.command { DrorgSubcommand::Info(opts) => opts.cli(&mut app), DrorgSubcommand::List(opts) => opts.cli(&mut app), DrorgSubcommand::Login(opts) => opts.cli(&mut app), DrorgSubcommand::Ls(opts) => opts.cli(&mut app), DrorgSubcommand::Open(opts) => opts.cli(&mut app), DrorgSubcommand::Recent(opts) => opts.cli(&mut app), DrorgSubcommand::Sync(opts) => opts.cli(&mut app), DrorgSubcommand::Url(opts) => opts.cli(&mut app), }; result.map_err(|e| (e, Some(app.ps))) } } fn main() { let program = DrorgCli::from_args(); process::exit(match program.cli() { Ok(code) => code, Err((e, maybe_ps)) => { if let Some(mut ps) = maybe_ps { tcprintln!(ps, [red: "fatal error"], (" in drorg")); for cause in e.iter_chain() { tcprintln!(ps, (" "), [red: "caused by:"], (" {}", cause)); } } else { eprintln!("fatal error in drorg"); for cause in e.iter_chain() { eprintln!(" caused by: {}", cause); } } 1 } }); }
} match path_reprs.len() { 0 => tcprintln!(app.ps, [hl: "Path:"], (" [none??]")), 1 => tcprintln!(app.ps, [hl: "Path:"], (" {}", path_reprs[0])),
random_line_split
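// A sketch, not part of drorg: `open_url` above deliberately shells out to a
// Firefox binary with a hard-coded Google profile, as its HACK comment
// admits. If portability mattered more than the profile pinning, a crate
// such as `open` could do the same job. This assumes `open` is added to
// Cargo.toml; the function name is hypothetical.
fn open_url_portable(url: &str) -> std::io::Result<()> {
    // Delegates to the platform's default URL handler.
    open::that(url)
}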
main.rs
// Copyright 2018-2019 Peter Williams <[email protected]> // Licensed under the MIT License. //! The main CLI driver logic. #![deny(missing_docs)] #![allow(proc_macro_derive_resolution_fallback)] extern crate app_dirs; extern crate chrono; #[macro_use] extern crate clap; // for arg_enum! #[macro_use] extern crate diesel; #[macro_use] extern crate failure; extern crate google_drive3; extern crate humansize; extern crate hyper; extern crate hyper_native_tls; extern crate petgraph; extern crate serde; #[macro_use] extern crate serde_derive; extern crate serde_json; extern crate structopt; #[macro_use] extern crate tcprint; extern crate tempfile; extern crate timeago; extern crate url; extern crate yup_oauth2; use diesel::prelude::*; use std::collections::hash_map::Entry; use std::ffi::OsStr; use std::process; use std::result::Result as StdResult; use structopt::StructOpt; use tcprint::ColorPrintState; mod accounts; mod app; mod colors; mod database; mod errors; mod google_apis; mod schema; mod token_storage; use app::Application; use colors::Colors; use errors::Result; /// Information used to find out app-specific config files, e.g. the /// application secret. const APP_INFO: app_dirs::AppInfo = app_dirs::AppInfo { name: "drorg", author: "drorg", }; /// Open a URL in a browser. /// /// HACK: I'm sure there's a nice cross-platform crate to do this, but /// I customize it to use my Google-specific Firefox profile. fn open_url<S: AsRef<OsStr>>(url: S) -> Result<()> { use std::process::Command; let status = Command::new("firefox-wayland") .args(&["-P", "google", "--new-window"]) .arg(url) .status()?; if status.success() { Ok(()) } else { Err(format_err!("browser command exited with an error code")) } } /// Show detailed information about one or more documents. #[derive(Debug, StructOpt)] pub struct DrorgInfoOptions { #[structopt(help = "A document specifier (name, ID,...)")] spec: String, } impl DrorgInfoOptions { fn cli(self, app: &mut Application) -> Result<i32> { use std::collections::HashMap; app.maybe_sync_all_accounts()?; let mut linkages = HashMap::new(); let results = app.get_docs().process(&self.spec)?; // note: avoid name clash with db table let mut first = true; for doc in results { if first { first = false; } else { tcprintln!(app.ps, ("")); } tcprintln!(app.ps, [hl: "Name:"], (" "), [green: "{}", doc.name]); tcprintln!(app.ps, [hl: "MIME-type:"], (" {}", doc.mime_type)); tcprintln!(app.ps, [hl: "Size:"], (" {}", doc.human_size().unwrap_or_else(|| "N/A".to_owned()))); tcprintln!(app.ps, [hl: "Modified:"], (" {}", doc.utc_mod_time().to_rfc3339())); tcprintln!(app.ps, [hl: "ID:"], (" {}", doc.id)); tcprintln!(app.ps, [hl: "Starred?:"], (" {}", if doc.starred { "yes" } else { "no" })); tcprintln!(app.ps, [hl: "Trashed?:"], (" {}", if doc.trashed { "yes" } else { "no" })); let accounts = doc.accounts(app)?; let mut path_reprs = Vec::new(); for acct in &accounts { if let Entry::Vacant(e) = linkages.entry(acct.id) { let table = app.load_linkage_table(acct.id, true)?; e.insert(table); } let link_table = linkages.get(&acct.id).unwrap(); for p in link_table.find_parent_paths(&doc.id).iter().map(|id_path| { // This is not efficient, and it's panicky, but meh. 
let names: Vec<_> = id_path .iter() .map(|docid| { use schema::docs::dsl::*; let elem = docs .filter(id.eq(&docid)) .first::<database::Doc>(&app.conn) .unwrap(); elem.name }) .collect(); names.join(" > ") }) { path_reprs.push(format!("{}: {}", acct.email, p)); } } match path_reprs.len() { 0 => tcprintln!(app.ps, [hl: "Path:"], (" [none??]")), 1 => tcprintln!(app.ps, [hl: "Path:"], (" {}", path_reprs[0])), _n => { tcprintln!(app.ps, [hl: "Paths::"]); for p in path_reprs { tcprintln!(app.ps, (" {}", p)); } } } tcprintln!(app.ps, [hl: "Open-URL:"], (" {}", doc.open_url())); } Ok(0) } } /// List documents. #[derive(Debug, StructOpt)] pub struct DrorgListOptions { #[structopt(help = "A document specifier (name, ID,...)", required_unless = "all")] spec: Option<String>, #[structopt( long = "all", help = "List all documents in the database", conflicts_with = "spec" )] all: bool, } impl DrorgListOptions { fn cli(self, app: &mut Application) -> Result<i32> { app.maybe_sync_all_accounts()?; let results = if self.all { app.get_docs().all() } else { app.get_docs().process(&self.spec.unwrap()) }?; app.print_doc_list(results)?; Ok(0) } } /// The command-line action to add a login to the credentials DB. /// /// Note that "email" doesn't really have to be an email address -- it can be /// any random string; the user chooses which account to login-to /// interactively during the login process. But I think it makes sense from a /// UI perspective to just call it "email" and let the user figure out for /// themselves that they can give it some other value if they feel like it. #[derive(Debug, StructOpt)] pub struct DrorgLoginOptions {} impl DrorgLoginOptions { /// The auth flow here will print out a message on the console, asking the /// user to go to a URL, following instructions, and paste a string back /// into the client. /// /// We want to allow the user to login to multiple accounts /// simultaneously. Therefore we set up the authenticator flow with a null /// storage, and then add the resulting token to the disk storage. fn cli(self, app: &mut Application) -> Result<i32> { let mut account = accounts::Account::default(); // First we need to get authorization. account.authorize_interactively(&app.secret)?; // Now, for bookkeeping, we look up the email address associated with // it. We could just have the user specify an identifier, but I went // to the trouble to figure out how to do this right, so... let email_addr = account.fetch_email_address(&app.secret)?; tcprintln!(app.ps, ("Successfully logged in to "), [hl: "{}", email_addr], (".")); // We might need to add this account to the database. To have sensible // foreign key relations, the email address is not the primary key of // the accounts table, so we need to see whether there's already an // existing row for this account (which could happen if the user // re-logs-in, etc.) If we add a new row, we have to do this awkward // bit where we insert and then immediately query for the row we just // added (cf https://github.com/diesel-rs/diesel/issues/771 ). 
{ use diesel::prelude::*; use schema::accounts::dsl::*; let maybe_row = accounts .filter(email.eq(&email_addr)) .first::<database::Account>(&app.conn) .optional()?; let row_id = if let Some(row) = maybe_row { row.id } else { let new_account = database::NewAccount::new(&email_addr); diesel::replace_into(accounts) .values(&new_account) .execute(&app.conn)?; let row = accounts .filter(email.eq(&email_addr)) .first::<database::Account>(&app.conn)?; row.id }; account.data.db_id = row_id; // JSON will be rewritten in acquire_change_page_token below. } // Initialize our token for checking for changes to the documents. We // do this *before* scanning the complete listing; there's going to be // a race condition either way, but the one that happens with this // ordering seems like it would be more benign. account.acquire_change_page_token(&app.secret)?; // OK, now actually slurp in the list of documents. tcprintln!(app.ps, ("Scanning documents...")); app.import_documents(&mut account)?; // All done. tcprintln!(app.ps, ("Done.")); Ok(0) } } /// List the files in a folder. /// /// TODO: this name is going to be super confusing compared to `list`. #[derive(Debug, StructOpt)] pub struct DrorgLsOptions { #[structopt(help = "A folder specifier (name, ID,...)")] spec: String, } impl DrorgLsOptions { fn cli(self, app: &mut Application) -> Result<i32> { use std::collections::HashSet; app.maybe_sync_all_accounts()?; let doc = app.get_docs().process_one(self.spec)?; // We *could* just proceed and see if there's anything that Drive // thinks is a child of this doc, but it seems like the more sensible // UX is to make this a hard failure. You could imagine adding a CLI // option to override this behavior. if!doc.is_folder() { return Err(format_err!("the selected document is not a folder")); } // This is another operation which can be surprising when you think // about the behavior when a doc belongs to more than one account. We // find children for each account separately and merge the results. let accounts = doc.accounts(app)?; let mut child_ids = HashSet::new(); if accounts.len() > 1 { tcreport!(app.ps, warning: "folder belongs to multiple accounts; \ their listings will be merged"); } for acct in &accounts { let table = app.load_linkage_table(acct.id, false)?; let node = match table.nodes.get(&doc.id) { Some(n) => *n, None => continue, }; for child_idx in table.graph.neighbors(node) { child_ids.insert(table.graph[child_idx].clone()); } } // Is this the best ordering? let mut docs = app.ids_to_docs(&child_ids); docs.sort_by_key(|d| d.utc_mod_time()); docs.reverse(); app.print_doc_list(docs)?; app.set_cwd(&doc)?; Ok(0) } } /// Open a document. #[derive(Debug, StructOpt)] pub struct DrorgOpenOptions { #[structopt(help = "A document specifier (name, ID,...)")] spec: String, } impl DrorgOpenOptions { fn
(self, app: &mut Application) -> Result<i32> { app.maybe_sync_all_accounts()?; let doc = app.get_docs().process_one(self.spec)?; open_url(doc.open_url())?; Ok(0) } } /// List recently-used documents. #[derive(Debug, StructOpt)] pub struct DrorgRecentOptions { #[structopt( short = "n", help = "Limit output to this many documents", default_value = "10" )] limit: i64, } impl DrorgRecentOptions { fn cli(self, app: &mut Application) -> Result<i32> { use schema::docs::dsl::*; app.maybe_sync_all_accounts()?; let listing = docs .order(modified_time.desc()) .limit(self.limit) .load::<database::Doc>(&app.conn)?; app.print_doc_list(listing)?; Ok(0) } } /// Synchronize with the cloud. #[derive(Debug, StructOpt)] pub struct DrorgSyncOptions { #[structopt(long = "rebuild", help = "Rebuild all account data from scratch")] rebuild: bool, } impl DrorgSyncOptions { fn cli(self, app: &mut Application) -> Result<i32> { if!self.rebuild { // Lightweight sync app.options.sync = app::SyncOption::Yes; app.maybe_sync_all_accounts()?; } else { // Heavyweight -- rebuild account data from scratch. for maybe_info in accounts::get_accounts()? { let (email, mut account) = maybe_info?; // TODO: delete all links involving documents from this account. // To be safest, perhaps we should destroy all database rows // associated with this account? // Redo the initialization rigamarole from the "login" command. tcprintln!(app.ps, ("Rebuilding "), [hl: "{}", email], ("...")); account.acquire_change_page_token(&app.secret)?; app.import_documents(&mut account)?; } } Ok(0) } } /// Print the URL of a document. #[derive(Debug, StructOpt)] pub struct DrorgUrlOptions { #[structopt(help = "A document specifier (name, ID,...)")] spec: String, } impl DrorgUrlOptions { fn cli(self, app: &mut Application) -> Result<i32> { app.maybe_sync_all_accounts()?; let doc = app.get_docs().process_one(self.spec)?; println!("{}", doc.open_url()); Ok(0) } } /// The main StructOpt type for dispatching subcommands. #[derive(Debug, StructOpt)] pub enum DrorgSubcommand { #[structopt(name = "info")] /// Show detailed information about one or more documents Info(DrorgInfoOptions), #[structopt(name = "list")] /// List documents in a compact format (note: `ls` is different) List(DrorgListOptions), #[structopt(name = "login")] /// Add a Google account to be monitored Login(DrorgLoginOptions), #[structopt(name = "ls")] /// List files in a folder (note: `list` is different) Ls(DrorgLsOptions), #[structopt(name = "open")] /// Open a document in a web browser Open(DrorgOpenOptions), #[structopt(name = "recent")] /// List recently-used documents Recent(DrorgRecentOptions), #[structopt(name = "sync")] /// Synchronize with the cloud Sync(DrorgSyncOptions), #[structopt(name = "url")] /// Print the URL to open a document Url(DrorgUrlOptions), } /// The main StructOpt argument dispatcher. 
#[derive(Debug, StructOpt)] #[structopt(name = "drorg", about = "Organize documents on Google Drive.")] pub struct DrorgCli { #[structopt(subcommand)] command: DrorgSubcommand, #[structopt(flatten)] app_opts: app::ApplicationOptions, } impl DrorgCli { fn cli(self) -> StdResult<i32, (failure::Error, Option<ColorPrintState<Colors>>)> { let mut app = match Application::initialize(self.app_opts) { Ok(a) => a, Err(e) => return Err((e, None)), // no colors :-( }; let result = match self.command { DrorgSubcommand::Info(opts) => opts.cli(&mut app), DrorgSubcommand::List(opts) => opts.cli(&mut app), DrorgSubcommand::Login(opts) => opts.cli(&mut app), DrorgSubcommand::Ls(opts) => opts.cli(&mut app), DrorgSubcommand::Open(opts) => opts.cli(&mut app), DrorgSubcommand::Recent(opts) => opts.cli(&mut app), DrorgSubcommand::Sync(opts) => opts.cli(&mut app), DrorgSubcommand::Url(opts) => opts.cli(&mut app), }; result.map_err(|e| (e, Some(app.ps))) } } fn main() { let program = DrorgCli::from_args(); process::exit(match program.cli() { Ok(code) => code, Err((e, maybe_ps)) => { if let Some(mut ps) = maybe_ps { tcprintln!(ps, [red: "fatal error"], (" in drorg")); for cause in e.iter_chain() { tcprintln!(ps, (" "), [red: "caused by:"], (" {}", cause)); } } else { eprintln!("fatal error in drorg"); for cause in e.iter_chain() { eprintln!(" caused by: {}", cause); } } 1 } }); }
cli
identifier_name
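// A self-contained sketch (not from drorg) of the dispatch pattern the
// `DrorgCli` driver above uses: StructOpt parses argv into a subcommand enum
// that is then matched on. `from_iter` parses an explicit argv, which is
// handy for exercising the dispatch without spawning a process. The
// subcommands here are hypothetical stand-ins.
use structopt::StructOpt;

#[derive(Debug, StructOpt)]
enum Cmd {
    #[structopt(name = "open")]
    Open { spec: String },
    #[structopt(name = "url")]
    Url { spec: String },
}

fn main() {
    match Cmd::from_iter(vec!["prog", "open", "report-2019"]) {
        Cmd::Open { spec } => println!("would open {}", spec),
        Cmd::Url { spec } => println!("would print URL for {}", spec),
    }
}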
finality.rs
// Copyright 2015-2018 Parity Technologies (UK) Ltd. // This file is part of Parity. // Parity is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // Parity is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with Parity. If not, see <http://www.gnu.org/licenses/>. //! Finality proof generation and checking. use ed25519_dalek::{PublicKey}; use std::collections::{VecDeque}; use std::collections::hash_map::{HashMap, Entry}; use engines::authority_round::subst::{H256}; use engines::validator_set::SimpleList; use std::io::Cursor; use std::io::{ Read, Write }; use std::fs::{File}; use std::path::Path; use ton_types::types::ByteOrderRead; /// Error indicating unknown validator. #[derive(Debug, PartialEq, Eq, Clone, Copy)] pub struct UnknownValidator; /// Rolling finality checker for authority round consensus. /// Stores a chain of unfinalized hashes that can be pushed onto. #[derive(Debug, PartialEq)] pub struct RollingFinality { headers: VecDeque<(H256, Vec<u64>)>, signers: SimpleList, sign_count: HashMap<u64, usize>, last_pushed: Option<H256>, } impl RollingFinality { /// Create a blank finality checker under the given validator set. pub fn blank(signers: Vec<PublicKey>) -> Self { RollingFinality { headers: VecDeque::new(), signers: SimpleList::new(signers), sign_count: HashMap::new(), last_pushed: None, } } pub fn add_signer(&mut self, signer: PublicKey) { self.signers.add(signer) } pub fn remove_signer(&mut self, signer: &u64) { self.signers.remove_by_id(signer) } /// Extract unfinalized subchain from ancestry iterator. /// Clears the current subchain. /// /// Fails if any provided signature isn't part of the signers set. pub fn build_ancestry_subchain<I>(&mut self, iterable: I) -> Result<(), UnknownValidator> where I: IntoIterator<Item=(H256, Vec<u64>)> { self.clear(); for (hash, signers) in iterable { self.check_signers(&signers)?; if self.last_pushed.is_none() { self.last_pushed = Some(hash.clone()) } // break when we've got our first finalized block. { let current_signed = self.sign_count.len(); let new_signers = signers.iter().filter(|s|!self.sign_count.contains_key(s)).count(); let would_be_finalized = (current_signed + new_signers) * 2 > self.signers.len(); if would_be_finalized { trace!(target: "finality", "Encountered already finalized block {:?}", hash.clone()); break } for signer in signers.iter() { *self.sign_count.entry(signer.clone()).or_insert(0) += 1; } } self.headers.push_front((hash, signers)); } trace!(target: "finality", "Rolling finality state: {:?}", self.headers); Ok(()) } /// Clear the finality status, but keeps the validator set. pub fn clear(&mut self) { self.headers.clear(); self.sign_count.clear(); self.last_pushed = None; } /// Returns the last pushed hash. pub fn subchain_head(&self) -> Option<H256> { self.last_pushed.clone() } /// Get an iterator over stored hashes in order. 
#[cfg(test)] pub fn unfinalized_hashes(&self) -> impl Iterator<Item=&H256> { self.headers.iter().map(|(h, _)| h) } pub fn save(&self, file_name: &str) -> Result<(), std::io::Error> { let mut file_info = File::create(file_name)?; let data = self.serialize_info(); file_info.write_all(&data)?; file_info.flush()?; Ok(()) } pub fn load(&mut self, file_name: &str) -> Result<(), std::io::Error> { if Path::new(file_name).exists() { let mut file_info = File::open(file_name)?; let mut data = Vec::new(); file_info.read_to_end(&mut data)?; self.deserialize_info(data)?; } Ok(()) } /// serialize block hashes info pub fn serialize_info(&self) -> Vec<u8> { let mut buf = Vec::new(); //serialize sign_count let len = self.sign_count.len(); buf.extend_from_slice(&(len as u32).to_le_bytes()); for (sign, count) in self.sign_count.iter() { buf.extend_from_slice(&(*sign as u64).to_le_bytes()); buf.extend_from_slice(&(*count as u64).to_le_bytes()); } //serialize headers let len = self.headers.len(); buf.extend_from_slice(&(len as u32).to_le_bytes()); for h in self.headers.iter() { let (hash, validators) = h.clone(); buf.append(&mut hash.0.to_vec()); let keys_count = validators.len(); buf.extend_from_slice(&(keys_count as u32).to_le_bytes()); for v in validators.iter() { buf.extend_from_slice(&(*v as u64).to_le_bytes()); } } buf } /// deserialize block hashes info pub fn deserialize_info(&mut self, data: Vec<u8>) -> Result<(), std::io::Error> { let mut rdr = Cursor::new(data); // deserialize sing_count let len = rdr.read_le_u32()?; for _ in 0..len { let sign = rdr.read_le_u64()?; let count = rdr.read_le_u64()? as usize; self.sign_count.insert(sign, count); } // deserialize headers let len = rdr.read_le_u32()?; for _ in 0..len { let hash = rdr.read_u256()?; let keys_count = rdr.read_le_u32()?; let mut keys: Vec<u64> = vec![]; for _ in 0..keys_count { keys.push(rdr.read_le_u64()?); } self.headers.push_back((H256(hash), keys)); } Ok(()) } /// Get the validator set. pub fn validators(&self) -> &SimpleList { &self.signers } /// Remove last validator from list pub fn remove_last(&mut self) -> Option<(H256, Vec<u64>)>
/// Push a hash onto the rolling finality checker (implying `subchain_head` == head.parent) /// /// Fails if `signer` isn't a member of the active validator set. /// Returns a list of all newly finalized headers. // TODO: optimize with smallvec. pub fn push_hash(&mut self, head: H256, signers: Vec<u64>) -> Result<Vec<H256>, UnknownValidator> { self.check_signers(&signers)?; for signer in signers.iter() { *self.sign_count.entry(signer.clone()).or_insert(0) += 1; } self.headers.push_back((head.clone(), signers)); let mut newly_finalized = Vec::new(); while self.sign_count.len() * 2 > self.signers.len() { let (hash, signers) = self.headers.pop_front() .expect("headers length always greater than sign count length; qed"); newly_finalized.push(hash); for signer in signers { match self.sign_count.entry(signer) { Entry::Occupied(mut entry) => { // decrement count for this signer and purge on zero. *entry.get_mut() -= 1; if *entry.get() == 0 { entry.remove(); } } Entry::Vacant(_) => panic!("all hashes in `header` should have entries in `sign_count` for their signers; qed"), } } } trace!(target: "finality", "Blocks finalized by {:?}: {:?}", head, newly_finalized); self.last_pushed = Some(head); Ok(newly_finalized) } fn check_signers(&self, signers: &Vec<u64>) -> Result<(), UnknownValidator> { for s in signers.iter() { if!self.signers.contains_id(s) { return Err(UnknownValidator) } } Ok(()) } } #[cfg(test)] mod tests { use ed25519_dalek::PublicKey; use std::fs; use std::path::Path; use ton_block::id_from_key; use super::RollingFinality; use engines::authority_round::subst::{H256}; #[test] fn test_serialation() { let vec = (0..7).map(|_| { let pvt_key = ed25519_dalek::SecretKey::generate(&mut rand::thread_rng()); ed25519_dalek::PublicKey::from(&pvt_key) }).collect::<Vec<ed25519_dalek::PublicKey>>(); let mut bytes = [0u8; 8]; bytes.copy_from_slice(&vec[0].as_bytes()[0..8]); let v1 = u64::from_be_bytes(bytes); bytes.copy_from_slice(&vec[1].as_bytes()[0..8]); let v2 = u64::from_be_bytes(bytes); bytes.copy_from_slice(&vec[2].as_bytes()[0..8]); let v3 = u64::from_be_bytes(bytes); let mut rf = RollingFinality::blank(vec); rf.push_hash(H256([0;32]), vec![v1]).unwrap(); rf.push_hash(H256([1;32]), vec![v2]).unwrap(); rf.push_hash(H256([2;32]), vec![v1]).unwrap(); rf.push_hash(H256([4;32]), vec![v3]).unwrap(); rf.push_hash(H256([5;32]), vec![v3]).unwrap(); let data = rf.serialize_info(); println!("{:?}", data); let mut rf2 = RollingFinality::blank(vec![]); rf2.deserialize_info(data).unwrap(); assert_eq!(rf.headers, rf2.headers); } fn get_keys(n: usize) -> (Vec<PublicKey>, Vec<u64>) { let mut keys = Vec::new(); let mut kids = Vec::new(); for i in 0..n { let name = format!("../config/pub{:02}", i+1); let data = fs::read(Path::new(&name)) .expect(&format!("Error reading key file {}", name)); let key = PublicKey::from_bytes(&data).unwrap(); kids.push(id_from_key(&key)); keys.push(key); } (keys, kids) } #[test] fn rejects_unknown_signers() { let (signers, key_ids) = get_keys(3); let mut finality = RollingFinality::blank(signers); assert!(finality.push_hash(H256::random(), vec![key_ids[0], 0xAA]).is_err()); } #[test] fn finalize_multiple() { let (signers, key_ids) = get_keys(6); let mut finality = RollingFinality::blank(signers); let hashes: Vec<_> = (0..7).map(|_| H256::random()).collect(); // 3 / 6 signers is < 51% so no finality. 
for (i, hash) in hashes.iter().take(6).cloned().enumerate() { let i = i % 3; assert!(finality.push_hash(hash, vec![key_ids[i]]).unwrap().len() == 0); } // after pushing a block signed by a fourth validator, the first four // blocks of the unverified chain become verified. assert_eq!( finality.push_hash(hashes[6].clone(), vec![key_ids[4]]).unwrap(), vec![hashes[0].clone(), hashes[1].clone(), hashes[2].clone(), hashes[3].clone()] ); } #[test] fn finalize_multiple_signers() { let (signers, key_ids) = get_keys(6); let mut finality = RollingFinality::blank(signers); let hash = H256::random(); // after pushing a block signed by four validators, it becomes verified right away. assert_eq!(finality.push_hash(hash.clone(), key_ids).unwrap(), vec![hash]); } #[test] fn from_ancestry() { let (signers, key_ids) = get_keys(6); let hashes: Vec<_> = (0..12).map( |i| (H256::random(), vec![key_ids[i % 6]]) ).collect(); let mut finality = RollingFinality::blank(signers); finality.build_ancestry_subchain(hashes.iter().rev().cloned()).unwrap(); assert_eq!(finality.unfinalized_hashes().count(), 3); assert_eq!(finality.subchain_head(), Some(hashes[11].clone().0)); } #[test] fn from_ancestry_multiple_signers() { let (signers, key_ids) = get_keys(6); let hashes: Vec<_> = (0..12).map( |i| { (H256::random(), vec![key_ids[i % 6], key_ids[(i + 1) % 6], key_ids[(i + 2) % 6]]) } ).collect(); let mut finality = RollingFinality::blank(signers); finality.build_ancestry_subchain(hashes.iter().rev().cloned()).unwrap(); // only the last hash has < 51% of authorities' signatures assert_eq!(finality.unfinalized_hashes().count(), 1); assert_eq!(finality.unfinalized_hashes().next(), Some(&hashes[11].clone().0)); assert_eq!(finality.subchain_head(), Some(hashes[11].clone().0)); } }
{ self.headers.pop_back() }
identifier_body
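// A minimal standalone model (not from the source above) of the drain loop in
// `push_hash`: a prefix of the unfinalized chain becomes final as soon as
// strictly more than half of the validator set has signed somewhere in it
// (`sign_count.len() * 2 > signers.len()`), and signer counts are decremented
// as blocks pop off the front. Names and types here are simplified.
use std::collections::{HashMap, VecDeque};

fn finalize(
    headers: &mut VecDeque<(u32, Vec<u64>)>,
    counts: &mut HashMap<u64, usize>,
    n_validators: usize,
) -> Vec<u32> {
    let mut finalized = Vec::new();
    while counts.len() * 2 > n_validators {
        let (hash, signers) = headers.pop_front().expect("non-empty while majority holds");
        finalized.push(hash);
        for s in signers {
            let c = counts.get_mut(&s).expect("signer was counted when pushed");
            *c -= 1;
            if *c == 0 {
                counts.remove(&s);
            }
        }
    }
    finalized
}

fn main() {
    let mut headers = VecDeque::from(vec![(1, vec![10]), (2, vec![11]), (3, vec![12])]);
    let mut counts: HashMap<u64, usize> =
        [(10u64, 1usize), (11, 1), (12, 1)].into_iter().collect();
    // 3 distinct signers out of 4 validators is a strict majority; finality
    // advances only while that majority holds, so exactly one block drains.
    assert_eq!(finalize(&mut headers, &mut counts, 4), vec![1]);
}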
finality.rs
// Copyright 2015-2018 Parity Technologies (UK) Ltd. // This file is part of Parity. // Parity is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // Parity is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with Parity. If not, see <http://www.gnu.org/licenses/>. //! Finality proof generation and checking. use ed25519_dalek::{PublicKey}; use std::collections::{VecDeque}; use std::collections::hash_map::{HashMap, Entry}; use engines::authority_round::subst::{H256}; use engines::validator_set::SimpleList; use std::io::Cursor; use std::io::{ Read, Write }; use std::fs::{File}; use std::path::Path; use ton_types::types::ByteOrderRead; /// Error indicating unknown validator. #[derive(Debug, PartialEq, Eq, Clone, Copy)] pub struct UnknownValidator; /// Rolling finality checker for authority round consensus. /// Stores a chain of unfinalized hashes that can be pushed onto. #[derive(Debug, PartialEq)] pub struct RollingFinality { headers: VecDeque<(H256, Vec<u64>)>, signers: SimpleList, sign_count: HashMap<u64, usize>, last_pushed: Option<H256>, } impl RollingFinality { /// Create a blank finality checker under the given validator set. pub fn blank(signers: Vec<PublicKey>) -> Self { RollingFinality { headers: VecDeque::new(), signers: SimpleList::new(signers), sign_count: HashMap::new(), last_pushed: None, } } pub fn add_signer(&mut self, signer: PublicKey) { self.signers.add(signer) } pub fn remove_signer(&mut self, signer: &u64) { self.signers.remove_by_id(signer) } /// Extract unfinalized subchain from ancestry iterator. /// Clears the current subchain. /// /// Fails if any provided signature isn't part of the signers set. pub fn build_ancestry_subchain<I>(&mut self, iterable: I) -> Result<(), UnknownValidator> where I: IntoIterator<Item=(H256, Vec<u64>)> { self.clear(); for (hash, signers) in iterable { self.check_signers(&signers)?; if self.last_pushed.is_none() { self.last_pushed = Some(hash.clone()) } // break when we've got our first finalized block. { let current_signed = self.sign_count.len(); let new_signers = signers.iter().filter(|s|!self.sign_count.contains_key(s)).count(); let would_be_finalized = (current_signed + new_signers) * 2 > self.signers.len(); if would_be_finalized { trace!(target: "finality", "Encountered already finalized block {:?}", hash.clone()); break } for signer in signers.iter() { *self.sign_count.entry(signer.clone()).or_insert(0) += 1; } } self.headers.push_front((hash, signers)); } trace!(target: "finality", "Rolling finality state: {:?}", self.headers); Ok(()) } /// Clear the finality status, but keeps the validator set. pub fn clear(&mut self) { self.headers.clear(); self.sign_count.clear(); self.last_pushed = None; } /// Returns the last pushed hash. pub fn subchain_head(&self) -> Option<H256> { self.last_pushed.clone() } /// Get an iterator over stored hashes in order. 
#[cfg(test)] pub fn unfinalized_hashes(&self) -> impl Iterator<Item=&H256> { self.headers.iter().map(|(h, _)| h) } pub fn save(&self, file_name: &str) -> Result<(), std::io::Error> { let mut file_info = File::create(file_name)?; let data = self.serialize_info(); file_info.write_all(&data)?; file_info.flush()?; Ok(()) } pub fn load(&mut self, file_name: &str) -> Result<(), std::io::Error> { if Path::new(file_name).exists() { let mut file_info = File::open(file_name)?; let mut data = Vec::new(); file_info.read_to_end(&mut data)?; self.deserialize_info(data)?; } Ok(()) } /// serialize block hashes info pub fn serialize_info(&self) -> Vec<u8> { let mut buf = Vec::new(); //serialize sign_count let len = self.sign_count.len(); buf.extend_from_slice(&(len as u32).to_le_bytes()); for (sign, count) in self.sign_count.iter() { buf.extend_from_slice(&(*sign as u64).to_le_bytes()); buf.extend_from_slice(&(*count as u64).to_le_bytes()); } //serialize headers let len = self.headers.len(); buf.extend_from_slice(&(len as u32).to_le_bytes()); for h in self.headers.iter() { let (hash, validators) = h.clone(); buf.append(&mut hash.0.to_vec()); let keys_count = validators.len(); buf.extend_from_slice(&(keys_count as u32).to_le_bytes()); for v in validators.iter() { buf.extend_from_slice(&(*v as u64).to_le_bytes()); } } buf } /// deserialize block hashes info pub fn deserialize_info(&mut self, data: Vec<u8>) -> Result<(), std::io::Error> { let mut rdr = Cursor::new(data); // deserialize sing_count let len = rdr.read_le_u32()?; for _ in 0..len { let sign = rdr.read_le_u64()?; let count = rdr.read_le_u64()? as usize; self.sign_count.insert(sign, count); } // deserialize headers let len = rdr.read_le_u32()?; for _ in 0..len { let hash = rdr.read_u256()?; let keys_count = rdr.read_le_u32()?; let mut keys: Vec<u64> = vec![]; for _ in 0..keys_count { keys.push(rdr.read_le_u64()?); } self.headers.push_back((H256(hash), keys)); } Ok(()) } /// Get the validator set. pub fn validators(&self) -> &SimpleList { &self.signers } /// Remove last validator from list pub fn remove_last(&mut self) -> Option<(H256, Vec<u64>)> { self.headers.pop_back() } /// Push a hash onto the rolling finality checker (implying `subchain_head` == head.parent) /// /// Fails if `signer` isn't a member of the active validator set. /// Returns a list of all newly finalized headers. // TODO: optimize with smallvec. pub fn push_hash(&mut self, head: H256, signers: Vec<u64>) -> Result<Vec<H256>, UnknownValidator> { self.check_signers(&signers)?; for signer in signers.iter() { *self.sign_count.entry(signer.clone()).or_insert(0) += 1; } self.headers.push_back((head.clone(), signers)); let mut newly_finalized = Vec::new(); while self.sign_count.len() * 2 > self.signers.len() { let (hash, signers) = self.headers.pop_front() .expect("headers length always greater than sign count length; qed"); newly_finalized.push(hash); for signer in signers { match self.sign_count.entry(signer) { Entry::Occupied(mut entry) => { // decrement count for this signer and purge on zero. 
                        *entry.get_mut() -= 1;
                        if *entry.get() == 0 {
                            entry.remove();
                        }
                    }
                    Entry::Vacant(_) => panic!("all hashes in `headers` should have entries in `sign_count` for their signers; qed"),
                }
            }
        }

        trace!(target: "finality", "Blocks finalized by {:?}: {:?}", head, newly_finalized);

        self.last_pushed = Some(head);
        Ok(newly_finalized)
    }

    fn check_signers(&self, signers: &[u64]) -> Result<(), UnknownValidator> {
        for s in signers.iter() {
            if !self.signers.contains_id(s) {
                return Err(UnknownValidator)
            }
        }
        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use ed25519_dalek::PublicKey;
    use std::fs;
    use std::path::Path;
    use ton_block::id_from_key;
    use super::RollingFinality;
    use engines::authority_round::subst::H256;

    #[test]
    fn test_serialization() {
        let vec = (0..7).map(|_| {
            let pvt_key = ed25519_dalek::SecretKey::generate(&mut rand::thread_rng());
            ed25519_dalek::PublicKey::from(&pvt_key)
        }).collect::<Vec<ed25519_dalek::PublicKey>>();

        let mut bytes = [0u8; 8];
        bytes.copy_from_slice(&vec[0].as_bytes()[0..8]);
        let v1 = u64::from_be_bytes(bytes);
        bytes.copy_from_slice(&vec[1].as_bytes()[0..8]);
        let v2 = u64::from_be_bytes(bytes);
        bytes.copy_from_slice(&vec[2].as_bytes()[0..8]);
        let v3 = u64::from_be_bytes(bytes);

        let mut rf = RollingFinality::blank(vec);
        rf.push_hash(H256([0; 32]), vec![v1]).unwrap();
        rf.push_hash(H256([1; 32]), vec![v2]).unwrap();
        rf.push_hash(H256([2; 32]), vec![v1]).unwrap();
        rf.push_hash(H256([4; 32]), vec![v3]).unwrap();
        rf.push_hash(H256([5; 32]), vec![v3]).unwrap();

        let data = rf.serialize_info();
        println!("{:?}", data);

        let mut rf2 = RollingFinality::blank(vec![]);
        rf2.deserialize_info(data).unwrap();
        assert_eq!(rf.headers, rf2.headers);
    }

    fn get_keys(n: usize) -> (Vec<PublicKey>, Vec<u64>) {
        let mut keys = Vec::new();
        let mut kids = Vec::new();
        for i in 0..n {
            let name = format!("../config/pub{:02}", i + 1);
            let data = fs::read(Path::new(&name))
                .expect(&format!("Error reading key file {}", name));
            let key = PublicKey::from_bytes(&data).unwrap();
            kids.push(id_from_key(&key));
            keys.push(key);
        }
        (keys, kids)
    }

    #[test]
    fn
() {
        let (signers, key_ids) = get_keys(3);

        let mut finality = RollingFinality::blank(signers);
        assert!(finality.push_hash(H256::random(), vec![key_ids[0], 0xAA]).is_err());
    }

    #[test]
    fn finalize_multiple() {
        let (signers, key_ids) = get_keys(6);

        let mut finality = RollingFinality::blank(signers);
        let hashes: Vec<_> = (0..7).map(|_| H256::random()).collect();

        // 3 of 6 signers is not a strict majority, so there is no finality.
        for (i, hash) in hashes.iter().take(6).cloned().enumerate() {
            let i = i % 3;
            assert!(finality.push_hash(hash, vec![key_ids[i]]).unwrap().is_empty());
        }

        // After pushing a block signed by a fourth validator, the first four
        // blocks of the unfinalized chain become finalized.
        assert_eq!(
            finality.push_hash(hashes[6].clone(), vec![key_ids[4]]).unwrap(),
            vec![hashes[0].clone(), hashes[1].clone(), hashes[2].clone(), hashes[3].clone()]
        );
    }

    #[test]
    fn finalize_multiple_signers() {
        let (signers, key_ids) = get_keys(6);
        let mut finality = RollingFinality::blank(signers);
        let hash = H256::random();

        // A block signed by all six validators is finalized right away.
        assert_eq!(finality.push_hash(hash.clone(), key_ids).unwrap(), vec![hash]);
    }

    #[test]
    fn from_ancestry() {
        let (signers, key_ids) = get_keys(6);
        let hashes: Vec<_> = (0..12).map(|i| (H256::random(), vec![key_ids[i % 6]])).collect();

        let mut finality = RollingFinality::blank(signers);
        finality.build_ancestry_subchain(hashes.iter().rev().cloned()).unwrap();

        assert_eq!(finality.unfinalized_hashes().count(), 3);
        assert_eq!(finality.subchain_head(), Some(hashes[11].clone().0));
    }

    #[test]
    fn from_ancestry_multiple_signers() {
        let (signers, key_ids) = get_keys(6);
        let hashes: Vec<_> = (0..12).map(|i| {
            (H256::random(), vec![key_ids[i % 6], key_ids[(i + 1) % 6], key_ids[(i + 2) % 6]])
        }).collect();

        let mut finality = RollingFinality::blank(signers);
        finality.build_ancestry_subchain(hashes.iter().rev().cloned()).unwrap();

        // Only the last hash has signatures from fewer than a majority of authorities.
        assert_eq!(finality.unfinalized_hashes().count(), 1);
        assert_eq!(finality.unfinalized_hashes().next(), Some(&hashes[11].clone().0));
        assert_eq!(finality.subchain_head(), Some(hashes[11].clone().0));
    }
}
rejects_unknown_signers
identifier_name
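The rolling rule in `push_hash` deserves a worked example: finality fires once the distinct signers seen across the unfinalized headers form a strict majority (`sign_count.len() * 2 > signers.len()`), and only the oldest headers are popped while the remainder still holds a majority. The sketch below is written in the style of the tests above and assumes their `get_keys` helper (and its key files); it is illustrative, not part of the original test suite.

#[test]
fn rolling_majority_sketch() {
    // Assumes `get_keys` from the tests module above (reads ../config/pubNN files).
    let (signers, key_ids) = get_keys(5);
    let mut finality = RollingFinality::blank(signers);

    let h: Vec<_> = (0..3).map(|_| H256::random()).collect();

    // Two distinct signers out of five: 2 * 2 <= 5, so nothing finalizes yet.
    assert!(finality.push_hash(h[0].clone(), vec![key_ids[0]]).unwrap().is_empty());
    assert!(finality.push_hash(h[1].clone(), vec![key_ids[1]]).unwrap().is_empty());

    // A third distinct signer tips the majority (3 * 2 > 5). Popping h[0]
    // removes signer 0 from `sign_count`, after which only two distinct
    // signers remain over the rest, so exactly one header is finalized.
    assert_eq!(
        finality.push_hash(h[2].clone(), vec![key_ids[2]]).unwrap(),
        vec![h[0].clone()]
    );
    assert_eq!(finality.unfinalized_hashes().count(), 2);
}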
ffi.rs
//! Internals of the `libsvm` FFI.
//!
//! Objects whose names start with `Libsvm` are for the most part
//! things that we pass to or get directly from `libsvm`, and are highly
//! fragile.
//!
//! Their safe, memory-owning counterparts start with `Svm`.

use std::ffi::CStr;
use std::os::raw::c_char;
use std::slice;

use prelude::*;

/// SVM type.
#[repr(C)]
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum SvmType {
    C_SVC,
    NU_SVC,
    ONE_CLASS,
    EPSILON_SVR,
    NU_SVR,
}

/// Type of the kernel used by the SVM.
#[repr(C)]
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum KernelType {
    Linear,
    Polynomial,
    RBF,
    Sigmoid,
    /// Not implemented.
    Precomputed,
}

/// Libsvm uses a sparse representation of data, where every entry in the
/// training matrix is characterised by a column index and a value. Because
/// this is already a safe, memory-owning Rust object, it does not need a
/// separate safe counterpart.
#[repr(C)]
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct LibsvmNode {
    index: i32,
    value: f64,
}

impl LibsvmNode {
    fn new(index: i32, value: f64) -> LibsvmNode {
        LibsvmNode { index, value }
    }
}

/// Libsvm structure representing training data.
#[repr(C)]
struct LibsvmProblem {
    /// Number of rows in the training data.
    l: i32,
    y: *const f64,
    /// Rows of the X matrix. Because row lengths are not stored anywhere,
    /// and do not need to be equal, `libsvm` uses index = -1 as a sentinel
    /// value.
    svm_node: *const *const LibsvmNode,
}

/// Safe version of `LibsvmProblem`.
pub struct SvmProblem {
    nodes: Vec<Vec<LibsvmNode>>,
    node_ptrs: Vec<*const LibsvmNode>,
    y: Vec<f64>,
}

/// Convert a row of the X matrix to its libsvm representation.
fn row_to_nodes<T: NonzeroIterable>(row: T) -> Vec<LibsvmNode> {
    let mut nodes = Vec::new();

    for (index, value) in row.iter_nonzero() {
        nodes.push(LibsvmNode::new(index as i32, value as f64));
    }

    // Sentinel value for end of row.
    nodes.push(LibsvmNode::new(-1, 0.0));

    nodes
}

impl SvmProblem {
    /// Create a new `SvmProblem` from training data.
    pub fn new<'a, T>(X: &'a T, y: &Array) -> SvmProblem
    where
        T: IndexableMatrix,
        &'a T: RowIterable,
    {
        let mut nodes = Vec::with_capacity(X.rows());

        for row in X.iter_rows() {
            let row_nodes = row_to_nodes(row);
            nodes.push(row_nodes)
        }

        let node_ptrs = nodes.iter().map(|x| x.as_ptr()).collect::<Vec<_>>();

        SvmProblem {
            nodes,
            node_ptrs,
            y: y.data().iter().map(|&x| x as f64).collect::<Vec<_>>(),
        }
    }

    /// Returns the unsafe object that can be passed into `libsvm`.
    fn build_problem(&self) -> LibsvmProblem {
        LibsvmProblem {
            l: self.nodes.len() as i32,
            y: self.y.as_ptr(),
            svm_node: self.node_ptrs.as_ptr(),
        }
    }
}

/// `libsvm` representation of training parameters.
#[repr(C)]
struct LibsvmParameter {
    svm_type: SvmType,
    kernel_type: KernelType,
    degree: i32,
    gamma: f64,
    coef0: f64,
    cache_size: f64,
    eps: f64,
    C: f64,
    nr_weight: i32,
    weight_label: *const i32,
    weight: *const f64,
    nu: f64,
    p: f64,
    shrinking: i32,
    probability: i32,
}

/// Safe representation of `LibsvmParameter`.
#[derive(Clone, Debug, Serialize, Deserialize)] pub struct SvmParameter { svm_type: SvmType, kernel_type: KernelType, pub degree: i32, pub gamma: f64, pub coef0: f64, pub cache_size: f64, eps: f64, pub C: f64, nr_weight: i32, weight_label: Vec<i32>, weight: Vec<f64>, nu: f64, p: f64, shrinking: i32, probability: i32, } impl SvmParameter { pub fn new( svm_type: SvmType, kernel_type: KernelType, num_classes: usize, dim: usize, ) -> SvmParameter { SvmParameter { svm_type: svm_type, kernel_type: kernel_type, degree: 3, gamma: 1.0 / dim as f64, C: 1.0, coef0: 0.0, cache_size: 100.0, eps: 0.1, nr_weight: num_classes as i32, weight: vec![1.0; num_classes], weight_label: (0..num_classes).map(|x| x as i32).collect::<Vec<_>>(), nu: 0.5, p: 0.1, shrinking: 1, probability: 0, } } /// Returns the parameter object to be passed into /// `libsvm` functions. fn build_libsvm_parameter(&self) -> LibsvmParameter { LibsvmParameter { svm_type: self.svm_type.clone(), kernel_type: self.kernel_type.clone(), degree: self.degree, gamma: self.gamma, C: self.C, coef0: self.coef0, cache_size: self.cache_size, eps: self.eps, nr_weight: self.nr_weight, weight: self.weight.as_ptr(), weight_label: self.weight_label.as_ptr(), nu: self.nu, p: self.p, shrinking: self.shrinking, probability: self.probability, } } } /// The model object returned from and accepted by /// `libsvm` functions. #[repr(C)] struct LibsvmModel { svm_parameter: LibsvmParameter, nr_class: i32, l: i32, SV: *const *const LibsvmNode, sv_coef: *const *const f64, rho: *const f64, probA: *const f64, probB: *const f64, sv_indices: *const i32, label: *const i32, nSV: *const i32, free_sv: i32, } /// Safe representation of `LibsvmModel`. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct SvmModel { svm_parameter: SvmParameter, nr_class: i32, l: i32, SV: Vec<Vec<LibsvmNode>>, sv_coef: Vec<Vec<f64>>, rho: Vec<f64>, probA: Vec<f64>, probB: Vec<f64>, sv_indices: Vec<i32>, label: Vec<i32>, nSV: Vec<i32>, free_sv: i32, } impl SvmModel { fn new(param: SvmParameter, model_ptr: *const LibsvmModel) -> SvmModel { unsafe { SvmModel { svm_parameter: param, nr_class: (*model_ptr).nr_class, l: (*model_ptr).l, SV: SvmModel::get_SV(model_ptr), sv_coef: SvmModel::get_sv_coef(model_ptr), rho: SvmModel::get_rho(model_ptr), probA: vec![0.0], probB: vec![0.0], sv_indices: vec![0], label: SvmModel::get_label(model_ptr), nSV: SvmModel::get_nSV(model_ptr), free_sv: 0, } } } fn get_libsvm_model( &self, SV_ptrs: &mut Vec<*const LibsvmNode>, sv_coef_ptrs: &mut Vec<*const f64>, ) -> LibsvmModel { SV_ptrs.clear(); sv_coef_ptrs.clear(); for x in &self.SV { SV_ptrs.push(x.as_ptr()); } for x in &self.sv_coef { sv_coef_ptrs.push(x.as_ptr()); } LibsvmModel { svm_parameter: self.svm_parameter.build_libsvm_parameter(), nr_class: self.nr_class, l: self.l, SV: SV_ptrs.as_ptr(), sv_coef: sv_coef_ptrs.as_ptr(), rho: self.rho.as_ptr(), probA: self.probA.as_ptr(), probB: self.probB.as_ptr(), sv_indices: self.sv_indices.as_ptr(), label: self.label.as_ptr(), nSV: self.nSV.as_ptr(), free_sv: self.free_sv, } } unsafe fn get_nSV(model_ptr: *const LibsvmModel) -> Vec<i32> { let nr_class = (*model_ptr).nr_class as usize; slice::from_raw_parts((*model_ptr).nSV, nr_class).to_owned() } unsafe fn get_label(model_ptr: *const LibsvmModel) -> Vec<i32> { let nr_class = (*model_ptr).nr_class as usize; slice::from_raw_parts((*model_ptr).label, nr_class).to_owned() } unsafe fn get_SV(model_ptr: *const LibsvmModel) -> Vec<Vec<LibsvmNode>> { let l = (*model_ptr).l; let mut sv_rows = Vec::with_capacity(l as usize); let 
sv_ptr = (*model_ptr).SV;

        for row in 0..l {
            let mut sv_row = Vec::new();

            let sv_row_ptr = *sv_ptr.offset(row as isize);

            let mut i = 0;

            loop {
                let node = (*sv_row_ptr.offset(i as isize)).clone();
                let is_end_of_row = node.index == -1;
                sv_row.push(node);

                if is_end_of_row {
                    break;
                }

                i += 1;
            }

            sv_rows.push(sv_row);
        }

        sv_rows
    }

    unsafe fn get_rho(model_ptr: *const LibsvmModel) -> Vec<f64> {
        let mut nr_class = (*model_ptr).nr_class as usize;
        // `rho` holds one entry per one-vs-one classifier pair.
        nr_class = nr_class * (nr_class - 1) / 2;
        slice::from_raw_parts((*model_ptr).rho, nr_class).to_owned()
    }

    unsafe fn get_sv_coef(model_ptr: *const LibsvmModel) -> Vec<Vec<f64>> {
        let nr_class = (*model_ptr).nr_class as usize;
        let l = (*model_ptr).l as usize;

        slice::from_raw_parts((*model_ptr).sv_coef, nr_class - 1)
            .iter()
            .map(|&x| slice::from_raw_parts(x, l).to_owned())
            .collect::<Vec<_>>()
    }
}

extern "C" {
    fn svm_train(prob: *const LibsvmProblem, param: *const LibsvmParameter) -> *const LibsvmModel;
    fn svm_predict_values(
        svm_model: *mut LibsvmModel,
        svm_nodes: *const LibsvmNode,
        out: *const f64,
    ) -> f64;
    fn svm_free_and_destroy_model(svm_model: *const *const LibsvmModel);
    fn svm_check_parameter(
        problem: *const LibsvmProblem,
        param: *const LibsvmParameter,
    ) -> *const c_char;
}

fn check(problem: *const LibsvmProblem, param: *const LibsvmParameter) -> Result<(), String> {
    unsafe {
        let message = svm_check_parameter(problem, param);

        if message.is_null() {
            Ok(())
        } else
    }
}

/// Fit a `libsvm` model.
pub fn fit<'a, T>(X: &'a T, y: &Array, parameters: &SvmParameter) -> Result<SvmModel, &'static str>
where
    T: IndexableMatrix,
    &'a T: RowIterable,
{
    let problem = SvmProblem::new(X, y);
    let libsvm_problem = problem.build_problem();
    let libsvm_param = parameters.build_libsvm_parameter();

    let model_ptr = unsafe {
        match check(
            &libsvm_problem as *const LibsvmProblem,
            &libsvm_param as *const LibsvmParameter,
        ) {
            Ok(_) => {}
            Err(error_str) => {
                // A bit of horrible out-of-band error reporting;
                // we should switch the model traits to String errors.
                println!("Libsvm check error: {}", error_str);
                return Err("Invalid libsvm parameters.");
            }
        };
        svm_train(
            &libsvm_problem as *const LibsvmProblem,
            &libsvm_param as *const LibsvmParameter,
        )
    };

    let model = SvmModel::new(parameters.clone(), model_ptr);

    unsafe {
        // Free the model data allocated by libsvm;
        // we've got our own, sane copy.
        svm_free_and_destroy_model(&model_ptr);
    }

    Ok(model)
}

/// Call `libsvm` to get predictions (both predicted classes
/// and `OvO` decision function values).
pub fn predict<'a, T>(model: &SvmModel, X: &'a T) -> (Array, Array)
where
    T: IndexableMatrix,
    &'a T: RowIterable,
{
    let x_rows = X.rows();

    let num_classes = model.nr_class as usize;
    let ovo_num_classes = num_classes * (num_classes - 1) / 2;

    // We are actually mutating this in C, but convincing rustc that it is
    // safe is a bit of a pain.
    let df = vec![0.0; x_rows * ovo_num_classes];
    let mut df_slice = &df[..];

    let mut predicted_class = Vec::with_capacity(x_rows);

    // Allocate space for pointers to support vector components;
    // we don't need them after we're finished here,
    // so they will be freed.
    let mut sv_ptrs = Vec::new();
    let mut sv_coef_ptrs = Vec::new();

    let mut libsvm_model = model.get_libsvm_model(&mut sv_ptrs, &mut sv_coef_ptrs);

    for row in X.iter_rows() {
        let nodes = row_to_nodes(row);
        unsafe {
            predicted_class.push(svm_predict_values(
                &mut libsvm_model as *mut LibsvmModel,
                nodes.as_ptr(),
                df_slice.as_ptr(),
            ) as f32);
        }
        df_slice = &df_slice[ovo_num_classes..];
    }

    let df_data = df.iter().map(|&x| x as f32).collect::<Vec<_>>();
    let mut df_array = Array::from(df_data);
    df_array.reshape(x_rows, ovo_num_classes);

    (df_array, Array::from(predicted_class))
}
{ Err(CStr::from_ptr(message).to_str().unwrap().to_owned()) }
conditional_block
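For orientation, here is the sparse encoding `row_to_nodes` produces, worked through by hand (this example is added for illustration and is not taken from the crate's own tests): only nonzero entries are stored as (index, value) pairs, and because `LibsvmProblem` never records row lengths, libsvm relies on the `index == -1` sentinel node to detect the end of each row.

    dense row:  [0.0, 2.5, 0.0, 0.0, -1.0]
    encoded:    LibsvmNode { index:  1, value:  2.5 }
                LibsvmNode { index:  4, value: -1.0 }
                LibsvmNode { index: -1, value:  0.0 }   // sentinel terminator

This layout is also why `SvmProblem` keeps both the owning `nodes` vectors and a parallel `node_ptrs` vector: the pointer array is what crosses the FFI boundary in `build_problem`, while the Rust-side vectors keep the underlying memory alive for as long as libsvm may read it.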
pub fn fit<'a, T>(X: &'a T, y: &Array, parameters: &SvmParameter) -> Result<SvmModel, &'static str> where T: IndexableMatrix, &'a T: RowIterable, { let problem = SvmProblem::new(X, y); let libsvm_problem = problem.build_problem(); let libsvm_param = parameters.build_libsvm_parameter(); let model_ptr = unsafe { match check( &libsvm_problem as *const LibsvmProblem, &libsvm_param as *const LibsvmParameter, ) { Ok(_) => {} Err(error_str) => { // A bit of a horrible out-of-band error reporting, // we should switch the model traits to String errors println!("Libsvm check error: {}", error_str); return Err("Invalid libsvm parameters."); } }; svm_train( &libsvm_problem as *const LibsvmProblem, &libsvm_param as *const LibsvmParameter, ) }; let model = SvmModel::new(parameters.clone(), model_ptr); unsafe { // Free the model data allocated by libsvm, // we've got our own, sane copy. svm_free_and_destroy_model(&model_ptr); } Ok(model) } /// Call `libsvm` to get predictions (both predicted classes /// and `OvO` decision function values. pub fn predict<'a, T>(model: &SvmModel, X: &'a T) -> (Array, Array) where T: IndexableMatrix, &'a T: RowIterable, { let x_rows = X.rows(); let num_classes = model.nr_class as usize; let ovo_num_classes = num_classes * (num_classes - 1) / 2; // We are actually mutating this in C, but convincing rustc that is // safe is a bit of a pain let df = vec![0.0; x_rows * ovo_num_classes]; let mut df_slice = &df[..]; let mut predicted_class = Vec::with_capacity(x_rows); // Allocate space for pointers to support vector components, // we don't need them after we're finished here // so they will be freed. let mut sv_ptrs = Vec::new(); let mut sv_coef_ptrs = Vec::new(); let mut libsvm_model = model.get_libsvm_model(&mut sv_ptrs, &mut sv_coef_ptrs); for (_, row) in X.iter_rows().enumerate() { let nodes = row_to_nodes(row); unsafe { predicted_class.push(svm_predict_values( &mut libsvm_model as *mut LibsvmModel, nodes.as_ptr(), df_slice.as_ptr(), ) as f32); } df_slice = &df_slice[ovo_num_classes..]; } let df_data = df.iter().map(|&x| x as f32).collect::<Vec<_>>(); let mut df_array = Array::from(df_data); df_array.reshape(x_rows, ovo_num_classes); (df_array, Array::from(predicted_class)) }
{
    let mut nodes = Vec::with_capacity(X.rows());

    for row in X.iter_rows() {
        nodes.push(row_to_nodes(row));
    }

    let node_ptrs = nodes.iter().map(|x| x.as_ptr()).collect::<Vec<_>>();

    SvmProblem {
        nodes,
        node_ptrs,
        y: y.data().iter().map(|&x| x as f64).collect::<Vec<_>>(),
    }
}
identifier_body
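Taken together, the ffi.rs record gives the whole round trip: `fit` validates parameters via `svm_check_parameter`, trains through `svm_train`, copies the result into the owning `SvmModel`, and frees the C-side allocation; `predict` then rebuilds a transient `LibsvmModel` view over the safe copy for `svm_predict_values`. A hedged caller-side sketch, where the training matrix `x` (any `IndexableMatrix` whose reference is `RowIterable`) and the label array `y` are assumed to come from the surrounding crate:

// Illustrative only; `x`, `y`, `num_classes`, and `num_features`
// are placeholders supplied by the caller.
let params = SvmParameter::new(SvmType::C_SVC, KernelType::RBF, num_classes, num_features);
let model = fit(&x, &y, &params)?; // Err(&'static str) if libsvm rejects the parameters
let (decision_values, predicted_classes) = predict(&model, &x);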
lib.rs
use encoding_rs::{EncoderResult, EUC_KR, SHIFT_JIS, UTF_16LE}; use eztrans_rs::{Container, EzTransLib}; use fxhash::FxHashMap; use serde_derive::{Deserialize, Serialize}; use std::ffi::CStr; use std::fs; use std::path::Path; use std::ptr::null_mut; pub struct EzDictItem { key: String, value: String, } impl EzDictItem { pub fn new(key: String, value: String) -> Self { assert!(!key.is_empty()); Self { key, value } } pub fn apply(&self, text: &mut String) { let mut prev_pos = 0; while let Some(pos) = twoway::find_str(&text[prev_pos..], &self.key) { text.replace_range(prev_pos + pos..prev_pos + pos + self.key.len(), &self.value); prev_pos = pos + self.value.len(); } } #[inline] pub fn key(&self) -> &str { &self.key } #[inline] pub fn value(&self) -> &str { &self.value } } #[test] fn dict_item_test() { let item = EzDictItem::new("あなた".into(), "아나타".into()); let mut foo = "あなた당신あなた".into(); item.apply(&mut foo); assert_eq!(foo, "아나타당신아나타"); } #[test] #[should_panic] fn dict_item_empty_key_test() { let _item = EzDictItem::new("".into(), "123".into()); } #[test] fn dict_item_empty_value_test() { let item = EzDictItem::new("123".into(), "".into()); let mut foo = "123def".into(); item.apply(&mut foo); assert_eq!(foo, "def"); } #[test] fn dict_item_eq_kv_test() { let item = EzDictItem::new("123".into(), "123".into()); let mut foo = "123def".into(); item.apply(&mut foo); assert_eq!(foo, "123def"); } #[derive(Serialize, Deserialize, Default)] struct EzDict { #[serde(default)] sort: bool, #[serde(alias = "BeforeDic")] #[serde(with = "dict_items")] #[serde(default)] before_dict: Vec<EzDictItem>, #[serde(alias = "AfterDic")] #[serde(with = "dict_items")] #[serde(default)] after_dict: Vec<EzDictItem>, } impl EzDict { pub fn sort_before_dict(&mut self) { if!self.sort { return
if!self.sort { return; } self.after_dict .sort_unstable_by(|l, r| l.key().cmp(r.key())); } pub fn sort(&mut self) { self.sort_after_dict(); self.sort_before_dict(); } } mod dict_items { use super::EzDictItem; use serde::de::{MapAccess, Visitor}; use serde::ser::SerializeMap; use serde::{Deserializer, Serializer}; use std::fmt; pub fn serialize<S: Serializer>(items: &Vec<EzDictItem>, s: S) -> Result<S::Ok, S::Error> { let mut map = s.serialize_map(Some(items.len()))?; for item in items { map.serialize_entry(item.key(), item.value())?; } map.end() } pub fn deserialize<'de, D: Deserializer<'de>>(d: D) -> Result<Vec<EzDictItem>, D::Error> { struct ItemVisitor; impl<'de> Visitor<'de> for ItemVisitor { type Value = Vec<EzDictItem>; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { formatter.write_str("key and value") } fn visit_map<M: MapAccess<'de>>(self, mut access: M) -> Result<Self::Value, M::Error> { let mut ret = Vec::with_capacity(access.size_hint().unwrap_or(10)); while let Some((key, value)) = access.next_entry()? { ret.push(EzDictItem::new(key, value)); } Ok(ret) } } d.deserialize_map(ItemVisitor) } } pub struct EzContext { lib: Container<EzTransLib<'static>>, cache: FxHashMap<String, String>, dict: EzDict, encode_buffer: Vec<u8>, string_buffer: String, } impl EzContext { pub fn from_path( lib: Container<EzTransLib<'static>>, path: &Path, ) -> Result<Self, Box<dyn std::error::Error>> { let cache_path = path.join("cache.msgpack"); let dict_path = path.join("userdic.yml"); let json_dict_path = path.join("userdic.json"); let mut cache = if cache_path.exists() { rmp_serde::from_read(fs::File::open(cache_path)?)? } else { FxHashMap::default() }; cache.insert(String::new(), String::new()); let mut dict = if dict_path.exists() { serde_yaml::from_reader(fs::File::open(dict_path)?)? } else if json_dict_path.exists() { serde_json::from_reader(fs::File::open(json_dict_path)?)? 
} else { EzDict::default() }; dict.sort(); Ok(Self { lib, cache, dict, encode_buffer: Vec::with_capacity(8192), string_buffer: String::new(), }) } pub fn save_to(&self, path: &Path) -> Result<(), Box<dyn std::error::Error>> { let cache_path = path.join("cache.msgpack"); let dict_path = path.join("userdic.yml"); use std::fs::write; write(cache_path, rmp_serde::to_vec(&self.cache)?)?; write(dict_path, serde_yaml::to_vec(&self.dict)?)?; Ok(()) } fn translate_impl(&mut self, text: &str) -> &str { let dict = &mut self.dict; let lib = &self.lib; let buf = &mut self.encode_buffer; let str_buf = &mut self.string_buffer; self.cache.entry(text.into()).or_insert_with(move || { str_buf.push_str(text); let mut encoder = SHIFT_JIS.new_encoder(); let mut decoder = EUC_KR.new_decoder_without_bom_handling(); let max_buf_len = encoder .max_buffer_length_from_utf8_without_replacement(str_buf.len()) .unwrap_or(0); buf.reserve(max_buf_len + 1); let (encoder_ret, _) = encoder.encode_from_utf8_to_vec_without_replacement(&str_buf, buf, true); buf.push(0); assert_eq!(encoder_ret, EncoderResult::InputEmpty); let translated = unsafe { lib.translate(CStr::from_bytes_with_nul_unchecked(&buf[..])) }; let translated = translated.as_bytes(); buf.clear(); str_buf.clear(); let mut ret = String::with_capacity( decoder .max_utf8_buffer_length_without_replacement(translated.len()) .unwrap_or(0), ); let (_decoder_ret, _) = decoder.decode_to_string_without_replacement(translated, &mut ret, true); for after in dict.after_dict.iter() { after.apply(&mut ret); } ret }) } pub fn translate(&mut self, text: &str) -> &str { if!self.cache.contains_key(text) { let max_len = UTF_16LE .new_decoder_without_bom_handling() .max_utf8_buffer_length_without_replacement(text.len() * 2); let mut ret = String::with_capacity(max_len.unwrap_or(text.len() * 3)); { let mut text = text.into(); for before in self.dict.before_dict.iter() { before.apply(&mut text); } let mut prev_pos = 0; let mut is_in_japanese = is_japanese(text.chars().next().unwrap()); for (pos, ch) in text.char_indices() { if is_japanese(ch) { if!is_in_japanese { ret.push_str(&text[prev_pos..pos]); prev_pos = pos; is_in_japanese = true; } } else { if is_in_japanese { let translated = self.translate_impl(&text[prev_pos..pos]); ret.push_str(translated); prev_pos = pos; is_in_japanese = false; } } } if!is_in_japanese { ret.push_str(&text[prev_pos..]); } else { let translated = self.translate_impl(&text[prev_pos..]); ret.push_str(translated); } } self.cache.insert(text.into(), ret); } self.cache.get(text).unwrap() } } #[no_mangle] pub unsafe extern "cdecl" fn ez_init( ez_path: *const u16, ez_path_len: usize, ctx_path: *const u16, ctx_path_len: usize, ) -> *mut EzContext { let path = utf16_to_string(ez_path, ez_path_len); let ctx_path = utf16_to_string(ctx_path, ctx_path_len); let path = Path::new(&path); let ctx_path = Path::new(&ctx_path); eprintln!("Loading lib from {}", path.display()); let lib = match eztrans_rs::load_library(path.join("J2KEngine.dll")) { Ok(lib) => lib, Err(err) => { eprintln!("EzTrans library loading failed: {:?}", err); return null_mut(); } }; let mut dat_dir = path.join("Dat").to_str().unwrap().to_string().into_bytes(); dat_dir.push(0); lib.initialize( CStr::from_bytes_with_nul_unchecked(b"CSUSER123455\0"), CStr::from_bytes_with_nul_unchecked(&dat_dir[..]), ); let ctx = match EzContext::from_path(lib, ctx_path) { Ok(ctx) => ctx, Err(err) => { eprintln!("Loading context failed: {:?}", err); return null_mut(); } }; Box::into_raw(Box::new(ctx)) } #[no_mangle] pub 
unsafe extern "cdecl" fn ez_save(ctx: *mut EzContext, path: *const u16, path_len: usize) { let path = utf16_to_string(path, path_len); let path = Path::new(&path); if let Err(err) = (*ctx).save_to(path) { eprintln!("Save err: {:?}", err); } } #[no_mangle] pub unsafe extern "cdecl" fn ez_delete(ctx: *mut EzContext) { (*ctx).lib.terminate(); let _ = Box::from_raw(ctx); } #[no_mangle] pub unsafe extern "cdecl" fn ez_add_before_dict( ctx: *mut EzContext, key: *const u16, key_len: usize, value: *const u16, value_len: usize, ) { let key = utf16_to_string(key, key_len); let value = utf16_to_string(value, value_len); (*ctx).dict.before_dict.push(EzDictItem::new(key, value)); (*ctx).dict.sort_before_dict(); } #[no_mangle] pub unsafe extern "cdecl" fn ez_add_after_dict( ctx: *mut EzContext, key: *const u16, key_len: usize, value: *const u16, value_len: usize, ) { let key = utf16_to_string(key, key_len); let value = utf16_to_string(value, value_len); (*ctx).dict.after_dict.push(EzDictItem::new(key, value)); (*ctx).dict.sort_after_dict(); } #[no_mangle] pub unsafe extern "cdecl" fn ez_translate( ctx: *mut EzContext, text: *const u16, text_len: usize, out_text: *mut *const u8, out_text_len: *mut usize, ) -> i32 { let text = utf16_to_string(text, text_len); let translated = (*ctx).translate(text.as_ref()); *out_text = translated.as_ptr(); *out_text_len = translated.len(); 0 } fn u16_slice_to_u8_slice(slice: &[u16]) -> &[u8] { unsafe { std::slice::from_raw_parts(slice.as_ptr() as *const u8, slice.len() * 2) } } unsafe fn utf16_to_string<'a>(text: *const u16, len: usize) -> String { let (text, _) = UTF_16LE .decode_without_bom_handling(u16_slice_to_u8_slice(std::slice::from_raw_parts(text, len))); text.into() } fn is_japanese(ch: char) -> bool { let ch = ch as u32; (ch >= 0x3000 && ch <= 0x30FF) || (ch >= 0x4E00 && ch <= 0x9FAF) }
; } self.before_dict .sort_unstable_by(|l, r| l.key().cmp(r.key())); } pub fn sort_after_dict(&mut self) {
identifier_body
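One detail worth flagging in the `EzDictItem::apply` loop repeated through these lib.rs records: `twoway::find_str` searches the tail slice `&text[prev_pos..]`, so the returned `pos` is relative to `prev_pos`, yet the loop resets `prev_pos` to `pos + self.value.len()` without adding the old base. When a replacement value contains its own key, that can rescan text it already rewrote. A sketch of the loop under that reading (same signature, offsets rebased; this is an assumption about intent, not a confirmed upstream fix):

pub fn apply(&self, text: &mut String) {
    let mut prev_pos = 0;
    while let Some(pos) = twoway::find_str(&text[prev_pos..], &self.key) {
        let start = prev_pos + pos; // rebase the match to the full string
        text.replace_range(start..start + self.key.len(), &self.value);
        prev_pos = start + self.value.len(); // resume after the inserted value
    }
}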
lib.rs
use encoding_rs::{EncoderResult, EUC_KR, SHIFT_JIS, UTF_16LE}; use eztrans_rs::{Container, EzTransLib}; use fxhash::FxHashMap; use serde_derive::{Deserialize, Serialize}; use std::ffi::CStr; use std::fs; use std::path::Path; use std::ptr::null_mut; pub struct EzDictItem { key: String, value: String, } impl EzDictItem { pub fn new(key: String, value: String) -> Self { assert!(!key.is_empty()); Self { key, value } } pub fn apply(&self, text: &mut String) { let mut prev_pos = 0; while let Some(pos) = twoway::find_str(&text[prev_pos..], &self.key) { text.replace_range(prev_pos + pos..prev_pos + pos + self.key.len(), &self.value); prev_pos = pos + self.value.len(); } } #[inline] pub fn key(&self) -> &str { &self.key } #[inline] pub fn value(&self) -> &str { &self.value } } #[test] fn dict_item_test() { let item = EzDictItem::new("あなた".into(), "아나타".into()); let mut foo = "あなた당신あなた".into(); item.apply(&mut foo); assert_eq!(foo, "아나타당신아나타"); } #[test] #[should_panic] fn dict_item_empty_key_test() { let _item = EzDictItem::new("".into(), "123".into()); } #[test] fn dict_item_empty_value_test() { let item = EzDictItem::new("123".into(), "".into()); let mut foo = "123def".into(); item.apply(&mut foo); assert_eq!(foo, "def"); } #[test] fn dict_item_eq_kv_test() { let item = EzDictItem::new("123".into(), "123".into()); let mut foo = "123def".into(); item.apply(&mut foo); assert_eq!(foo, "123def"); } #[derive(Serialize, Deserialize, Default)] struct EzDict { #[serde(default)] sort: bool, #[serde(alias = "BeforeDic")] #[serde(with = "dict_items")] #[serde(default)] before_dict: Vec<EzDictItem>, #[serde(alias = "AfterDic")] #[serde(with = "dict_items")] #[serde(default)] after_dict: Vec<EzDictItem>, }
impl EzDict { pub fn sort_before_dict(&mut self) { if!self.sort { return; } self.before_dict .sort_unstable_by(|l, r| l.key().cmp(r.key())); } pub fn sort_after_dict(&mut self) { if!self.sort { return; } self.after_dict .sort_unstable_by(|l, r| l.key().cmp(r.key())); } pub fn sort(&mut self) { self.sort_after_dict(); self.sort_before_dict(); } } mod dict_items { use super::EzDictItem; use serde::de::{MapAccess, Visitor}; use serde::ser::SerializeMap; use serde::{Deserializer, Serializer}; use std::fmt; pub fn serialize<S: Serializer>(items: &Vec<EzDictItem>, s: S) -> Result<S::Ok, S::Error> { let mut map = s.serialize_map(Some(items.len()))?; for item in items { map.serialize_entry(item.key(), item.value())?; } map.end() } pub fn deserialize<'de, D: Deserializer<'de>>(d: D) -> Result<Vec<EzDictItem>, D::Error> { struct ItemVisitor; impl<'de> Visitor<'de> for ItemVisitor { type Value = Vec<EzDictItem>; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { formatter.write_str("key and value") } fn visit_map<M: MapAccess<'de>>(self, mut access: M) -> Result<Self::Value, M::Error> { let mut ret = Vec::with_capacity(access.size_hint().unwrap_or(10)); while let Some((key, value)) = access.next_entry()? { ret.push(EzDictItem::new(key, value)); } Ok(ret) } } d.deserialize_map(ItemVisitor) } } pub struct EzContext { lib: Container<EzTransLib<'static>>, cache: FxHashMap<String, String>, dict: EzDict, encode_buffer: Vec<u8>, string_buffer: String, } impl EzContext { pub fn from_path( lib: Container<EzTransLib<'static>>, path: &Path, ) -> Result<Self, Box<dyn std::error::Error>> { let cache_path = path.join("cache.msgpack"); let dict_path = path.join("userdic.yml"); let json_dict_path = path.join("userdic.json"); let mut cache = if cache_path.exists() { rmp_serde::from_read(fs::File::open(cache_path)?)? } else { FxHashMap::default() }; cache.insert(String::new(), String::new()); let mut dict = if dict_path.exists() { serde_yaml::from_reader(fs::File::open(dict_path)?)? } else if json_dict_path.exists() { serde_json::from_reader(fs::File::open(json_dict_path)?)? 
} else { EzDict::default() }; dict.sort(); Ok(Self { lib, cache, dict, encode_buffer: Vec::with_capacity(8192), string_buffer: String::new(), }) } pub fn save_to(&self, path: &Path) -> Result<(), Box<dyn std::error::Error>> { let cache_path = path.join("cache.msgpack"); let dict_path = path.join("userdic.yml"); use std::fs::write; write(cache_path, rmp_serde::to_vec(&self.cache)?)?; write(dict_path, serde_yaml::to_vec(&self.dict)?)?; Ok(()) } fn translate_impl(&mut self, text: &str) -> &str { let dict = &mut self.dict; let lib = &self.lib; let buf = &mut self.encode_buffer; let str_buf = &mut self.string_buffer; self.cache.entry(text.into()).or_insert_with(move || { str_buf.push_str(text); let mut encoder = SHIFT_JIS.new_encoder(); let mut decoder = EUC_KR.new_decoder_without_bom_handling(); let max_buf_len = encoder .max_buffer_length_from_utf8_without_replacement(str_buf.len()) .unwrap_or(0); buf.reserve(max_buf_len + 1); let (encoder_ret, _) = encoder.encode_from_utf8_to_vec_without_replacement(&str_buf, buf, true); buf.push(0); assert_eq!(encoder_ret, EncoderResult::InputEmpty); let translated = unsafe { lib.translate(CStr::from_bytes_with_nul_unchecked(&buf[..])) }; let translated = translated.as_bytes(); buf.clear(); str_buf.clear(); let mut ret = String::with_capacity( decoder .max_utf8_buffer_length_without_replacement(translated.len()) .unwrap_or(0), ); let (_decoder_ret, _) = decoder.decode_to_string_without_replacement(translated, &mut ret, true); for after in dict.after_dict.iter() { after.apply(&mut ret); } ret }) } pub fn translate(&mut self, text: &str) -> &str { if!self.cache.contains_key(text) { let max_len = UTF_16LE .new_decoder_without_bom_handling() .max_utf8_buffer_length_without_replacement(text.len() * 2); let mut ret = String::with_capacity(max_len.unwrap_or(text.len() * 3)); { let mut text = text.into(); for before in self.dict.before_dict.iter() { before.apply(&mut text); } let mut prev_pos = 0; let mut is_in_japanese = is_japanese(text.chars().next().unwrap()); for (pos, ch) in text.char_indices() { if is_japanese(ch) { if!is_in_japanese { ret.push_str(&text[prev_pos..pos]); prev_pos = pos; is_in_japanese = true; } } else { if is_in_japanese { let translated = self.translate_impl(&text[prev_pos..pos]); ret.push_str(translated); prev_pos = pos; is_in_japanese = false; } } } if!is_in_japanese { ret.push_str(&text[prev_pos..]); } else { let translated = self.translate_impl(&text[prev_pos..]); ret.push_str(translated); } } self.cache.insert(text.into(), ret); } self.cache.get(text).unwrap() } } #[no_mangle] pub unsafe extern "cdecl" fn ez_init( ez_path: *const u16, ez_path_len: usize, ctx_path: *const u16, ctx_path_len: usize, ) -> *mut EzContext { let path = utf16_to_string(ez_path, ez_path_len); let ctx_path = utf16_to_string(ctx_path, ctx_path_len); let path = Path::new(&path); let ctx_path = Path::new(&ctx_path); eprintln!("Loading lib from {}", path.display()); let lib = match eztrans_rs::load_library(path.join("J2KEngine.dll")) { Ok(lib) => lib, Err(err) => { eprintln!("EzTrans library loading failed: {:?}", err); return null_mut(); } }; let mut dat_dir = path.join("Dat").to_str().unwrap().to_string().into_bytes(); dat_dir.push(0); lib.initialize( CStr::from_bytes_with_nul_unchecked(b"CSUSER123455\0"), CStr::from_bytes_with_nul_unchecked(&dat_dir[..]), ); let ctx = match EzContext::from_path(lib, ctx_path) { Ok(ctx) => ctx, Err(err) => { eprintln!("Loading context failed: {:?}", err); return null_mut(); } }; Box::into_raw(Box::new(ctx)) } #[no_mangle] pub 
unsafe extern "cdecl" fn ez_save(ctx: *mut EzContext, path: *const u16, path_len: usize) { let path = utf16_to_string(path, path_len); let path = Path::new(&path); if let Err(err) = (*ctx).save_to(path) { eprintln!("Save err: {:?}", err); } } #[no_mangle] pub unsafe extern "cdecl" fn ez_delete(ctx: *mut EzContext) { (*ctx).lib.terminate(); let _ = Box::from_raw(ctx); } #[no_mangle] pub unsafe extern "cdecl" fn ez_add_before_dict( ctx: *mut EzContext, key: *const u16, key_len: usize, value: *const u16, value_len: usize, ) { let key = utf16_to_string(key, key_len); let value = utf16_to_string(value, value_len); (*ctx).dict.before_dict.push(EzDictItem::new(key, value)); (*ctx).dict.sort_before_dict(); } #[no_mangle] pub unsafe extern "cdecl" fn ez_add_after_dict( ctx: *mut EzContext, key: *const u16, key_len: usize, value: *const u16, value_len: usize, ) { let key = utf16_to_string(key, key_len); let value = utf16_to_string(value, value_len); (*ctx).dict.after_dict.push(EzDictItem::new(key, value)); (*ctx).dict.sort_after_dict(); } #[no_mangle] pub unsafe extern "cdecl" fn ez_translate( ctx: *mut EzContext, text: *const u16, text_len: usize, out_text: *mut *const u8, out_text_len: *mut usize, ) -> i32 { let text = utf16_to_string(text, text_len); let translated = (*ctx).translate(text.as_ref()); *out_text = translated.as_ptr(); *out_text_len = translated.len(); 0 } fn u16_slice_to_u8_slice(slice: &[u16]) -> &[u8] { unsafe { std::slice::from_raw_parts(slice.as_ptr() as *const u8, slice.len() * 2) } } unsafe fn utf16_to_string<'a>(text: *const u16, len: usize) -> String { let (text, _) = UTF_16LE .decode_without_bom_handling(u16_slice_to_u8_slice(std::slice::from_raw_parts(text, len))); text.into() } fn is_japanese(ch: char) -> bool { let ch = ch as u32; (ch >= 0x3000 && ch <= 0x30FF) || (ch >= 0x4E00 && ch <= 0x9FAF) }
random_line_split
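Given the serde attributes on `EzDict` (map-shaped `dict_items`, the legacy `BeforeDic`/`AfterDic` aliases, everything defaulted), a crate-internal round trip through `serde_yaml` plausibly looks like the sketch below; the YAML keys and entries are illustrative:

// Assumes the EzDict derives from the records above; the fields are
// private, so this only compiles inside the same crate.
let yaml = "sort: true\nBeforeDic:\n  あなた: 아나타\n";
let dict: EzDict = serde_yaml::from_str(yaml).unwrap();
assert_eq!(dict.before_dict[0].key(), "あなた");
assert_eq!(dict.before_dict[0].value(), "아나타");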
lib.rs
use encoding_rs::{EncoderResult, EUC_KR, SHIFT_JIS, UTF_16LE}; use eztrans_rs::{Container, EzTransLib}; use fxhash::FxHashMap; use serde_derive::{Deserialize, Serialize}; use std::ffi::CStr; use std::fs; use std::path::Path; use std::ptr::null_mut; pub struct EzDictItem { key: String, value: String, } impl EzDictItem { pub fn new(key: String, value: String) -> Self { assert!(!key.is_empty()); Self { key, value } } pub fn apply(&self, text: &mut String) { let mut prev_pos = 0; while let Some(pos) = twoway::find_str(&text[prev_pos..], &self.key) { text.replace_range(prev_pos + pos..prev_pos + pos + self.key.len(), &self.value); prev_pos = pos + self.value.len(); } } #[inline] pub fn key(&self) -> &str { &self.key } #[inline] pub fn value(&self) -> &str { &self.value } } #[test] fn dict_item_test() { let item = EzDictItem::new("あなた".into(), "아나타".into()); let mut foo = "あなた당신あなた".into(); item.apply(&mut foo); assert_eq!(foo, "아나타당신아나타"); } #[test] #[should_panic] fn dict_item_empty_key_test() { let _item = EzDictItem::new("".into(), "123".into()); } #[test] fn dict_item_empty_value_test() { let item = EzDictItem::new("123".into(), "".into()); let mut foo = "123def".into(); item.apply(&mut foo); assert_eq!(foo, "def"); } #[test] fn dict_item_eq_kv_test() { let item = EzDictItem::new("123".into(), "123".into()); let mut foo = "123def".into(); item.apply(&mut foo); assert_eq!(foo, "123def"); } #[derive(Serialize, Deserialize, Default)] struct EzDict { #[serde(default)] sort: bool, #[serde(alias = "BeforeDic")] #[serde(with = "dict_items")] #[serde(default)] before_dict: Vec<EzDictItem>, #[serde(alias = "AfterDic")] #[serde(with = "dict_items")] #[serde(default)] after_dict: Vec<EzDictItem>, } impl EzDict { pub fn sort_before_dict(&mut self) { if!self.sort { return; } self.before_dict .sort_unstable_by(|l, r| l.key().cmp(r.key())); } pub fn sort_after_dict(&mut self) { if!self.sort { return; } self.after_dict .sort_unstable_by(|l, r| l.key().cmp(r.key())); } pub fn sort(&mut self) { self.sort_after_dict(); self.sort_before_dict(); } } mod dict_items { use super::EzDictItem; use serde::de::{MapAccess, Visitor}; use serde::ser::SerializeMap; use serde::{Deserializer, Serializer}; use std::fmt; pub fn serialize<S: Serializer>(items: &Vec<EzDictItem>, s: S) -> Result<S::Ok, S::Error> { let mut map = s.serialize_map(Some(items.len()))?; for item in items { map.serialize_entry(item.key(), item.value())?; } map.end() } pub fn deserialize<'de, D: Deserializer<'de>>(d: D) -> Result<Vec<EzDictItem>, D::Error> { struct ItemVisitor; impl<'de> Visitor<'de> for ItemVisitor { type Value = Vec<EzDictItem>; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { formatter.write_str("key and value") } fn visit_map<M: MapAccess<'de>>(self, mut access: M) -> Result<Self::Value, M::Error> { let mut ret = Vec::with_capacity(access.size_hint().unwrap_or(10)); while let Some((key, value)) = access.next_entry()? { ret.push(EzDictItem::new(key, value)); } Ok(ret) } } d.deserialize_map(ItemVisitor) } } pub struct EzContext { lib: Container<EzTransLib<'static>>, cache: FxHashMap<String, String>, dict: EzDict, encode_buffer: Vec<u8>, string_buffer: String, } impl EzContext { pub fn from_path( lib: Container<EzTransLib<'static>>, path: &Path, ) -> Result<Self, Box<dyn std::error::Error>> { let cache_path = path.join("cache.msgpack"); let dict_path = path.join("userdic.yml"); let json_dict_path = path.join("userdic.json"); let mut cache = if cache_path.exists() { rmp_serde::from_read(fs::File::open(cache_path)?)? 
} else { FxHashMap::default() }; cache.insert(String::new(), String::new()); let mut dict = if dict_path.exists() { serde_yaml::from_reader(fs::File::open(dict_path)?)? } else if json_dict_path.exists() { serde_json::from_reader(fs::File::open(json_dict_path)?)? } else { EzDict::default() }; dict.sort(); Ok(Self { lib, cache, dict, encode_buffer: Vec::with_capacity(8192), string_buffer: String::new(), }) } pub fn save_to(&self, path: &Path) -> Result<(), Box<dyn std::error::Error>> { let cache_path = path.join("cache.msgpack"); let dict_path = path.join("userdic.yml"); use std::fs::write; write(cache_path, rmp_serde::to_vec(&self.cache)?)?; write(dict_path, serde_yaml::to_vec(&self.dict)?)?; Ok(()) } fn translate_impl(&mut self, text: &str) -> &st
t dict = &mut self.dict; let lib = &self.lib; let buf = &mut self.encode_buffer; let str_buf = &mut self.string_buffer; self.cache.entry(text.into()).or_insert_with(move || { str_buf.push_str(text); let mut encoder = SHIFT_JIS.new_encoder(); let mut decoder = EUC_KR.new_decoder_without_bom_handling(); let max_buf_len = encoder .max_buffer_length_from_utf8_without_replacement(str_buf.len()) .unwrap_or(0); buf.reserve(max_buf_len + 1); let (encoder_ret, _) = encoder.encode_from_utf8_to_vec_without_replacement(&str_buf, buf, true); buf.push(0); assert_eq!(encoder_ret, EncoderResult::InputEmpty); let translated = unsafe { lib.translate(CStr::from_bytes_with_nul_unchecked(&buf[..])) }; let translated = translated.as_bytes(); buf.clear(); str_buf.clear(); let mut ret = String::with_capacity( decoder .max_utf8_buffer_length_without_replacement(translated.len()) .unwrap_or(0), ); let (_decoder_ret, _) = decoder.decode_to_string_without_replacement(translated, &mut ret, true); for after in dict.after_dict.iter() { after.apply(&mut ret); } ret }) } pub fn translate(&mut self, text: &str) -> &str { if!self.cache.contains_key(text) { let max_len = UTF_16LE .new_decoder_without_bom_handling() .max_utf8_buffer_length_without_replacement(text.len() * 2); let mut ret = String::with_capacity(max_len.unwrap_or(text.len() * 3)); { let mut text = text.into(); for before in self.dict.before_dict.iter() { before.apply(&mut text); } let mut prev_pos = 0; let mut is_in_japanese = is_japanese(text.chars().next().unwrap()); for (pos, ch) in text.char_indices() { if is_japanese(ch) { if!is_in_japanese { ret.push_str(&text[prev_pos..pos]); prev_pos = pos; is_in_japanese = true; } } else { if is_in_japanese { let translated = self.translate_impl(&text[prev_pos..pos]); ret.push_str(translated); prev_pos = pos; is_in_japanese = false; } } } if!is_in_japanese { ret.push_str(&text[prev_pos..]); } else { let translated = self.translate_impl(&text[prev_pos..]); ret.push_str(translated); } } self.cache.insert(text.into(), ret); } self.cache.get(text).unwrap() } } #[no_mangle] pub unsafe extern "cdecl" fn ez_init( ez_path: *const u16, ez_path_len: usize, ctx_path: *const u16, ctx_path_len: usize, ) -> *mut EzContext { let path = utf16_to_string(ez_path, ez_path_len); let ctx_path = utf16_to_string(ctx_path, ctx_path_len); let path = Path::new(&path); let ctx_path = Path::new(&ctx_path); eprintln!("Loading lib from {}", path.display()); let lib = match eztrans_rs::load_library(path.join("J2KEngine.dll")) { Ok(lib) => lib, Err(err) => { eprintln!("EzTrans library loading failed: {:?}", err); return null_mut(); } }; let mut dat_dir = path.join("Dat").to_str().unwrap().to_string().into_bytes(); dat_dir.push(0); lib.initialize( CStr::from_bytes_with_nul_unchecked(b"CSUSER123455\0"), CStr::from_bytes_with_nul_unchecked(&dat_dir[..]), ); let ctx = match EzContext::from_path(lib, ctx_path) { Ok(ctx) => ctx, Err(err) => { eprintln!("Loading context failed: {:?}", err); return null_mut(); } }; Box::into_raw(Box::new(ctx)) } #[no_mangle] pub unsafe extern "cdecl" fn ez_save(ctx: *mut EzContext, path: *const u16, path_len: usize) { let path = utf16_to_string(path, path_len); let path = Path::new(&path); if let Err(err) = (*ctx).save_to(path) { eprintln!("Save err: {:?}", err); } } #[no_mangle] pub unsafe extern "cdecl" fn ez_delete(ctx: *mut EzContext) { (*ctx).lib.terminate(); let _ = Box::from_raw(ctx); } #[no_mangle] pub unsafe extern "cdecl" fn ez_add_before_dict( ctx: *mut EzContext, key: *const u16, key_len: usize, value: *const 
u16, value_len: usize, ) { let key = utf16_to_string(key, key_len); let value = utf16_to_string(value, value_len); (*ctx).dict.before_dict.push(EzDictItem::new(key, value)); (*ctx).dict.sort_before_dict(); } #[no_mangle] pub unsafe extern "cdecl" fn ez_add_after_dict( ctx: *mut EzContext, key: *const u16, key_len: usize, value: *const u16, value_len: usize, ) { let key = utf16_to_string(key, key_len); let value = utf16_to_string(value, value_len); (*ctx).dict.after_dict.push(EzDictItem::new(key, value)); (*ctx).dict.sort_after_dict(); } #[no_mangle] pub unsafe extern "cdecl" fn ez_translate( ctx: *mut EzContext, text: *const u16, text_len: usize, out_text: *mut *const u8, out_text_len: *mut usize, ) -> i32 { let text = utf16_to_string(text, text_len); let translated = (*ctx).translate(text.as_ref()); *out_text = translated.as_ptr(); *out_text_len = translated.len(); 0 } fn u16_slice_to_u8_slice(slice: &[u16]) -> &[u8] { unsafe { std::slice::from_raw_parts(slice.as_ptr() as *const u8, slice.len() * 2) } } unsafe fn utf16_to_string<'a>(text: *const u16, len: usize) -> String { let (text, _) = UTF_16LE .decode_without_bom_handling(u16_slice_to_u8_slice(std::slice::from_raw_parts(text, len))); text.into() } fn is_japanese(ch: char) -> bool { let ch = ch as u32; (ch >= 0x3000 && ch <= 0x30FF) || (ch >= 0x4E00 && ch <= 0x9FAF) }
r { le
identifier_name
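The `translate` method in this record only sends Japanese runs to the engine: it walks `char_indices`, flips state whenever `is_japanese` changes, and translates just the Japanese segments. That segmentation is easy to exercise in isolation; a self-contained sketch using the same character ranges as the record's `is_japanese` (and, unlike the record's `text.chars().next().unwrap()`, safe on empty input):

fn is_japanese(ch: char) -> bool {
    let ch = ch as u32;
    (0x3000..=0x30FF).contains(&ch) || (0x4E00..=0x9FAF).contains(&ch)
}

/// Split text into maximal runs that are uniformly Japanese or non-Japanese.
fn split_runs(text: &str) -> Vec<&str> {
    let mut runs = Vec::new();
    let mut prev = 0;
    let mut in_jp = text.chars().next().map_or(false, is_japanese);
    for (pos, ch) in text.char_indices() {
        if is_japanese(ch) != in_jp {
            runs.push(&text[prev..pos]);
            prev = pos;
            in_jp = !in_jp;
        }
    }
    runs.push(&text[prev..]);
    runs
}

fn main() {
    assert_eq!(split_runs("あなたABCかな"), ["あなた", "ABC", "かな"]);
}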
lib.rs
/// ``` pub fn install() { BacktracePrinter::default().install(default_output_stream()); } /// Create the default output stream. /// /// If stderr is attached to a tty, this is a colorized stderr, else it's /// a plain (colorless) stderr. pub fn default_output_stream() -> Box<StandardStream> { Box::new(StandardStream::stderr(if std::io::stderr().is_terminal() { ColorChoice::Always } else { ColorChoice::Never })) } #[deprecated( since = "0.4.0", note = "Use `BacktracePrinter::into_panic_handler()` instead." )] pub fn create_panic_handler( printer: BacktracePrinter, ) -> Box<dyn Fn(&PanicInfo<'_>) +'static + Sync + Send> { let out_stream_mutex = Mutex::new(default_output_stream()); Box::new(move |pi| { let mut lock = out_stream_mutex.lock().unwrap(); if let Err(e) = printer.print_panic_info(pi, &mut *lock) { // Panicking while handling a panic would send us into a deadlock, // so we just print the error to stderr instead. eprintln!("Error while printing panic: {:?}", e); } }) } #[deprecated(since = "0.4.0", note = "Use `BacktracePrinter::install()` instead.")] pub fn install_with_settings(printer: BacktracePrinter) { std::panic::set_hook(printer.into_panic_handler(default_output_stream())) } // ============================================================================================== // // [Backtrace frame] // // ============================================================================================== // pub type FilterCallback = dyn Fn(&mut Vec<&Frame>) + Send + Sync +'static; #[derive(Debug)] pub struct Frame { pub n: usize, pub name: Option<String>, pub lineno: Option<u32>, pub filename: Option<PathBuf>, pub ip: usize, _private_ctor: (), } impl Frame { /// Heuristically determine whether the frame is likely to be part of a /// dependency. /// /// If it fails to detect some patterns in your code base, feel free to drop /// an issue / a pull request! fn is_dependency_code(&self) -> bool { const SYM_PREFIXES: &[&str] = &[ "std::", "core::", "backtrace::backtrace::", "_rust_begin_unwind", "color_traceback::", "__rust_", "___rust_", "__pthread", "_main", "main", "__scrt_common_main_seh", "BaseThreadInitThunk", "_start", "__libc_start_main", "start_thread", ]; // Inspect name. if let Some(ref name) = self.name { if SYM_PREFIXES.iter().any(|x| name.starts_with(x)) { return true; } } const FILE_PREFIXES: &[&str] = &[ "/rustc/", "src/libstd/", "src/libpanic_unwind/", "src/libtest/", ]; // Inspect filename. if let Some(ref filename) = self.filename { let filename = filename.to_string_lossy(); if FILE_PREFIXES.iter().any(|x| filename.starts_with(x)) || filename.contains("/.cargo/registry/src/") { return true; } } false } /// Heuristically determine whether a frame is likely to be a post panic /// frame. /// /// Post panic frames are frames of a functions called after the actual panic /// is already in progress and don't contain any useful information for a /// reader of the backtrace. fn is_post_panic_code(&self) -> bool { const SYM_PREFIXES: &[&str] = &[ "_rust_begin_unwind", "rust_begin_unwind", "core::result::unwrap_failed", "core::option::expect_none_failed", "core::panicking::panic_fmt", "color_backtrace::create_panic_handler", "std::panicking::begin_panic", "begin_panic_fmt", "backtrace::capture", ]; match self.name.as_ref() { Some(name) => SYM_PREFIXES.iter().any(|x| name.starts_with(x)), None => false, } } /// Heuristically determine whether a frame is likely to be part of language /// runtime. 
fn is_runtime_init_code(&self) -> bool { const SYM_PREFIXES: &[&str] = &[ "std::rt::lang_start::", "test::run_test::run_test_inner::", "std::sys_common::backtrace::__rust_begin_short_backtrace", ]; let (name, file) = match (self.name.as_ref(), self.filename.as_ref()) { (Some(name), Some(filename)) => (name, filename.to_string_lossy()), _ => return false, }; if SYM_PREFIXES.iter().any(|x| name.starts_with(x)) { return true; } // For Linux, this is the best rule for skipping test init I found. if name == "{{closure}}" && file == "src/libtest/lib.rs" { return true; } false } fn print_source_if_avail(&self, mut out: impl WriteColor, s: &BacktracePrinter) -> IOResult { let (lineno, filename) = match (self.lineno, self.filename.as_ref()) { (Some(a), Some(b)) => (a, b), // Without a line number and file name, we can't sensibly proceed. _ => return Ok(()), }; let file = match File::open(filename) { Ok(file) => file, Err(ref e) if e.kind() == ErrorKind::NotFound => return Ok(()), e @ Err(_) => e?, }; // Extract relevant lines. let reader = BufReader::new(file); let start_line = lineno - 2.min(lineno - 1); let surrounding_src = reader.lines().skip(start_line as usize - 1).take(5); for (line, cur_line_no) in surrounding_src.zip(start_line..) { if cur_line_no == lineno { // Print actual source line with brighter color. out.set_color(&s.colors.selected_src_ln)?; writeln!(out, "{:>8} > {}", cur_line_no, line?)?; out.reset()?; } else { writeln!(out, "{:>8} │ {}", cur_line_no, line?)?; } } Ok(()) } /// Get the module's name by walking /proc/self/maps #[cfg(all( feature = "resolve-modules", unix, not(any(target_os = "macos", target_os = "ios")) ))] fn module_info(&self) -> Option<(String, usize)> { use regex::Regex; use std::path::Path; let re = Regex::new( r"(?x) ^ (?P<start>[0-9a-f]{8,16}) - (?P<end>[0-9a-f]{8,16}) \s (?P<perm>[-rwxp]{4}) \s (?P<offset>[0-9a-f]{8}) \s [0-9a-f]+:[0-9a-f]+ \s [0-9]+ \s+ (?P<path>.*) $ ", ) .unwrap(); let mapsfile = File::open("/proc/self/maps").expect("Unable to open /proc/self/maps"); for line in BufReader::new(mapsfile).lines() { let line = line.unwrap(); if let Some(caps) = re.captures(&line) { let (start, end, path) = ( usize::from_str_radix(caps.name("start").unwrap().as_str(), 16).unwrap(), usize::from_str_radix(caps.name("end").unwrap().as_str(), 16).unwrap(), caps.name("path").unwrap().as_str().to_string(), ); if self.ip >= start && self.ip < end { return if let Some(filename) = Path::new(&path).file_name() { Some((filename.to_str().unwrap().to_string(), start)) } else { None }; } } } None } #[cfg(not(all( feature = "resolve-modules", unix, not(any(target_os = "macos", target_os = "ios")) )))] fn module_info(&self) -> Option<(String, usize)> { None } fn pr
self, i: usize, out: &mut impl WriteColor, s: &BacktracePrinter) -> IOResult { let is_dependency_code = self.is_dependency_code(); // Print frame index. write!(out, "{:>2}: ", i)?; if s.should_print_addresses() { if let Some((module_name, module_base)) = self.module_info() { write!(out, "{}:0x{:08x} - ", module_name, self.ip - module_base)?; } else { write!(out, "0x{:016x} - ", self.ip)?; } } // Does the function have a hash suffix? // (dodging a dep on the regex crate here) let name = self .name .as_ref() .map(|s| s.as_str()) .unwrap_or("<unknown>"); let has_hash_suffix = name.len() > 19 && &name[name.len() - 19..name.len() - 16] == "::h" && name[name.len() - 16..].chars().all(|x| x.is_digit(16)); // Print function name. out.set_color(if is_dependency_code { &s.colors.dependency_code } else { &s.colors.crate_code })?; if has_hash_suffix { write!(out, "{}", &name[..name.len() - 19])?; if s.strip_function_hash { writeln!(out)?; } else { out.set_color(if is_dependency_code { &s.colors.dependency_code_hash } else { &s.colors.crate_code_hash })?; writeln!(out, "{}", &name[name.len() - 19..])?; } } else { writeln!(out, "{}", name)?; } out.reset()?; // Print source location, if known. if let Some(ref file) = self.filename { let filestr = file.to_str().unwrap_or("<bad utf8>"); let lineno = self .lineno .map_or("<unknown line>".to_owned(), |x| x.to_string()); writeln!(out, " at {}:{}", filestr, lineno)?; } else { writeln!(out, " at <unknown source file>")?; } // Maybe print source. if s.current_verbosity() >= Verbosity::Full { self.print_source_if_avail(out, s)?; } Ok(()) } } /// The default frame filter. Heuristically determines whether a frame is likely to be an /// uninteresting frame. This filters out post panic frames and runtime init frames and dependency /// code. pub fn default_frame_filter(frames: &mut Vec<&Frame>) { let top_cutoff = frames .iter() .rposition(|x| x.is_post_panic_code()) .map(|x| x + 2) // indices are 1 based .unwrap_or(0); let bottom_cutoff = frames .iter() .position(|x| x.is_runtime_init_code()) .unwrap_or_else(|| frames.len()); let rng = top_cutoff..=bottom_cutoff; frames.retain(|x| rng.contains(&x.n)) } // ============================================================================================== // // [BacktracePrinter] // // ============================================================================================== // /// Color scheme definition. #[derive(Debug, Clone)] pub struct ColorScheme { pub frames_omitted_msg: ColorSpec, pub header: ColorSpec, pub msg_loc_prefix: ColorSpec, pub src_loc: ColorSpec, pub src_loc_separator: ColorSpec, pub env_var: ColorSpec, pub dependency_code: ColorSpec, pub dependency_code_hash: ColorSpec, pub crate_code: ColorSpec, pub crate_code_hash: ColorSpec, pub selected_src_ln: ColorSpec, } impl ColorScheme { /// Helper to create a new `ColorSpec` & set a few properties in one wash. fn cs(fg: Option<Color>, intense: bool, bold: bool) -> ColorSpec { let mut cs = ColorSpec::new(); cs.set_fg(fg); cs.set_bold(bold); cs.set_intense(intense); cs } /// The classic `color-backtrace` scheme, as shown in the screenshots. 
pub fn classic() -> Self { Self { frames_omitted_msg: Self::cs(Some(Color::Cyan), true, false), header: Self::cs(Some(Color::Red), false, false), msg_loc_prefix: Self::cs(Some(Color::Cyan), false, false), src_loc: Self::cs(Some(Color::Magenta), false, false), src_loc_separator: Self::cs(Some(Color::White), false, false), env_var: Self::cs(None, false, true), dependency_code: Self::cs(Some(Color::Green), false, false), dependency_code_hash: Self::cs(Some(Color::Black), true, false), crate_code: Self::cs(Some(Color::Red), true, false), crate_code_hash: Self::cs(Some(Color::Black), true, false), selected_src_ln: Self::cs(None, false, true), } } } impl Default for ColorScheme { fn default() -> Self { Self::classic() } } #[deprecated(since = "0.4.0", note = "Use `BacktracePrinter` instead.")] pub type Settings = BacktracePrinter; /// Pretty-printer for backtraces and [`PanicInfo`](PanicInfo) structs. #[derive(Clone)] pub struct BacktracePrinter { message: String, verbosity: Verbosity, lib_verbosity: Verbosity, strip_function_hash: bool, is_panic_handler: bool, colors: ColorScheme, filters: Vec<Arc<FilterCallback>>, should_print_addresses: bool, } impl Default for BacktracePrinter { fn default() -> Self { Self { verbosity: Verbosity::from_env(), lib_verbosity: Verbosity::lib_from_env(), message: "The application panicked (crashed).".to_owned(), strip_function_hash: false, colors: ColorScheme::classic(), is_panic_handler: false, filters: vec![Arc::new(default_frame_filter)], should_print_addresses: false, } } } impl std::fmt::Debug for BacktracePrinter { fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { fmt.debug_struct("Settings") .field("message", &self.message) .field("verbosity", &self.verbosity) .field("lib_verbosity", &self.lib_verbosity) .field("strip_function_hash", &self.strip_function_hash) .field("is_panic_handler", &self.is_panic_handler) .field("print_addresses", &self.should_print_addresses) .field("colors", &self.colors) .finish() } } /// Builder functions. impl BacktracePrinter { /// Alias for `BacktracePrinter::default`. pub fn new() -> Self { Self::default() } /// Alter the color scheme. /// /// Defaults to `ColorScheme::classic()`. pub fn color_scheme(mut self, colors: ColorScheme) -> Self { self.colors = colors; self } /// Controls the "greeting" message of the panic. /// /// Defaults to `"The application panicked (crashed)"`. pub fn message(mut self, message: impl Into<String>) -> Self { self.message = message.into(); self } /// Controls the verbosity level used when installed as panic handler. /// /// Defaults to `Verbosity::from_env()`. pub fn verbosity(mut self, v: Verbosity) -> Self { self.verbosity = v; self } /// Controls the lib verbosity level used when formatting user provided traces. /// /// Defaults to `Verbosity::lib_from_env()`. pub fn lib_verbosity(mut self, v: Verbosity) -> Self { self.lib_verbosity = v; self } /// Controls whether the hash part of functions is stripped. /// /// Defaults to `false`. pub fn strip_function_hash(mut self, strip: bool) -> Self { self.strip_function_hash = strip; self } /// Controls whether addresses (or module offsets if available) should be printed. /// /// Defaults to `false`. pub fn print_addresses(mut self, val: bool) -> Self { self.should_print_addresses = val; self } /// Add a custom filter to the set of frame filters /// /// Filters are run in the order they are added. 
/// /// # Example /// /// ```rust /// use color_backtrace::{default_output_stream, BacktracePrinter}; /// /// BacktracePrinter::new() /// .add_frame_filter(Box::new(|frames| { /// frames.retain(|x| matches!(&x.name, Some(n) if!n.starts_with("blabla"))) /// })) /// .install(default_output_stream()); /// ``` pub fn add_frame_filter(mut self, filter: Box<FilterCallback>) -> Self { self.filters.push(filter.into()); self } /// Clears all filters associated with this printer, including the default filter pub fn clear_frame_filters(mut self) -> Self { self.filters.clear(); self } } /// Routines for putting the panic printer to use. impl BacktracePrinter { /// Install the `color_backtrace` handler with default settings. /// /// Output streams can be created via `default_output_stream()` or /// using any other stream that implements /// [`termcolor::WriteColor`](termcolor::WriteColor). pub fn install(self, out: impl WriteColor + Sync + Send +'static) { std::panic::set_hook(self.into_panic_handler(out)) } /// Create a `color_backtrace` panic handler from this panic printer. /// /// This can be used if you want to combine the handler with other handlers. pub fn into_panic_handler( mut self, out: impl WriteColor + Sync + Send +'static, ) -> Box<dyn Fn(&PanicInfo<'_>) +'static + Sync + Send> { self.is_panic_handler = true; let out_stream_mutex = Mutex::new(out); Box::new(move |pi| { let mut lock = out_stream_mutex.lock().unwrap(); if let Err(e) = self.print_panic_info(pi, &mut *lock) { // Panicking while handling a panic would send us into a deadlock, // so we just
int(&
identifier_name
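The middle here completes `Frame::print`, whose most opaque step is the symbol-hash check: rustc mangles functions as `path::to::fn::h<16 hex digits>`, so the code tests for a 19-byte `::h…` suffix instead of pulling in a regex dependency. That heuristic in isolation, as a runnable sketch:

// Mirrors the record's hash-suffix shape check.
fn has_hash_suffix(name: &str) -> bool {
    name.len() > 19
        && &name[name.len() - 19..name.len() - 16] == "::h"
        && name[name.len() - 16..].chars().all(|x| x.is_digit(16))
}

fn main() {
    assert!(has_hash_suffix("mycrate::main::h0123456789abcdef"));
    assert!(!has_hash_suffix("mycrate::main"));
}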
lib.rs
/// ``` pub fn install() { BacktracePrinter::default().install(default_output_stream()); } /// Create the default output stream. /// /// If stderr is attached to a tty, this is a colorized stderr, else it's /// a plain (colorless) stderr. pub fn default_output_stream() -> Box<StandardStream> { Box::new(StandardStream::stderr(if std::io::stderr().is_terminal() { ColorChoice::Always } else { ColorChoice::Never })) } #[deprecated( since = "0.4.0", note = "Use `BacktracePrinter::into_panic_handler()` instead." )] pub fn create_panic_handler( printer: BacktracePrinter, ) -> Box<dyn Fn(&PanicInfo<'_>) +'static + Sync + Send> { let out_stream_mutex = Mutex::new(default_output_stream()); Box::new(move |pi| { let mut lock = out_stream_mutex.lock().unwrap(); if let Err(e) = printer.print_panic_info(pi, &mut *lock) { // Panicking while handling a panic would send us into a deadlock, // so we just print the error to stderr instead. eprintln!("Error while printing panic: {:?}", e); } }) } #[deprecated(since = "0.4.0", note = "Use `BacktracePrinter::install()` instead.")] pub fn install_with_settings(printer: BacktracePrinter) { std::panic::set_hook(printer.into_panic_handler(default_output_stream())) } // ============================================================================================== // // [Backtrace frame] // // ============================================================================================== // pub type FilterCallback = dyn Fn(&mut Vec<&Frame>) + Send + Sync +'static; #[derive(Debug)] pub struct Frame { pub n: usize, pub name: Option<String>, pub lineno: Option<u32>, pub filename: Option<PathBuf>, pub ip: usize, _private_ctor: (), } impl Frame { /// Heuristically determine whether the frame is likely to be part of a /// dependency. /// /// If it fails to detect some patterns in your code base, feel free to drop /// an issue / a pull request! fn is_dependency_code(&self) -> bool { const SYM_PREFIXES: &[&str] = &[ "std::", "core::", "backtrace::backtrace::", "_rust_begin_unwind", "color_traceback::", "__rust_",
"___rust_", "__pthread", "_main", "main", "__scrt_common_main_seh", "BaseThreadInitThunk", "_start", "__libc_start_main", "start_thread", ]; // Inspect name. if let Some(ref name) = self.name { if SYM_PREFIXES.iter().any(|x| name.starts_with(x)) { return true; } } const FILE_PREFIXES: &[&str] = &[ "/rustc/", "src/libstd/", "src/libpanic_unwind/", "src/libtest/", ]; // Inspect filename. if let Some(ref filename) = self.filename { let filename = filename.to_string_lossy(); if FILE_PREFIXES.iter().any(|x| filename.starts_with(x)) || filename.contains("/.cargo/registry/src/") { return true; } } false } /// Heuristically determine whether a frame is likely to be a post panic /// frame. /// /// Post panic frames are frames of a functions called after the actual panic /// is already in progress and don't contain any useful information for a /// reader of the backtrace. fn is_post_panic_code(&self) -> bool { const SYM_PREFIXES: &[&str] = &[ "_rust_begin_unwind", "rust_begin_unwind", "core::result::unwrap_failed", "core::option::expect_none_failed", "core::panicking::panic_fmt", "color_backtrace::create_panic_handler", "std::panicking::begin_panic", "begin_panic_fmt", "backtrace::capture", ]; match self.name.as_ref() { Some(name) => SYM_PREFIXES.iter().any(|x| name.starts_with(x)), None => false, } } /// Heuristically determine whether a frame is likely to be part of language /// runtime. fn is_runtime_init_code(&self) -> bool { const SYM_PREFIXES: &[&str] = &[ "std::rt::lang_start::", "test::run_test::run_test_inner::", "std::sys_common::backtrace::__rust_begin_short_backtrace", ]; let (name, file) = match (self.name.as_ref(), self.filename.as_ref()) { (Some(name), Some(filename)) => (name, filename.to_string_lossy()), _ => return false, }; if SYM_PREFIXES.iter().any(|x| name.starts_with(x)) { return true; } // For Linux, this is the best rule for skipping test init I found. if name == "{{closure}}" && file == "src/libtest/lib.rs" { return true; } false } fn print_source_if_avail(&self, mut out: impl WriteColor, s: &BacktracePrinter) -> IOResult { let (lineno, filename) = match (self.lineno, self.filename.as_ref()) { (Some(a), Some(b)) => (a, b), // Without a line number and file name, we can't sensibly proceed. _ => return Ok(()), }; let file = match File::open(filename) { Ok(file) => file, Err(ref e) if e.kind() == ErrorKind::NotFound => return Ok(()), e @ Err(_) => e?, }; // Extract relevant lines. let reader = BufReader::new(file); let start_line = lineno - 2.min(lineno - 1); let surrounding_src = reader.lines().skip(start_line as usize - 1).take(5); for (line, cur_line_no) in surrounding_src.zip(start_line..) { if cur_line_no == lineno { // Print actual source line with brighter color. 
out.set_color(&s.colors.selected_src_ln)?; writeln!(out, "{:>8} > {}", cur_line_no, line?)?; out.reset()?; } else { writeln!(out, "{:>8} │ {}", cur_line_no, line?)?; } } Ok(()) } /// Get the module's name by walking /proc/self/maps #[cfg(all( feature = "resolve-modules", unix, not(any(target_os = "macos", target_os = "ios")) ))] fn module_info(&self) -> Option<(String, usize)> { use regex::Regex; use std::path::Path; let re = Regex::new( r"(?x) ^ (?P<start>[0-9a-f]{8,16}) - (?P<end>[0-9a-f]{8,16}) \s (?P<perm>[-rwxp]{4}) \s (?P<offset>[0-9a-f]{8}) \s [0-9a-f]+:[0-9a-f]+ \s [0-9]+ \s+ (?P<path>.*) $ ", ) .unwrap(); let mapsfile = File::open("/proc/self/maps").expect("Unable to open /proc/self/maps"); for line in BufReader::new(mapsfile).lines() { let line = line.unwrap(); if let Some(caps) = re.captures(&line) { let (start, end, path) = ( usize::from_str_radix(caps.name("start").unwrap().as_str(), 16).unwrap(), usize::from_str_radix(caps.name("end").unwrap().as_str(), 16).unwrap(), caps.name("path").unwrap().as_str().to_string(), ); if self.ip >= start && self.ip < end { return if let Some(filename) = Path::new(&path).file_name() { Some((filename.to_str().unwrap().to_string(), start)) } else { None }; } } } None } #[cfg(not(all( feature = "resolve-modules", unix, not(any(target_os = "macos", target_os = "ios")) )))] fn module_info(&self) -> Option<(String, usize)> { None } fn print(&self, i: usize, out: &mut impl WriteColor, s: &BacktracePrinter) -> IOResult { let is_dependency_code = self.is_dependency_code(); // Print frame index. write!(out, "{:>2}: ", i)?; if s.should_print_addresses() { if let Some((module_name, module_base)) = self.module_info() { write!(out, "{}:0x{:08x} - ", module_name, self.ip - module_base)?; } else { write!(out, "0x{:016x} - ", self.ip)?; } } // Does the function have a hash suffix? // (dodging a dep on the regex crate here) let name = self .name .as_ref() .map(|s| s.as_str()) .unwrap_or("<unknown>"); let has_hash_suffix = name.len() > 19 && &name[name.len() - 19..name.len() - 16] == "::h" && name[name.len() - 16..].chars().all(|x| x.is_digit(16)); // Print function name. out.set_color(if is_dependency_code { &s.colors.dependency_code } else { &s.colors.crate_code })?; if has_hash_suffix { write!(out, "{}", &name[..name.len() - 19])?; if s.strip_function_hash { writeln!(out)?; } else { out.set_color(if is_dependency_code { &s.colors.dependency_code_hash } else { &s.colors.crate_code_hash })?; writeln!(out, "{}", &name[name.len() - 19..])?; } } else { writeln!(out, "{}", name)?; } out.reset()?; // Print source location, if known. if let Some(ref file) = self.filename { let filestr = file.to_str().unwrap_or("<bad utf8>"); let lineno = self .lineno .map_or("<unknown line>".to_owned(), |x| x.to_string()); writeln!(out, " at {}:{}", filestr, lineno)?; } else { writeln!(out, " at <unknown source file>")?; } // Maybe print source. if s.current_verbosity() >= Verbosity::Full { self.print_source_if_avail(out, s)?; } Ok(()) } } /// The default frame filter. Heuristically determines whether a frame is likely to be an /// uninteresting frame. This filters out post panic frames and runtime init frames and dependency /// code. 
pub fn default_frame_filter(frames: &mut Vec<&Frame>) { let top_cutoff = frames .iter() .rposition(|x| x.is_post_panic_code()) .map(|x| x + 2) // indices are 1 based .unwrap_or(0); let bottom_cutoff = frames .iter() .position(|x| x.is_runtime_init_code()) .unwrap_or_else(|| frames.len()); let rng = top_cutoff..=bottom_cutoff; frames.retain(|x| rng.contains(&x.n)) } // ============================================================================================== // // [BacktracePrinter] // // ============================================================================================== // /// Color scheme definition. #[derive(Debug, Clone)] pub struct ColorScheme { pub frames_omitted_msg: ColorSpec, pub header: ColorSpec, pub msg_loc_prefix: ColorSpec, pub src_loc: ColorSpec, pub src_loc_separator: ColorSpec, pub env_var: ColorSpec, pub dependency_code: ColorSpec, pub dependency_code_hash: ColorSpec, pub crate_code: ColorSpec, pub crate_code_hash: ColorSpec, pub selected_src_ln: ColorSpec, } impl ColorScheme { /// Helper to create a new `ColorSpec` & set a few properties in one wash. fn cs(fg: Option<Color>, intense: bool, bold: bool) -> ColorSpec { let mut cs = ColorSpec::new(); cs.set_fg(fg); cs.set_bold(bold); cs.set_intense(intense); cs } /// The classic `color-backtrace` scheme, as shown in the screenshots. pub fn classic() -> Self { Self { frames_omitted_msg: Self::cs(Some(Color::Cyan), true, false), header: Self::cs(Some(Color::Red), false, false), msg_loc_prefix: Self::cs(Some(Color::Cyan), false, false), src_loc: Self::cs(Some(Color::Magenta), false, false), src_loc_separator: Self::cs(Some(Color::White), false, false), env_var: Self::cs(None, false, true), dependency_code: Self::cs(Some(Color::Green), false, false), dependency_code_hash: Self::cs(Some(Color::Black), true, false), crate_code: Self::cs(Some(Color::Red), true, false), crate_code_hash: Self::cs(Some(Color::Black), true, false), selected_src_ln: Self::cs(None, false, true), } } } impl Default for ColorScheme { fn default() -> Self { Self::classic() } } #[deprecated(since = "0.4.0", note = "Use `BacktracePrinter` instead.")] pub type Settings = BacktracePrinter; /// Pretty-printer for backtraces and [`PanicInfo`](PanicInfo) structs. #[derive(Clone)] pub struct BacktracePrinter { message: String, verbosity: Verbosity, lib_verbosity: Verbosity, strip_function_hash: bool, is_panic_handler: bool, colors: ColorScheme, filters: Vec<Arc<FilterCallback>>, should_print_addresses: bool, } impl Default for BacktracePrinter { fn default() -> Self { Self { verbosity: Verbosity::from_env(), lib_verbosity: Verbosity::lib_from_env(), message: "The application panicked (crashed).".to_owned(), strip_function_hash: false, colors: ColorScheme::classic(), is_panic_handler: false, filters: vec![Arc::new(default_frame_filter)], should_print_addresses: false, } } } impl std::fmt::Debug for BacktracePrinter { fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { fmt.debug_struct("Settings") .field("message", &self.message) .field("verbosity", &self.verbosity) .field("lib_verbosity", &self.lib_verbosity) .field("strip_function_hash", &self.strip_function_hash) .field("is_panic_handler", &self.is_panic_handler) .field("print_addresses", &self.should_print_addresses) .field("colors", &self.colors) .finish() } } /// Builder functions. impl BacktracePrinter { /// Alias for `BacktracePrinter::default`. pub fn new() -> Self { Self::default() } /// Alter the color scheme. /// /// Defaults to `ColorScheme::classic()`. 
pub fn color_scheme(mut self, colors: ColorScheme) -> Self { self.colors = colors; self } /// Controls the "greeting" message of the panic. /// /// Defaults to `"The application panicked (crashed)"`. pub fn message(mut self, message: impl Into<String>) -> Self { self.message = message.into(); self } /// Controls the verbosity level used when installed as panic handler. /// /// Defaults to `Verbosity::from_env()`. pub fn verbosity(mut self, v: Verbosity) -> Self { self.verbosity = v; self } /// Controls the lib verbosity level used when formatting user provided traces. /// /// Defaults to `Verbosity::lib_from_env()`. pub fn lib_verbosity(mut self, v: Verbosity) -> Self { self.lib_verbosity = v; self } /// Controls whether the hash part of functions is stripped. /// /// Defaults to `false`. pub fn strip_function_hash(mut self, strip: bool) -> Self { self.strip_function_hash = strip; self } /// Controls whether addresses (or module offsets if available) should be printed. /// /// Defaults to `false`. pub fn print_addresses(mut self, val: bool) -> Self { self.should_print_addresses = val; self } /// Add a custom filter to the set of frame filters /// /// Filters are run in the order they are added. /// /// # Example /// /// ```rust /// use color_backtrace::{default_output_stream, BacktracePrinter}; /// /// BacktracePrinter::new() /// .add_frame_filter(Box::new(|frames| { /// frames.retain(|x| matches!(&x.name, Some(n) if!n.starts_with("blabla"))) /// })) /// .install(default_output_stream()); /// ``` pub fn add_frame_filter(mut self, filter: Box<FilterCallback>) -> Self { self.filters.push(filter.into()); self } /// Clears all filters associated with this printer, including the default filter pub fn clear_frame_filters(mut self) -> Self { self.filters.clear(); self } } /// Routines for putting the panic printer to use. impl BacktracePrinter { /// Install the `color_backtrace` handler with default settings. /// /// Output streams can be created via `default_output_stream()` or /// using any other stream that implements /// [`termcolor::WriteColor`](termcolor::WriteColor). pub fn install(self, out: impl WriteColor + Sync + Send +'static) { std::panic::set_hook(self.into_panic_handler(out)) } /// Create a `color_backtrace` panic handler from this panic printer. /// /// This can be used if you want to combine the handler with other handlers. pub fn into_panic_handler( mut self, out: impl WriteColor + Sync + Send +'static, ) -> Box<dyn Fn(&PanicInfo<'_>) +'static + Sync + Send> { self.is_panic_handler = true; let out_stream_mutex = Mutex::new(out); Box::new(move |pi| { let mut lock = out_stream_mutex.lock().unwrap(); if let Err(e) = self.print_panic_info(pi, &mut *lock) { // Panicking while handling a panic would send us into a deadlock, // so we just print the error
random_line_split
lib.rs
/// ``` pub fn install() { BacktracePrinter::default().install(default_output_stream()); } /// Create the default output stream. /// /// If stderr is attached to a tty, this is a colorized stderr, else it's /// a plain (colorless) stderr. pub fn default_output_stream() -> Box<StandardStream> { Box::new(StandardStream::stderr(if std::io::stderr().is_terminal() { ColorChoice::Always } else { ColorChoice::Never })) } #[deprecated( since = "0.4.0", note = "Use `BacktracePrinter::into_panic_handler()` instead." )] pub fn create_panic_handler( printer: BacktracePrinter, ) -> Box<dyn Fn(&PanicInfo<'_>) +'static + Sync + Send> { let out_stream_mutex = Mutex::new(default_output_stream()); Box::new(move |pi| { let mut lock = out_stream_mutex.lock().unwrap(); if let Err(e) = printer.print_panic_info(pi, &mut *lock) { // Panicking while handling a panic would send us into a deadlock, // so we just print the error to stderr instead. eprintln!("Error while printing panic: {:?}", e); } }) } #[deprecated(since = "0.4.0", note = "Use `BacktracePrinter::install()` instead.")] pub fn install_with_settings(printer: BacktracePrinter) { std::panic::set_hook(printer.into_panic_handler(default_output_stream())) } // ============================================================================================== // // [Backtrace frame] // // ============================================================================================== // pub type FilterCallback = dyn Fn(&mut Vec<&Frame>) + Send + Sync +'static; #[derive(Debug)] pub struct Frame { pub n: usize, pub name: Option<String>, pub lineno: Option<u32>, pub filename: Option<PathBuf>, pub ip: usize, _private_ctor: (), } impl Frame { /// Heuristically determine whether the frame is likely to be part of a /// dependency. /// /// If it fails to detect some patterns in your code base, feel free to drop /// an issue / a pull request! fn is_dependency_code(&self) -> bool { const SYM_PREFIXES: &[&str] = &[ "std::", "core::", "backtrace::backtrace::", "_rust_begin_unwind", "color_traceback::", "__rust_", "___rust_", "__pthread", "_main", "main", "__scrt_common_main_seh", "BaseThreadInitThunk", "_start", "__libc_start_main", "start_thread", ]; // Inspect name. if let Some(ref name) = self.name { if SYM_PREFIXES.iter().any(|x| name.starts_with(x)) { return true; } } const FILE_PREFIXES: &[&str] = &[ "/rustc/", "src/libstd/", "src/libpanic_unwind/", "src/libtest/", ]; // Inspect filename. if let Some(ref filename) = self.filename { let filename = filename.to_string_lossy(); if FILE_PREFIXES.iter().any(|x| filename.starts_with(x)) || filename.contains("/.cargo/registry/src/") { return true; } } false } /// Heuristically determine whether a frame is likely to be a post panic /// frame. /// /// Post panic frames are frames of a functions called after the actual panic /// is already in progress and don't contain any useful information for a /// reader of the backtrace. fn is_post_panic_code(&self) -> bool { const SYM_PREFIXES: &[&str] = &[ "_rust_begin_unwind", "rust_begin_unwind", "core::result::unwrap_failed", "core::option::expect_none_failed", "core::panicking::panic_fmt", "color_backtrace::create_panic_handler", "std::panicking::begin_panic", "begin_panic_fmt", "backtrace::capture", ]; match self.name.as_ref() { Some(name) => SYM_PREFIXES.iter().any(|x| name.starts_with(x)), None => false, } } /// Heuristically determine whether a frame is likely to be part of language /// runtime. 
fn is_runtime_init_code(&self) -> bool { const SYM_PREFIXES: &[&str] = &[ "std::rt::lang_start::", "test::run_test::run_test_inner::", "std::sys_common::backtrace::__rust_begin_short_backtrace", ]; let (name, file) = match (self.name.as_ref(), self.filename.as_ref()) { (Some(name), Some(filename)) => (name, filename.to_string_lossy()), _ => return false, }; if SYM_PREFIXES.iter().any(|x| name.starts_with(x)) { return true; } // For Linux, this is the best rule for skipping test init I found. if name == "{{closure}}" && file == "src/libtest/lib.rs" { return true; } false } fn print_source_if_avail(&self, mut out: impl WriteColor, s: &BacktracePrinter) -> IOResult { let (lineno, filename) = match (self.lineno, self.filename.as_ref()) { (Some(a), Some(b)) => (a, b), // Without a line number and file name, we can't sensibly proceed. _ => return Ok(()), }; let file = match File::open(filename) { Ok(file) => file, Err(ref e) if e.kind() == ErrorKind::NotFound => return Ok(()), e @ Err(_) => e?, }; // Extract relevant lines. let reader = BufReader::new(file); let start_line = lineno - 2.min(lineno - 1); let surrounding_src = reader.lines().skip(start_line as usize - 1).take(5); for (line, cur_line_no) in surrounding_src.zip(start_line..) { if cur_line_no == lineno { // Print actual source line with brighter color. out.set_color(&s.colors.selected_src_ln)?; writeln!(out, "{:>8} > {}", cur_line_no, line?)?; out.reset()?; } else { writeln!(out, "{:>8} │ {}", cur_line_no, line?)?; } } Ok(()) } /// Get the module's name by walking /proc/self/maps #[cfg(all( feature = "resolve-modules", unix, not(any(target_os = "macos", target_os = "ios")) ))] fn module_info(&self) -> Option<(String, usize)> { use regex::Regex; use std::path::Path; let re = Regex::new( r"(?x) ^ (?P<start>[0-9a-f]{8,16}) - (?P<end>[0-9a-f]{8,16}) \s (?P<perm>[-rwxp]{4}) \s (?P<offset>[0-9a-f]{8}) \s [0-9a-f]+:[0-9a-f]+ \s [0-9]+ \s+ (?P<path>.*) $ ", ) .unwrap(); let mapsfile = File::open("/proc/self/maps").expect("Unable to open /proc/self/maps"); for line in BufReader::new(mapsfile).lines() { let line = line.unwrap(); if let Some(caps) = re.captures(&line) { let (start, end, path) = ( usize::from_str_radix(caps.name("start").unwrap().as_str(), 16).unwrap(), usize::from_str_radix(caps.name("end").unwrap().as_str(), 16).unwrap(), caps.name("path").unwrap().as_str().to_string(), ); if self.ip >= start && self.ip < end { return if let Some(filename) = Path::new(&path).file_name() { Some((filename.to_str().unwrap().to_string(), start)) } else { None }; } } } None } #[cfg(not(all( feature = "resolve-modules", unix, not(any(target_os = "macos", target_os = "ios")) )))] fn module_info(&self) -> Option<(String, usize)> { None } fn print(&self, i: usize, out: &mut impl WriteColor, s: &BacktracePrinter) -> IOResult { let is_dependency_code = self.is_dependency_code(); // Print frame index. write!(out, "{:>2}: ", i)?; if s.should_print_addresses() { if let Some((module_name, module_base)) = self.module_info() { write!(out, "{}:0x{:08x} - ", module_name, self.ip - module_base)?; } else { write!(out, "0x{:016x} - ", self.ip)?; } } // Does the function have a hash suffix? // (dodging a dep on the regex crate here) let name = self .name .as_ref() .map(|s| s.as_str()) .unwrap_or("<unknown>"); let has_hash_suffix = name.len() > 19 && &name[name.len() - 19..name.len() - 16] == "::h" && name[name.len() - 16..].chars().all(|x| x.is_digit(16)); // Print function name. out.set_color(if is_dependency_code {
lse { &s.colors.crate_code })?; if has_hash_suffix { write!(out, "{}", &name[..name.len() - 19])?; if s.strip_function_hash { writeln!(out)?; } else { out.set_color(if is_dependency_code { &s.colors.dependency_code_hash } else { &s.colors.crate_code_hash })?; writeln!(out, "{}", &name[name.len() - 19..])?; } } else { writeln!(out, "{}", name)?; } out.reset()?; // Print source location, if known. if let Some(ref file) = self.filename { let filestr = file.to_str().unwrap_or("<bad utf8>"); let lineno = self .lineno .map_or("<unknown line>".to_owned(), |x| x.to_string()); writeln!(out, " at {}:{}", filestr, lineno)?; } else { writeln!(out, " at <unknown source file>")?; } // Maybe print source. if s.current_verbosity() >= Verbosity::Full { self.print_source_if_avail(out, s)?; } Ok(()) } } /// The default frame filter. Heuristically determines whether a frame is likely to be an /// uninteresting frame. This filters out post panic frames and runtime init frames and dependency /// code. pub fn default_frame_filter(frames: &mut Vec<&Frame>) { let top_cutoff = frames .iter() .rposition(|x| x.is_post_panic_code()) .map(|x| x + 2) // indices are 1 based .unwrap_or(0); let bottom_cutoff = frames .iter() .position(|x| x.is_runtime_init_code()) .unwrap_or_else(|| frames.len()); let rng = top_cutoff..=bottom_cutoff; frames.retain(|x| rng.contains(&x.n)) } // ============================================================================================== // // [BacktracePrinter] // // ============================================================================================== // /// Color scheme definition. #[derive(Debug, Clone)] pub struct ColorScheme { pub frames_omitted_msg: ColorSpec, pub header: ColorSpec, pub msg_loc_prefix: ColorSpec, pub src_loc: ColorSpec, pub src_loc_separator: ColorSpec, pub env_var: ColorSpec, pub dependency_code: ColorSpec, pub dependency_code_hash: ColorSpec, pub crate_code: ColorSpec, pub crate_code_hash: ColorSpec, pub selected_src_ln: ColorSpec, } impl ColorScheme { /// Helper to create a new `ColorSpec` & set a few properties in one wash. fn cs(fg: Option<Color>, intense: bool, bold: bool) -> ColorSpec { let mut cs = ColorSpec::new(); cs.set_fg(fg); cs.set_bold(bold); cs.set_intense(intense); cs } /// The classic `color-backtrace` scheme, as shown in the screenshots. pub fn classic() -> Self { Self { frames_omitted_msg: Self::cs(Some(Color::Cyan), true, false), header: Self::cs(Some(Color::Red), false, false), msg_loc_prefix: Self::cs(Some(Color::Cyan), false, false), src_loc: Self::cs(Some(Color::Magenta), false, false), src_loc_separator: Self::cs(Some(Color::White), false, false), env_var: Self::cs(None, false, true), dependency_code: Self::cs(Some(Color::Green), false, false), dependency_code_hash: Self::cs(Some(Color::Black), true, false), crate_code: Self::cs(Some(Color::Red), true, false), crate_code_hash: Self::cs(Some(Color::Black), true, false), selected_src_ln: Self::cs(None, false, true), } } } impl Default for ColorScheme { fn default() -> Self { Self::classic() } } #[deprecated(since = "0.4.0", note = "Use `BacktracePrinter` instead.")] pub type Settings = BacktracePrinter; /// Pretty-printer for backtraces and [`PanicInfo`](PanicInfo) structs. 
#[derive(Clone)] pub struct BacktracePrinter { message: String, verbosity: Verbosity, lib_verbosity: Verbosity, strip_function_hash: bool, is_panic_handler: bool, colors: ColorScheme, filters: Vec<Arc<FilterCallback>>, should_print_addresses: bool, } impl Default for BacktracePrinter { fn default() -> Self { Self { verbosity: Verbosity::from_env(), lib_verbosity: Verbosity::lib_from_env(), message: "The application panicked (crashed).".to_owned(), strip_function_hash: false, colors: ColorScheme::classic(), is_panic_handler: false, filters: vec![Arc::new(default_frame_filter)], should_print_addresses: false, } } } impl std::fmt::Debug for BacktracePrinter { fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { fmt.debug_struct("Settings") .field("message", &self.message) .field("verbosity", &self.verbosity) .field("lib_verbosity", &self.lib_verbosity) .field("strip_function_hash", &self.strip_function_hash) .field("is_panic_handler", &self.is_panic_handler) .field("print_addresses", &self.should_print_addresses) .field("colors", &self.colors) .finish() } } /// Builder functions. impl BacktracePrinter { /// Alias for `BacktracePrinter::default`. pub fn new() -> Self { Self::default() } /// Alter the color scheme. /// /// Defaults to `ColorScheme::classic()`. pub fn color_scheme(mut self, colors: ColorScheme) -> Self { self.colors = colors; self } /// Controls the "greeting" message of the panic. /// /// Defaults to `"The application panicked (crashed)"`. pub fn message(mut self, message: impl Into<String>) -> Self { self.message = message.into(); self } /// Controls the verbosity level used when installed as panic handler. /// /// Defaults to `Verbosity::from_env()`. pub fn verbosity(mut self, v: Verbosity) -> Self { self.verbosity = v; self } /// Controls the lib verbosity level used when formatting user provided traces. /// /// Defaults to `Verbosity::lib_from_env()`. pub fn lib_verbosity(mut self, v: Verbosity) -> Self { self.lib_verbosity = v; self } /// Controls whether the hash part of functions is stripped. /// /// Defaults to `false`. pub fn strip_function_hash(mut self, strip: bool) -> Self { self.strip_function_hash = strip; self } /// Controls whether addresses (or module offsets if available) should be printed. /// /// Defaults to `false`. pub fn print_addresses(mut self, val: bool) -> Self { self.should_print_addresses = val; self } /// Add a custom filter to the set of frame filters /// /// Filters are run in the order they are added. /// /// # Example /// /// ```rust /// use color_backtrace::{default_output_stream, BacktracePrinter}; /// /// BacktracePrinter::new() /// .add_frame_filter(Box::new(|frames| { /// frames.retain(|x| matches!(&x.name, Some(n) if!n.starts_with("blabla"))) /// })) /// .install(default_output_stream()); /// ``` pub fn add_frame_filter(mut self, filter: Box<FilterCallback>) -> Self { self.filters.push(filter.into()); self } /// Clears all filters associated with this printer, including the default filter pub fn clear_frame_filters(mut self) -> Self { self.filters.clear(); self } } /// Routines for putting the panic printer to use. impl BacktracePrinter { /// Install the `color_backtrace` handler with default settings. /// /// Output streams can be created via `default_output_stream()` or /// using any other stream that implements /// [`termcolor::WriteColor`](termcolor::WriteColor). 
pub fn install(self, out: impl WriteColor + Sync + Send +'static) { std::panic::set_hook(self.into_panic_handler(out)) } /// Create a `color_backtrace` panic handler from this panic printer. /// /// This can be used if you want to combine the handler with other handlers. pub fn into_panic_handler( mut self, out: impl WriteColor + Sync + Send +'static, ) -> Box<dyn Fn(&PanicInfo<'_>) +'static + Sync + Send> { self.is_panic_handler = true; let out_stream_mutex = Mutex::new(out); Box::new(move |pi| { let mut lock = out_stream_mutex.lock().unwrap(); if let Err(e) = self.print_panic_info(pi, &mut *lock) { // Panicking while handling a panic would send us into a deadlock, // so we just print the
&s.colors.dependency_code } e
conditional_block
lib.rs
/// ``` pub fn install() { BacktracePrinter::default().install(default_output_stream()); } /// Create the default output stream. /// /// If stderr is attached to a tty, this is a colorized stderr, else it's /// a plain (colorless) stderr. pub fn default_output_stream() -> Box<StandardStream> { Box::new(StandardStream::stderr(if std::io::stderr().is_terminal() { ColorChoice::Always } else { ColorChoice::Never })) } #[deprecated( since = "0.4.0", note = "Use `BacktracePrinter::into_panic_handler()` instead." )] pub fn create_panic_handler( printer: BacktracePrinter, ) -> Box<dyn Fn(&PanicInfo<'_>) +'static + Sync + Send> { let out_stream_mutex = Mutex::new(default_output_stream()); Box::new(move |pi| { let mut lock = out_stream_mutex.lock().unwrap(); if let Err(e) = printer.print_panic_info(pi, &mut *lock) { // Panicking while handling a panic would send us into a deadlock, // so we just print the error to stderr instead. eprintln!("Error while printing panic: {:?}", e); } }) } #[deprecated(since = "0.4.0", note = "Use `BacktracePrinter::install()` instead.")] pub fn install_with_settings(printer: BacktracePrinter) { std::panic::set_hook(printer.into_panic_handler(default_output_stream())) } // ============================================================================================== // // [Backtrace frame] // // ============================================================================================== // pub type FilterCallback = dyn Fn(&mut Vec<&Frame>) + Send + Sync +'static; #[derive(Debug)] pub struct Frame { pub n: usize, pub name: Option<String>, pub lineno: Option<u32>, pub filename: Option<PathBuf>, pub ip: usize, _private_ctor: (), } impl Frame { /// Heuristically determine whether the frame is likely to be part of a /// dependency. /// /// If it fails to detect some patterns in your code base, feel free to drop /// an issue / a pull request! fn is_dependency_code(&self) -> bool { const SYM_PREFIXES: &[&str] = &[ "std::", "core::", "backtrace::backtrace::", "_rust_begin_unwind", "color_traceback::", "__rust_", "___rust_", "__pthread", "_main", "main", "__scrt_common_main_seh", "BaseThreadInitThunk", "_start", "__libc_start_main", "start_thread", ]; // Inspect name. if let Some(ref name) = self.name { if SYM_PREFIXES.iter().any(|x| name.starts_with(x)) { return true; } } const FILE_PREFIXES: &[&str] = &[ "/rustc/", "src/libstd/", "src/libpanic_unwind/", "src/libtest/", ]; // Inspect filename. if let Some(ref filename) = self.filename { let filename = filename.to_string_lossy(); if FILE_PREFIXES.iter().any(|x| filename.starts_with(x)) || filename.contains("/.cargo/registry/src/") { return true; } } false } /// Heuristically determine whether a frame is likely to be a post panic /// frame. /// /// Post panic frames are frames of a functions called after the actual panic /// is already in progress and don't contain any useful information for a /// reader of the backtrace. fn is_post_panic_code(&self) -> bool { const SYM_PREFIXES: &[&str] = &[ "_rust_begin_unwind", "rust_begin_unwind", "core::result::unwrap_failed", "core::option::expect_none_failed", "core::panicking::panic_fmt", "color_backtrace::create_panic_handler", "std::panicking::begin_panic", "begin_panic_fmt", "backtrace::capture", ]; match self.name.as_ref() { Some(name) => SYM_PREFIXES.iter().any(|x| name.starts_with(x)), None => false, } } /// Heuristically determine whether a frame is likely to be part of language /// runtime. 
fn is_runtime_init_code(&self) -> bool { const SYM_PREFIXES: &[&str] = &[ "std::rt::lang_start::", "test::run_test::run_test_inner::", "std::sys_common::backtrace::__rust_begin_short_backtrace", ]; let (name, file) = match (self.name.as_ref(), self.filename.as_ref()) { (Some(name), Some(filename)) => (name, filename.to_string_lossy()), _ => return false, }; if SYM_PREFIXES.iter().any(|x| name.starts_with(x)) { return true; } // For Linux, this is the best rule for skipping test init I found. if name == "{{closure}}" && file == "src/libtest/lib.rs" { return true; } false } fn print_source_if_avail(&self, mut out: impl WriteColor, s: &BacktracePrinter) -> IOResult { let (lineno, filename) = match (self.lineno, self.filename.as_ref()) { (Some(a), Some(b)) => (a, b), // Without a line number and file name, we can't sensibly proceed. _ => return Ok(()), }; let file = match File::open(filename) { Ok(file) => file, Err(ref e) if e.kind() == ErrorKind::NotFound => return Ok(()), e @ Err(_) => e?, }; // Extract relevant lines. let reader = BufReader::new(file); let start_line = lineno - 2.min(lineno - 1); let surrounding_src = reader.lines().skip(start_line as usize - 1).take(5); for (line, cur_line_no) in surrounding_src.zip(start_line..) { if cur_line_no == lineno { // Print actual source line with brighter color. out.set_color(&s.colors.selected_src_ln)?; writeln!(out, "{:>8} > {}", cur_line_no, line?)?; out.reset()?; } else { writeln!(out, "{:>8} │ {}", cur_line_no, line?)?; } } Ok(()) } /// Get the module's name by walking /proc/self/maps #[cfg(all( feature = "resolve-modules", unix, not(any(target_os = "macos", target_os = "ios")) ))] fn module_info(&self) -> Option<(String, usize)> { use regex::Regex; use std::path::Path; let re = Regex::new( r"(?x) ^ (?P<start>[0-9a-f]{8,16}) - (?P<end>[0-9a-f]{8,16}) \s (?P<perm>[-rwxp]{4}) \s (?P<offset>[0-9a-f]{8}) \s [0-9a-f]+:[0-9a-f]+ \s [0-9]+ \s+ (?P<path>.*) $ ", ) .unwrap(); let mapsfile = File::open("/proc/self/maps").expect("Unable to open /proc/self/maps"); for line in BufReader::new(mapsfile).lines() { let line = line.unwrap(); if let Some(caps) = re.captures(&line) { let (start, end, path) = ( usize::from_str_radix(caps.name("start").unwrap().as_str(), 16).unwrap(), usize::from_str_radix(caps.name("end").unwrap().as_str(), 16).unwrap(), caps.name("path").unwrap().as_str().to_string(), ); if self.ip >= start && self.ip < end { return if let Some(filename) = Path::new(&path).file_name() { Some((filename.to_str().unwrap().to_string(), start)) } else { None }; } } } None } #[cfg(not(all( feature = "resolve-modules", unix, not(any(target_os = "macos", target_os = "ios")) )))] fn module_info(&self) -> Option<(String, usize)> { None } fn print(&self, i: usize, out: &mut impl WriteColor, s: &BacktracePrinter) -> IOResult { let is_dependency_code = self.is_dependency_code(); // Print frame index. write!(out, "{:>2}: ", i)?; if s.should_print_addresses() { if let Some((module_name, module_base)) = self.module_info() { write!(out, "{}:0x{:08x} - ", module_name, self.ip - module_base)?; } else { write!(out, "0x{:016x} - ", self.ip)?; } } // Does the function have a hash suffix? // (dodging a dep on the regex crate here) let name = self .name .as_ref() .map(|s| s.as_str()) .unwrap_or("<unknown>"); let has_hash_suffix = name.len() > 19 && &name[name.len() - 19..name.len() - 16] == "::h" && name[name.len() - 16..].chars().all(|x| x.is_digit(16)); // Print function name. 
out.set_color(if is_dependency_code { &s.colors.dependency_code } else { &s.colors.crate_code })?; if has_hash_suffix { write!(out, "{}", &name[..name.len() - 19])?; if s.strip_function_hash { writeln!(out)?; } else { out.set_color(if is_dependency_code { &s.colors.dependency_code_hash } else { &s.colors.crate_code_hash })?; writeln!(out, "{}", &name[name.len() - 19..])?; } } else { writeln!(out, "{}", name)?; } out.reset()?; // Print source location, if known. if let Some(ref file) = self.filename { let filestr = file.to_str().unwrap_or("<bad utf8>"); let lineno = self .lineno .map_or("<unknown line>".to_owned(), |x| x.to_string()); writeln!(out, " at {}:{}", filestr, lineno)?; } else { writeln!(out, " at <unknown source file>")?; } // Maybe print source. if s.current_verbosity() >= Verbosity::Full { self.print_source_if_avail(out, s)?; } Ok(()) } } /// The default frame filter. Heuristically determines whether a frame is likely to be an /// uninteresting frame. This filters out post panic frames and runtime init frames and dependency /// code. pub fn default_frame_filter(frames: &mut Vec<&Frame>) { let top_cutoff = frames .iter() .rposition(|x| x.is_post_panic_code()) .map(|x| x + 2) // indices are 1 based .unwrap_or(0); let bottom_cutoff = frames .iter() .position(|x| x.is_runtime_init_code()) .unwrap_or_else(|| frames.len()); let rng = top_cutoff..=bottom_cutoff; frames.retain(|x| rng.contains(&x.n)) } // ============================================================================================== // // [BacktracePrinter] // // ============================================================================================== // /// Color scheme definition. #[derive(Debug, Clone)] pub struct ColorScheme { pub frames_omitted_msg: ColorSpec, pub header: ColorSpec, pub msg_loc_prefix: ColorSpec, pub src_loc: ColorSpec, pub src_loc_separator: ColorSpec, pub env_var: ColorSpec, pub dependency_code: ColorSpec, pub dependency_code_hash: ColorSpec, pub crate_code: ColorSpec, pub crate_code_hash: ColorSpec, pub selected_src_ln: ColorSpec, } impl ColorScheme { /// Helper to create a new `ColorSpec` & set a few properties in one wash. fn cs(fg: Option<Color>, intense: bool, bold: bool) -> ColorSpec { let mut cs = ColorSpec::new(); cs.set_fg(fg); cs.set_bold(bold); cs.set_intense(intense); cs } /// The classic `color-backtrace` scheme, as shown in the screenshots. pub fn classic() -> Self { Self { frames_omitted_msg: Self::cs(Some(Color::Cyan), true, false), header: Self::cs(Some(Color::Red), false, false), msg_loc_prefix: Self::cs(Some(Color::Cyan), false, false), src_loc: Self::cs(Some(Color::Magenta), false, false), src_loc_separator: Self::cs(Some(Color::White), false, false), env_var: Self::cs(None, false, true), dependency_code: Self::cs(Some(Color::Green), false, false), dependency_code_hash: Self::cs(Some(Color::Black), true, false), crate_code: Self::cs(Some(Color::Red), true, false), crate_code_hash: Self::cs(Some(Color::Black), true, false), selected_src_ln: Self::cs(None, false, true), } } } impl Default for ColorScheme { fn default() -> Self { Self::classic() } } #[deprecated(since = "0.4.0", note = "Use `BacktracePrinter` instead.")] pub type Settings = BacktracePrinter; /// Pretty-printer for backtraces and [`PanicInfo`](PanicInfo) structs. 
#[derive(Clone)] pub struct BacktracePrinter { message: String, verbosity: Verbosity, lib_verbosity: Verbosity, strip_function_hash: bool, is_panic_handler: bool, colors: ColorScheme, filters: Vec<Arc<FilterCallback>>, should_print_addresses: bool, } impl Default for BacktracePrinter { fn default() -> Self { Self { verbosity: Verbosity::from_env(), lib_verbosity: Verbosity::lib_from_env(), message: "The application panicked (crashed).".to_owned(), strip_function_hash: false, colors: ColorScheme::classic(), is_panic_handler: false, filters: vec![Arc::new(default_frame_filter)], should_print_addresses: false, } } } impl std::fmt::Debug for BacktracePrinter { fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { fmt.debug_struct("Settings") .field("message", &self.message) .field("verbosity", &self.verbosity) .field("lib_verbosity", &self.lib_verbosity) .field("strip_function_hash", &self.strip_function_hash) .field("is_panic_handler", &self.is_panic_handler) .field("print_addresses", &self.should_print_addresses) .field("colors", &self.colors) .finish() } } /// Builder functions. impl BacktracePrinter { /// Alias for `BacktracePrinter::default`. pub fn new() -> Self {
Self::default()
    }
/// Alter the color scheme. /// /// Defaults to `ColorScheme::classic()`. pub fn color_scheme(mut self, colors: ColorScheme) -> Self { self.colors = colors; self } /// Controls the "greeting" message of the panic. /// /// Defaults to `"The application panicked (crashed)"`. pub fn message(mut self, message: impl Into<String>) -> Self { self.message = message.into(); self } /// Controls the verbosity level used when installed as panic handler. /// /// Defaults to `Verbosity::from_env()`. pub fn verbosity(mut self, v: Verbosity) -> Self { self.verbosity = v; self } /// Controls the lib verbosity level used when formatting user provided traces. /// /// Defaults to `Verbosity::lib_from_env()`. pub fn lib_verbosity(mut self, v: Verbosity) -> Self { self.lib_verbosity = v; self } /// Controls whether the hash part of functions is stripped. /// /// Defaults to `false`. pub fn strip_function_hash(mut self, strip: bool) -> Self { self.strip_function_hash = strip; self } /// Controls whether addresses (or module offsets if available) should be printed. /// /// Defaults to `false`. pub fn print_addresses(mut self, val: bool) -> Self { self.should_print_addresses = val; self } /// Add a custom filter to the set of frame filters /// /// Filters are run in the order they are added. /// /// # Example /// /// ```rust /// use color_backtrace::{default_output_stream, BacktracePrinter}; /// /// BacktracePrinter::new() /// .add_frame_filter(Box::new(|frames| { /// frames.retain(|x| matches!(&x.name, Some(n) if!n.starts_with("blabla"))) /// })) /// .install(default_output_stream()); /// ``` pub fn add_frame_filter(mut self, filter: Box<FilterCallback>) -> Self { self.filters.push(filter.into()); self } /// Clears all filters associated with this printer, including the default filter pub fn clear_frame_filters(mut self) -> Self { self.filters.clear(); self } } /// Routines for putting the panic printer to use. impl BacktracePrinter { /// Install the `color_backtrace` handler with default settings. /// /// Output streams can be created via `default_output_stream()` or /// using any other stream that implements /// [`termcolor::WriteColor`](termcolor::WriteColor). pub fn install(self, out: impl WriteColor + Sync + Send +'static) { std::panic::set_hook(self.into_panic_handler(out)) } /// Create a `color_backtrace` panic handler from this panic printer. /// /// This can be used if you want to combine the handler with other handlers. pub fn into_panic_handler( mut self, out: impl WriteColor + Sync + Send +'static, ) -> Box<dyn Fn(&PanicInfo<'_>) +'static + Sync + Send> { self.is_panic_handler = true; let out_stream_mutex = Mutex::new(out); Box::new(move |pi| { let mut lock = out_stream_mutex.lock().unwrap(); if let Err(e) = self.print_panic_info(pi, &mut *lock) { // Panicking while handling a panic would send us into a deadlock, // so we just print the
error to stderr instead. eprintln!("Error while printing panic: {:?}", e); } }) }
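// A minimal usage sketch, added here for illustration; it is not part of the
// original file. It wires the builder methods above into an installed panic
// handler with a custom frame filter. The "my_app" crate name and symbol
// prefix are hypothetical.
#[allow(dead_code)]
fn example_install_printer() {
    BacktracePrinter::new()
        .message("my_app crashed, sorry!")
        .verbosity(Verbosity::Full)
        .strip_function_hash(true)
        .add_frame_filter(Box::new(|frames| {
            // Keep only frames whose symbol names start with our crate name.
            frames.retain(|x| matches!(&x.name, Some(n) if n.starts_with("my_app")))
        }))
        .install(default_output_stream());
}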
lib.rs
//! Example crate demonstrating how to use nom to parse `/proc/mounts`. Browse crates.io for sys-mount, proc-mounts, and libmount for more stable, usable crates. // Needed to use traits associated with std::io::BufReader. use std::io::BufRead; use std::io::Read; /// Type-erased errors. pub type BoxError = std::boxed::Box<dyn std::error::Error // must implement Error to satisfy? + std::marker::Send // needed for threads + std::marker::Sync // needed for threads >; /// Describes a mounted filesystem, see `man 8 mount` for more details. #[derive(Clone, Default, Debug)] pub struct Mount { /// The device from which the filesystem is mounted, e.g. /dev/sda1 pub device: std::string::String, /// Where in the root filesystem the device is mounted, e.g. /mnt/disk pub mount_point: std::string::String, /// The filesystem type, e.g. ext4 pub file_system_type: std::string::String, /// A vector of mount options, e.g. ["ro", "nosuid"] /// Note: This could also be implemented as a set (e.g. std::collections::HashSet) pub options: std::vec::Vec<std::string::String>, } /// Implements `Display` for `Mount` to simulate behavior of Unix mount command. /// /// # Examples /// ``` /// # use nom_tutorial::Mount; /// # use std::string::String; /// let mount = Mount { /// device: String::from("/dev/sda1"), /// mount_point: String::from("/mnt/disk"), /// file_system_type: String::from("ext4"), /// options: vec![String::from("ro"), String::from("nosuid")] /// }; /// assert!(mount.to_string() == "/dev/sda1 on /mnt/disk type ext4 (ro,nosuid)"); /// ``` impl std::fmt::Display for Mount { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{} on {} type {} ({})", self.device, self.mount_point, self.file_system_type, self.options.join(",")) } } /// Structure that accesses `/proc/mounts` and iterates over the contained mounts. /// /// You can generate an instance by calling [Mounts::new()] or the convenience method [mounts()]. Instantiation may fail if `/proc/mounts` does not exist or you do not have access to read it. You can access each individual mount through an iterator with [Mounts::into_iter()](std::iter::IntoIterator::into_iter) for a consuming iterator or [Mounts::iter_mut()] for a mutable iterator. Note that there is no immutable borrowed iterator `Mounts::iter()`. An instance of `Mounts` really isn't useful for anything except iterating over the contained mounts. /// # Examples /// /// ``` /// # use nom_tutorial; /// for mount in nom_tutorial::mounts().unwrap() { /// println!("{}", mount.unwrap()); /// } pub struct Mounts { buf_reader: std::io::BufReader<std::fs::File> } impl Mounts { /// Returns a new Mounts instance. You can also call [mounts()] for convenience. pub fn new() -> std::result::Result<Mounts, std::io::Error> { let file = std::fs::File::open("/proc/mounts")?; Ok( Mounts { buf_reader: std::io::BufReader::new(file) } ) } } impl IntoIterator for Mounts { type Item = std::result::Result<Mount, BoxError>; type IntoIter = MountsIntoIterator; /// Consuming iterator, used similarly to mutable iterator. See [Mounts::iter_mut()] for example. fn into_iter(self) -> Self::IntoIter { MountsIntoIterator { lines: self.buf_reader.lines() } } } impl<'a> IntoIterator for &'a mut Mounts { type Item = std::result::Result<Mount, BoxError>; type IntoIter = MountsIteratorMut<'a>; /// Mutable iterator, see [Mounts::iter_mut()]. fn into_iter(self) -> Self::IntoIter { MountsIteratorMut { lines: self.buf_reader.by_ref().lines() } } } /// Consuming iterator for [Mounts]. 
pub struct MountsIntoIterator { lines: std::io::Lines<std::io::BufReader<std::fs::File>> } impl std::iter::Iterator for MountsIntoIterator { type Item = std::result::Result<Mount, BoxError>; /// Returns the next line in `/proc/mounts` as a [Mount]. If there is a problem reading or parsing `/proc/mounts` returns an error. In the case of a parsing error we use [nom::Err::to_owned()] to allow the returned error to outlive `line`. See [Mounts::iter_mut()] for an analagous example using a mutable iterator. fn next(&mut self) -> std::option::Option<Self::Item> { match self.lines.next() { Some(line) => match line { Ok(line) => match parsers::parse_line(&line[..]) { Ok( (_, m) ) => Some(Ok(m)), Err(e) => Some(Err(e.to_owned().into())) }, Err(e) => Some(Err(e.into())) }, None => None } } } /// Mutable iterator for `Mounts`. pub struct MountsIteratorMut<'a> { lines: std::io::Lines<&'a mut std::io::BufReader<std::fs::File>> } impl<'a> std::iter::Iterator for MountsIteratorMut<'a> { type Item = std::result::Result<Mount, BoxError>; // Returns the next line in `/proc/mounts` as a [Mount]. See [Mounts::iter_mut()] for an example. fn next(&mut self) -> std::option::Option<Self::Item> { match self.lines.next() { Some(line) => match line { Ok(line) => match parsers::parse_line(&line[..]) { Ok( (_, m) ) => Some(Ok(m)), Err(e) => Some(Err(e.to_owned().into())) }, Err(e) => Some(Err(e.into())) }, None => None } } } impl<'a> Mounts { // There is no non-mutable iterator. /// Mutable iterator. /// # Examples /// ``` /// # use nom_tutorial; /// let mut iter = nom_tutorial::mounts().expect("Couldn't access /proc/mounts.").into_iter(); /// match iter.next() { /// Some(m) => match m { /// Ok(m) => eprintln!("Here is a mounted filesystem: {}", m), /// Err(e) => eprintln!("There was an error parsing the next line in /proc/mounts: {}", e) /// }, /// None => eprintln!("There are no mounted filesystems.") /// } /// ``` pub fn
iter_mut
(&'a mut self) -> MountsIteratorMut<'a> { self.into_iter() } } // Encapsulate individual nom parsers in a private submodule. The `pub(self)` keyword allows the inner method [parsers::parse_line()] to be called by code within this module, but not my users of our crate. pub(self) mod parsers { use super::Mount; // Extract a string that does not contain whitespace (space or tab). Anything else goes. fn not_whitespace(i: &str) -> nom::IResult<&str, &str> { nom::bytes::complete::is_not(" \t")(i) } // Replace the sequence 040 with a space. fn escaped_space(i: &str) -> nom::IResult<&str, &str> { nom::combinator::value(" ", nom::bytes::complete::tag("040"))(i) } // Replace the escaped sequence \ with a \. The inner parser `nom::character::complete::char` returns a `char` instead of a `&str`, so we wrap it in a `nom::combinator::recognize`, which returns that `char` as an `&str` if the inner parser succeeds, and returns an error if the inner parser fails. fn escaped_backslash(i: &str) -> nom::IResult<&str, &str> { nom::combinator::recognize(nom::character::complete::char('\\'))(i) } // Replace all instances of \040 in a string with a space. // Replace \\ with a \. fn transform_escaped(i: &str) -> nom::IResult<&str, std::string::String> { nom::bytes::complete::escaped_transform(nom::bytes::complete::is_not("\\"), '\\', nom::branch::alt((escaped_backslash, escaped_space)))(i) } // Parse the options of a mount into a comma separated vector of strings. The options string is terminated by a whitespace. // Here we use `nom::combinator::map_parser` to extract the full whitespace-terminated options string and then pass it in to `transform_escaped` to process escaped characters. Then the transformed string is split into a comma-delimited vector of strings by `nom::multi::separated_list`. fn mount_opts(i: &str) -> nom::IResult<&str, std::vec::Vec<std::string::String>> { nom::multi::separated_list(nom::character::complete::char(','), nom::combinator::map_parser(nom::bytes::complete::is_not(", \t"),transform_escaped))(i) } // Parse a line from `/proc/mounts` into a Mount struct. This is perhaps the most complex looking parser, but it is actually one of the simplest because we build upon each of the parsers defined above. Let's break it down parser by parser: // # `nom::combinator::all_consuming` generates an error if there is any leftover input. This will force nom to generate an error if there is unexpected input at the end of a line in `/proc/mounts`, for example: // ```ignore // /dev/sda1 /mnt/disk ext4 defaults 0 0 this_last_part_shouldn't_be_here // ``` // // `nom::sequence::tuple` generates a `Result<Ok(remaining_input: &str, output_tuple), Error>`. Although it looks complicated, we can very easily destructure that tuple. Each sub/inner parser we pass to `nom::sequence::tuple` generates its own element within the tuple. We can ignore the whitespace by matching it with `_` and destructure the other elements of the tuple as the variabels we are insterested such as `device`, `mount_point`, etc. If everything goes as planned we return a new instance of the mount `Mount` structure populated with the variables we destructured from the tuple. 
// ```ignore // let (device, _, mount_point /*,...*/) = /* tuple returned by all_consuming(tuple()) --> */ ("/dev/sda1", " ", "/mnt/disk" /*,...*/); // let mount = Mount { device: device.to_string(), mount_point: mount_point.to_string() /*,...*/ }; // ``` pub fn parse_line(i: &str) -> nom::IResult<&str, Mount> { match nom::combinator::all_consuming(nom::sequence::tuple(( nom::combinator::map_parser(not_whitespace, transform_escaped), // device nom::character::complete::space1, nom::combinator::map_parser(not_whitespace, transform_escaped), // mount_point nom::character::complete::space1, not_whitespace, // file_system_type nom::character::complete::space1, mount_opts, // options nom::character::complete::space1, nom::character::complete::char('0'), nom::character::complete::space1, nom::character::complete::char('0'), nom::character::complete::space0, )))(i) { Ok((remaining_input, ( device, _, // whitespace mount_point, _, // whitespace file_system_type, _, // whitespace options, _, // whitespace _, // 0 _, // whitespace _, // 0 _, // optional whitespace ))) => { Ok((remaining_input, Mount { device: device, mount_point: mount_point, file_system_type: file_system_type.to_string(), options: options })) } Err(e) => Err(e) } } // Alternative version of `parse_line()` above that performs the same // function using a different style. Rather than parsing the entire line at // once with one big `nom::sequence::tuple` we break the parsing up into // multiple separate statements. Each statement runs a parser that returns // an `Ok(remaining_input, value)`. At the end of each statement we have // the `?` operator, which unwraps the result and returns early with an // error if parsing failed. The remaining input from each parser is used as // the input of each subsequent parser. Values are assigned to temporary // variables that are used to construct a `Mount` object at the end of the // function. Values that are not needed are discarded by assigning to `_`. #[allow(unused)] pub fn parse_line_alternate(i: &str) -> nom::IResult<&str, Mount> { let (i, device) = nom::combinator::map_parser(not_whitespace, transform_escaped)(i)?; // device let (i, _) = nom::character::complete::space1(i)?; let (i, mount_point) = nom::combinator::map_parser(not_whitespace, transform_escaped)(i)?; // mount_point let (i, _) = nom::character::complete::space1(i)?; let (i, file_system_type) = not_whitespace(i)?; // file_system_type let (i, _) = nom::character::complete::space1(i)?; let (i, options) = mount_opts(i)?; // options let (i, _) = nom::combinator::all_consuming(nom::sequence::tuple(( nom::character::complete::space1, nom::character::complete::char('0'), nom::character::complete::space1, nom::character::complete::char('0'), nom::character::complete::space0 )))(i)?; Ok((i, Mount { device: device, mount_point: mount_point, file_system_type: file_system_type.to_string(), options:options })) } #[cfg(test)] mod tests { use super::*; // Extracts a string that does not contain whitespace, i.e. comma or tab. #[test] fn test_not_whitespace() { assert_eq!(not_whitespace("abcd efg"), Ok((" efg", "abcd"))); assert_eq!(not_whitespace("abcd\tefg"), Ok(("\tefg", "abcd"))); assert_eq!(not_whitespace(" abcdefg"), Err(nom::Err::Error((" abcdefg", nom::error::ErrorKind::IsNot)))); } // Converts 040 to a space. Does not actually recognize a literal space. 
#[test] fn test_escaped_space() { assert_eq!(escaped_space("040"), Ok(("", " "))); assert_eq!(escaped_space(" "), Err(nom::Err::Error((" ", nom::error::ErrorKind::Tag)))); } // Converts `char` \ to `&str` \. #[test] fn test_escaped_backslash() { assert_eq!(escaped_backslash("\\"), Ok(("", "\\"))); assert_eq!(escaped_backslash("not a backslash"), Err(nom::Err::Error(("not a backslash", nom::error::ErrorKind::Char)))); } // Recognizes each escape sequence and transfoms it to the escaped literal. // For example, each \040 is transformed into a space. #[test] fn test_transform_escaped() { assert_eq!(transform_escaped("abc\\040def\\\\g\\040h"), Ok(("", std::string::String::from("abc def\\g h")))); assert_eq!(transform_escaped("\\bad"), Err(nom::Err::Error(("bad", nom::error::ErrorKind::Tag)))); } // Parses a comma separated list of mount options, which might contain spaces. #[test] fn test_mount_opts() { assert_eq!(mount_opts("a,bc,d\\040e"), Ok(("", vec!["a".to_string(), "bc".to_string(), "d e".to_string()]))); } // Parses a line from /proc/mounts #[test] fn test_parse_line() { let mount1 = Mount{ device: "device".to_string(), mount_point: "mount_point".to_string(), file_system_type: "file_system_type".to_string(), options: vec!["options".to_string(), "a".to_string(), "b=c".to_string(), "d e".to_string()] }; let (_, mount2) = parse_line("device mount_point file_system_type options,a,b=c,d\\040e 0 0").unwrap(); assert_eq!(mount1.device, mount2.device); assert_eq!(mount1.mount_point, mount2.mount_point); assert_eq!(mount1.file_system_type, mount2.file_system_type); assert_eq!(mount1.options, mount2.options); } // Parses a line from /proc/mounts #[test] fn test_parse_line_alternate() { let mount1 = Mount{ device: "device".to_string(), mount_point: "mount_point".to_string(), file_system_type: "file_system_type".to_string(), options: vec!["options".to_string(), "a".to_string(), "b=c".to_string(), "d e".to_string()] }; let (_, mount2) = parse_line_alternate("device mount_point file_system_type options,a,b=c,d\\040e 0 0").unwrap(); assert_eq!(mount1.device, mount2.device); assert_eq!(mount1.mount_point, mount2.mount_point); assert_eq!(mount1.file_system_type, mount2.file_system_type); assert_eq!(mount1.options, mount2.options); } } } /// Convenience method equivalent to `Mounts::new()`. pub fn mounts() -> std::result::Result<Mounts, std::io::Error> { Mounts::new() }
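// A short usage sketch, added for illustration; it is not part of the original
// tutorial. It exercises both entry points defined above: streaming
// `/proc/mounts` through the consuming iterator, and feeding `parse_line` a
// hand-written line. Calling `parsers::parse_line` directly only works inside
// this crate, since the `parsers` module is private.
#[allow(dead_code)]
fn example_usage() -> std::result::Result<(), BoxError> {
    // Print every mounted filesystem, propagating I/O and parse errors.
    for mount in mounts()? {
        println!("{}", mount?);
    }

    // Parse a single line; `to_owned()` detaches the nom error from the
    // borrowed input, exactly as the iterators above do.
    let (_rest, m) = parsers::parse_line("/dev/sda1 /mnt/disk ext4 ro,nosuid 0 0")
        .map_err(|e| e.to_owned())?;
    assert_eq!(m.device, "/dev/sda1");
    assert_eq!(m.options, vec!["ro".to_string(), "nosuid".to_string()]);
    Ok(())
}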
}
Mounts::new()
random_line_split
// lib.rs
//! Example crate demonstrating how to use nom to parse `/proc/mounts`. Browse crates.io for sys-mount, proc-mounts, and libmount for more stable, usable crates.

// Needed to use traits associated with std::io::BufReader.
use std::io::BufRead;
use std::io::Read;

/// Type-erased errors.
pub type BoxError = std::boxed::Box<dyn
    std::error::Error   // must implement Error to satisfy `?`
    + std::marker::Send // needed for threads
    + std::marker::Sync // needed for threads
>;

/// Describes a mounted filesystem, see `man 8 mount` for more details.
#[derive(Clone, Default, Debug)]
pub struct Mount {
    /// The device from which the filesystem is mounted, e.g. /dev/sda1
    pub device: std::string::String,
    /// Where in the root filesystem the device is mounted, e.g. /mnt/disk
    pub mount_point: std::string::String,
    /// The filesystem type, e.g. ext4
    pub file_system_type: std::string::String,
    /// A vector of mount options, e.g. ["ro", "nosuid"]
    /// Note: This could also be implemented as a set (e.g. std::collections::HashSet)
    pub options: std::vec::Vec<std::string::String>,
}

/// Implements `Display` for `Mount` to simulate the behavior of the Unix mount command.
///
/// # Examples
/// ```
/// # use nom_tutorial::Mount;
/// # use std::string::String;
/// let mount = Mount {
///     device: String::from("/dev/sda1"),
///     mount_point: String::from("/mnt/disk"),
///     file_system_type: String::from("ext4"),
///     options: vec![String::from("ro"), String::from("nosuid")]
/// };
/// assert!(mount.to_string() == "/dev/sda1 on /mnt/disk type ext4 (ro,nosuid)");
/// ```
impl std::fmt::Display for Mount {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{} on {} type {} ({})", self.device, self.mount_point, self.file_system_type, self.options.join(","))
    }
}

/// Structure that accesses `/proc/mounts` and iterates over the contained mounts.
///
/// You can generate an instance by calling [Mounts::new()] or the convenience method [mounts()]. Instantiation may fail if `/proc/mounts` does not exist or you do not have access to read it. You can access each individual mount through an iterator with [Mounts::into_iter()](std::iter::IntoIterator::into_iter) for a consuming iterator or [Mounts::iter_mut()] for a mutable iterator. Note that there is no immutable borrowed iterator `Mounts::iter()`. An instance of `Mounts` really isn't useful for anything except iterating over the contained mounts.
///
/// # Examples
///
/// ```
/// # use nom_tutorial;
/// for mount in nom_tutorial::mounts().unwrap() {
///     println!("{}", mount.unwrap());
/// }
/// ```
pub struct Mounts {
    buf_reader: std::io::BufReader<std::fs::File>
}

impl Mounts {
    /// Returns a new Mounts instance. You can also call [mounts()] for convenience.
    pub fn new() -> std::result::Result<Mounts, std::io::Error> {
        let file = std::fs::File::open("/proc/mounts")?;
        Ok( Mounts { buf_reader: std::io::BufReader::new(file) } )
    }
}

impl IntoIterator for Mounts {
    type Item = std::result::Result<Mount, BoxError>;
    type IntoIter = MountsIntoIterator;

    /// Consuming iterator, used similarly to the mutable iterator. See [Mounts::iter_mut()] for an example.
    fn into_iter(self) -> Self::IntoIter {
        MountsIntoIterator { lines: self.buf_reader.lines() }
    }
}

impl<'a> IntoIterator for &'a mut Mounts {
    type Item = std::result::Result<Mount, BoxError>;
    type IntoIter = MountsIteratorMut<'a>;

    /// Mutable iterator, see [Mounts::iter_mut()].
    fn into_iter(self) -> Self::IntoIter {
        MountsIteratorMut { lines: self.buf_reader.by_ref().lines() }
    }
}
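// A small illustration (an addition to the tutorial; uses only std) of why
// `BoxError` above is declared `Send + Sync`: it lets per-line errors cross
// thread boundaries.
#[cfg(test)]
mod box_error_example {
    use super::BoxError;

    #[test]
    fn io_errors_convert_into_box_error() {
        let io_err = std::io::Error::new(std::io::ErrorKind::Other, "boom");
        // `Box<dyn Error + Send + Sync>` implements `From` for any error type.
        let boxed: BoxError = io_err.into();
        // `Send` lets the error move to another thread.
        std::thread::spawn(move || assert_eq!(boxed.to_string(), "boom"))
            .join()
            .unwrap();
    }
}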
/// Consuming iterator for [Mounts].
pub struct MountsIntoIterator {
    lines: std::io::Lines<std::io::BufReader<std::fs::File>>
}

impl std::iter::Iterator for MountsIntoIterator {
    type Item = std::result::Result<Mount, BoxError>;

    /// Returns the next line in `/proc/mounts` as a [Mount]. If there is a problem reading or parsing `/proc/mounts` returns an error. In the case of a parsing error we use [nom::Err::to_owned()] to allow the returned error to outlive `line`. See [Mounts::iter_mut()] for an analogous example using a mutable iterator.
    fn next(&mut self) -> std::option::Option<Self::Item> {
        match self.lines.next() {
            Some(line) => match line {
                Ok(line) => match parsers::parse_line(&line[..]) {
                    Ok( (_, m) ) => Some(Ok(m)),
                    Err(e) => Some(Err(e.to_owned().into()))
                },
                Err(e) => Some(Err(e.into()))
            },
            None => None
        }
    }
}

/// Mutable iterator for `Mounts`.
pub struct MountsIteratorMut<'a> {
    lines: std::io::Lines<&'a mut std::io::BufReader<std::fs::File>>
}

impl<'a> std::iter::Iterator for MountsIteratorMut<'a> {
    type Item = std::result::Result<Mount, BoxError>;

    // Returns the next line in `/proc/mounts` as a [Mount]. See [Mounts::iter_mut()] for an example.
    fn next(&mut self) -> std::option::Option<Self::Item> {
        match self.lines.next() {
            Some(line) => match line {
                Ok(line) => match parsers::parse_line(&line[..]) {
                    Ok( (_, m) ) => Some(Ok(m)),
                    Err(e) => Some(Err(e.to_owned().into()))
                },
                Err(e) => Some(Err(e.into()))
            },
            None => None
        }
    }
}

impl<'a> Mounts {
    // There is no non-mutable iterator.

    /// Mutable iterator.
    /// # Examples
    /// ```
    /// # use nom_tutorial;
    /// let mut iter = nom_tutorial::mounts().expect("Couldn't access /proc/mounts.").into_iter();
    /// match iter.next() {
    ///     Some(m) => match m {
    ///         Ok(m) => eprintln!("Here is a mounted filesystem: {}", m),
    ///         Err(e) => eprintln!("There was an error parsing the next line in /proc/mounts: {}", e)
    ///     },
    ///     None => eprintln!("There are no mounted filesystems.")
    /// }
    /// ```
    pub fn iter_mut(&'a mut self) -> MountsIteratorMut<'a> {
        self.into_iter()
    }
}

// Encapsulate individual nom parsers in a private submodule. The `pub(self)` visibility allows the inner method [parsers::parse_line()] to be called by code within this module, but not by users of our crate.
pub(self) mod parsers {
    use super::Mount;

    // Extract a string that does not contain whitespace (space or tab). Anything else goes.
    fn not_whitespace(i: &str) -> nom::IResult<&str, &str> {
        nom::bytes::complete::is_not(" \t")(i)
    }

    // Replace the sequence 040 with a space.
    fn escaped_space(i: &str) -> nom::IResult<&str, &str> {
        nom::combinator::value(" ", nom::bytes::complete::tag("040"))(i)
    }

    // Replace the escaped sequence `\\` with a `\`. The inner parser `nom::character::complete::char` returns a `char` instead of a `&str`, so we wrap it in `nom::combinator::recognize`, which returns that `char` as an `&str` if the inner parser succeeds, and returns an error if the inner parser fails.
    fn escaped_backslash(i: &str) -> nom::IResult<&str, &str> {
        nom::combinator::recognize(nom::character::complete::char('\\'))(i)
    }

    // Replace all instances of \040 in a string with a space.
    // Replace \\ with a \.
    fn transform_escaped(i: &str) -> nom::IResult<&str, std::string::String> {
        nom::bytes::complete::escaped_transform(nom::bytes::complete::is_not("\\"), '\\', nom::branch::alt((escaped_backslash, escaped_space)))(i)
    }
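    // A quick illustrative example (an addition that mirrors the unit tests at
    // the bottom of this module): `transform_escaped` turns the octal escape
    // \040 back into a literal space, which is how a path such as
    // "/mnt/my drive" is encoded in `/proc/mounts`.
    #[cfg(test)]
    mod transform_escaped_example {
        use super::*;

        #[test]
        fn unescapes_a_mount_path() {
            // All input is consumed, and the escape becomes a space.
            assert_eq!(transform_escaped("/mnt/my\\040drive"), Ok(("", "/mnt/my drive".to_string())));
        }
    }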
    // Parse the options of a mount into a comma separated vector of strings. The options string is terminated by a whitespace.
    // Here we use `nom::combinator::map_parser` to extract the full whitespace-terminated options string and then pass it in to `transform_escaped` to process escaped characters. Then the transformed string is split into a comma-delimited vector of strings by `nom::multi::separated_list`.
    fn mount_opts(i: &str) -> nom::IResult<&str, std::vec::Vec<std::string::String>> {
        nom::multi::separated_list(nom::character::complete::char(','), nom::combinator::map_parser(nom::bytes::complete::is_not(", \t"), transform_escaped))(i)
    }

    // Parse a line from `/proc/mounts` into a Mount struct. This is perhaps the most complex looking parser, but it is actually one of the simplest because we build upon each of the parsers defined above. Let's break it down parser by parser:
    //
    // `nom::combinator::all_consuming` generates an error if there is any leftover input. This will force nom to generate an error if there is unexpected input at the end of a line in `/proc/mounts`, for example:
    // ```ignore
    // /dev/sda1 /mnt/disk ext4 defaults 0 0 this_last_part_shouldn't_be_here
    // ```
    //
    // `nom::sequence::tuple` generates a `Result<Ok(remaining_input: &str, output_tuple), Error>`. Although it looks complicated, we can very easily destructure that tuple. Each sub/inner parser we pass to `nom::sequence::tuple` generates its own element within the tuple. We can ignore the whitespace by matching it with `_` and destructure the other elements of the tuple as the variables we are interested in, such as `device`, `mount_point`, etc. If everything goes as planned we return a new instance of the `Mount` structure populated with the variables we destructured from the tuple.
    // ```ignore
    // let (device, _, mount_point /*, ... */) = /* tuple returned by all_consuming(tuple()) --> */ ("/dev/sda1", " ", "/mnt/disk" /*, ... */);
    // let mount = Mount { device: device.to_string(), mount_point: mount_point.to_string() /*, ... */ };
    // ```
    pub fn parse_line(i: &str) -> nom::IResult<&str, Mount> {
        match nom::combinator::all_consuming(nom::sequence::tuple((
            nom::combinator::map_parser(not_whitespace, transform_escaped), // device
            nom::character::complete::space1,
            nom::combinator::map_parser(not_whitespace, transform_escaped), // mount_point
            nom::character::complete::space1,
            not_whitespace, // file_system_type
            nom::character::complete::space1,
            mount_opts, // options
            nom::character::complete::space1,
            nom::character::complete::char('0'),
            nom::character::complete::space1,
            nom::character::complete::char('0'),
            nom::character::complete::space0,
        )))(i) {
            Ok((remaining_input, (
                device,
                _, // whitespace
                mount_point,
                _, // whitespace
                file_system_type,
                _, // whitespace
                options,
                _, // whitespace
                _, // 0
                _, // whitespace
                _, // 0
                _, // optional whitespace
            ))) => {
                Ok((remaining_input, Mount {
                    device,
                    mount_point,
                    file_system_type: file_system_type.to_string(),
                    options
                }))
            }
            Err(e) => Err(e)
        }
    }
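    // An illustrative check of the `all_consuming` behavior described above
    // (an addition for exposition, not part of the original test suite): a
    // line with trailing garbage after the final `0` is rejected as a whole.
    #[cfg(test)]
    mod all_consuming_example {
        use super::*;

        #[test]
        fn rejects_trailing_garbage() {
            assert!(parse_line("/dev/sda1 /mnt/disk ext4 defaults 0 0 unexpected").is_err());
        }
    }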
    // Alternative version of `parse_line()` above that performs the same
    // function using a different style. Rather than parsing the entire line at
    // once with one big `nom::sequence::tuple` we break the parsing up into
    // multiple separate statements. Each statement runs a parser that returns
    // an `Ok(remaining_input, value)`. At the end of each statement we have
    // the `?` operator, which unwraps the result and returns early with an
    // error if parsing failed. The remaining input from each parser is used as
    // the input of each subsequent parser. Values are assigned to temporary
    // variables that are used to construct a `Mount` object at the end of the
    // function. Values that are not needed are discarded by assigning to `_`.
    #[allow(unused)]
    pub fn parse_line_alternate(i: &str) -> nom::IResult<&str, Mount> {
        let (i, device) = nom::combinator::map_parser(not_whitespace, transform_escaped)(i)?; // device
        let (i, _) = nom::character::complete::space1(i)?;
        let (i, mount_point) = nom::combinator::map_parser(not_whitespace, transform_escaped)(i)?; // mount_point
        let (i, _) = nom::character::complete::space1(i)?;
        let (i, file_system_type) = not_whitespace(i)?; // file_system_type
        let (i, _) = nom::character::complete::space1(i)?;
        let (i, options) = mount_opts(i)?; // options
        let (i, _) = nom::combinator::all_consuming(nom::sequence::tuple((
            nom::character::complete::space1,
            nom::character::complete::char('0'),
            nom::character::complete::space1,
            nom::character::complete::char('0'),
            nom::character::complete::space0
        )))(i)?;
        Ok((i, Mount {
            device,
            mount_point,
            file_system_type: file_system_type.to_string(),
            options
        }))
    }

    #[cfg(test)]
    mod tests {
        use super::*;

        // Extracts a string that does not contain whitespace, i.e. space or tab.
        #[test]
        fn test_not_whitespace() {
            assert_eq!(not_whitespace("abcd efg"), Ok((" efg", "abcd")));
            assert_eq!(not_whitespace("abcd\tefg"), Ok(("\tefg", "abcd")));
            assert_eq!(not_whitespace(" abcdefg"), Err(nom::Err::Error((" abcdefg", nom::error::ErrorKind::IsNot))));
        }

        // Converts 040 to a space. Does not actually recognize a literal space.
        #[test]
        fn test_escaped_space() {
            assert_eq!(escaped_space("040"), Ok(("", " ")));
            assert_eq!(escaped_space(" "), Err(nom::Err::Error((" ", nom::error::ErrorKind::Tag))));
        }

        // Converts `char` \ to `&str` \.
        #[test]
        fn test_escaped_backslash() {
            assert_eq!(escaped_backslash("\\"), Ok(("", "\\")));
            assert_eq!(escaped_backslash("not a backslash"), Err(nom::Err::Error(("not a backslash", nom::error::ErrorKind::Char))));
        }

        // Recognizes each escape sequence and transforms it to the escaped literal.
        // For example, each \040 is transformed into a space.
        #[test]
        fn test_transform_escaped() {
            assert_eq!(transform_escaped("abc\\040def\\\\g\\040h"), Ok(("", std::string::String::from("abc def\\g h"))));
            assert_eq!(transform_escaped("\\bad"), Err(nom::Err::Error(("bad", nom::error::ErrorKind::Tag))));
        }

        // Parses a comma separated list of mount options, which might contain spaces.
        #[test]
        fn test_mount_opts() {
            assert_eq!(mount_opts("a,bc,d\\040e"), Ok(("", vec!["a".to_string(), "bc".to_string(), "d e".to_string()])));
        }

        // Parses a line from /proc/mounts
        #[test]
        fn test_parse_line() {
            let mount1 = Mount {
                device: "device".to_string(),
                mount_point: "mount_point".to_string(),
                file_system_type: "file_system_type".to_string(),
                options: vec!["options".to_string(), "a".to_string(), "b=c".to_string(), "d e".to_string()]
            };
            let (_, mount2) = parse_line("device mount_point file_system_type options,a,b=c,d\\040e 0 0").unwrap();
            assert_eq!(mount1.device, mount2.device);
            assert_eq!(mount1.mount_point, mount2.mount_point);
            assert_eq!(mount1.file_system_type, mount2.file_system_type);
            assert_eq!(mount1.options, mount2.options);
        }

        // Parses a line from /proc/mounts
        #[test]
        fn test_parse_line_alternate() {
            let mount1 = Mount {
                device: "device".to_string(),
                mount_point: "mount_point".to_string(),
                file_system_type: "file_system_type".to_string(),
                options: vec!["options".to_string(), "a".to_string(), "b=c".to_string(), "d e".to_string()]
            };
            let (_, mount2) = parse_line_alternate("device mount_point file_system_type options,a,b=c,d\\040e 0 0").unwrap();
            assert_eq!(mount1.device, mount2.device);
            assert_eq!(mount1.mount_point, mount2.mount_point);
            assert_eq!(mount1.file_system_type, mount2.file_system_type);
            assert_eq!(mount1.options, mount2.options);
        }
    }
}

/// Convenience method equivalent to `Mounts::new()`.
pub fn mounts() -> std::result::Result<Mounts, std::io::Error> {
    Mounts::new()
}
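// A brief usage sketch added for exposition: iterate the parsed mounts and
// filter on a field. Like the doc tests above, this assumes a Linux
// environment where `/proc/mounts` exists and is readable; the test name is
// hypothetical.
#[cfg(test)]
mod usage_example {
    #[test]
    fn list_ext4_mounts() {
        for mount in super::mounts().expect("cannot open /proc/mounts") {
            match mount {
                Ok(m) if m.file_system_type == "ext4" => println!("{}", m),
                Ok(_) => {} // some other filesystem type
                Err(e) => eprintln!("skipping malformed line: {}", e),
            }
        }
    }
}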
// smd.rs
use std::fs::File;
use std::io::BufReader;
use std::path::PathBuf;

use cgmath::{Matrix4, Deg, Vector4, SquareMatrix, Vector3, Euler, Quaternion, Rotation};
use soto::task::task_log;
use soto::Error;
use sotolib_fbx::{RawFbx, id_name, friendly_name, ObjectTreeNode};
use sotolib_fbx::animation::Animation;
use sotolib_fbx::simple::{Object, SimpleFbx, ObjectType, ModelProperties, Geometry};
use sotolib_smd::{Smd, SmdVertex, SmdTriangle, SmdAnimationFrameBone, SmdBone};

pub fn create_reference_smd(fbx: &PathBuf, flip_fix_list: &Vec<String>) -> Result<Smd, Error> {
    // Read in the fbx we got told to convert
    let file = BufReader::new(File::open(&fbx).unwrap());
    let fbx = SimpleFbx::from_raw(&RawFbx::parse(file).unwrap()).unwrap();
    let fbx_tree = ObjectTreeNode::from_simple(&fbx);

    // Go over all FBX root nodes and turn them into SMD data
    let mut smd = Smd::new();
    process_fbx_node(&fbx, &fbx_tree, &mut smd, &Matrix4::identity(), None, flip_fix_list)?;

    Ok(smd)
}

pub fn create_animation_smd(
    ref_smd: &Smd,
    fbx: &PathBuf,
    flip_fix_list: &Vec<String>,
) -> Result<Smd, Error> {
    // Read in the fbx we got told to convert
    let file = BufReader::new(File::open(&fbx).unwrap());
    let mut fbx = SimpleFbx::from_raw(&RawFbx::parse(file).unwrap()).unwrap();

    // Read in the animation data itself
    let animation = Animation::from_simple(&fbx).unwrap();

    // Count and log frames
    let frame_count = animation.frame_count(&fbx);
    task_log(format!("Animation has {} frames", frame_count));

    // Copy over every bone to the new animation SMD
    let mut smd = Smd::new();
    for bone in &ref_smd.bones {
        smd.bones.push(bone.clone());
    }

    // Finally, turn the animation data into bone positions in the SMD
    for frame in 0..frame_count {
        // First transform the FBX for this frame
        animation.transform_fbx_to_frame(&mut fbx, frame);

        // Now go over all models
        for (_, model) in fbx.objects.iter().filter(|&(_, o)| o.class.type_name() == "Model") {
            // For this model, look up the matching BoneId in the reference SMD
            if let Some(bone_id) = ref_smd.id_of_bone(&id_name(&model.name).unwrap()) {
                // Now that we have a model and a bone, we need the current translation and rotation
                // for the model
                let (translation, rotation) =
                    calculate_animation_transforms_for(&fbx, model, flip_fix_list);

                // And now that we have those, finally add the bone data to the animation SMD
                smd.set_animation(frame, bone_id, SmdAnimationFrameBone {
                    translation: translation.into(),
                    rotation: rotation.into(),
                });
            }
        }
    }

    Ok(smd)
}

fn process_fbx_node(
    fbx: &SimpleFbx,
    fbx_node: &ObjectTreeNode,
    smd: &mut Smd,
    matrix: &Matrix4<f32>,
    current_bone: Option<&SmdBone>,
    flip_fix_list: &Vec<String>,
) -> Result<(), Error> {
    // Perform node type specific processing
    match fbx_node.object.class {
        ObjectType::Geometry(ref geometry) =>
            process_geometry(smd, geometry, matrix, current_bone.unwrap()),
        ObjectType::Model(ref _model) =>
            process_model(fbx, fbx_node, smd, matrix, current_bone, flip_fix_list)?,
        _ => {
            // Just go straight to the children
            for node in &fbx_node.nodes {
                process_fbx_node(fbx, node, smd, matrix, current_bone, flip_fix_list)?;
            }
        }
    }

    Ok(())
}

fn process_geometry(smd: &mut Smd, geometry: &Geometry, matrix: &Matrix4<f32>, current_bone: &SmdBone) {
    // Add triangles to parent node
    let tris = geometry.triangles();
    for tri in tris {
        // Turn the vertices in this triangle to SMD vertices
        let mut smd_verts: [SmdVertex; 3] = Default::default();
        for (i, vert) in tri.iter().enumerate() {
            // Multiply the vectors that need to be multiplied
            let pos = matrix * Vector4::new(vert.0[0], vert.0[1], vert.0[2], 1.0);
            let norm = matrix * Vector4::new(vert.1[0], vert.1[1], vert.1[2], 0.0);

            smd_verts[i] = SmdVertex {
                parent_bone: current_bone.id, // This is overwritten by links
                position: pos.truncate().into(),
                normal: norm.truncate().into(),
                uv: vert.2,
                links: vec!(
                    /* Not needed, we aren't using weights anyway, so this is done by parent_bone
                    SmdLink {
                        bone: bone_id,
                        weight: 1.0,
                    } */
                )
            };
        }

        // Add the actual SMD triangle
        smd.triangles.push(SmdTriangle {
            material: "layl_test_texture".into(),
            vertices: smd_verts,
        });
    }
}

fn process_model(
    fbx: &SimpleFbx,
    fbx_node: &ObjectTreeNode,
    smd: &mut Smd,
    matrix: &Matrix4<f32>,
    current_bone: Option<&SmdBone>,
    flip_fix_list: &Vec<String>,
) -> Result<(), Error> {
    task_log(format!("Adding model \"{}\" to SMD data", friendly_name(&fbx_node.object.name)));
    let properties = ModelProperties::from_generic(&fbx_node.object.properties);

    // Create a new transformation matrix
    let local_matrix = local_matrices(&properties);

    // Create a new bone
    let new_bone = smd.new_bone(
        &id_name(&fbx_node.object.name).unwrap(),
        current_bone.map(|b| b.id)
    )
    .ok_or_else(|| Error::Task(format!(
        "Bone \"{}\" exists multiple times in the FBX", &fbx_node.object.name
    )))?
    .clone(); // Clone needed to avoid a borrow, since we need to mutably borrow the SMD later

    // Set the transformations on this bone
    let (translation, rotation) =
        calculate_animation_transforms_for(&fbx, &fbx_node.object, flip_fix_list);
    let first_frame = SmdAnimationFrameBone {
        // This needs to be derived from the matrix to get the right location
        translation: translation.into(),
        // This can just be directly copied over
        rotation: rotation.into(),
    };
    smd.set_animation(0, new_bone.id, first_frame);

    // Make new matrices for children
    let matrix = matrix * local_matrix;

    // Make sure the child nodes will receive this new bone
    for node in &fbx_node.nodes {
        process_fbx_node(fbx, node, smd, &matrix, Some(&new_bone), flip_fix_list)?;
    }

    Ok(())
}
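// For reference: the local transform composed by `local_matrices()` below
// follows the usual FBX local-transform chain (this note is my summary of the
// code that follows, not an official formula reference):
//
//   L = T * Roff * Rp * Rpre * R * Rpost^-1 * Rp^-1 * Soff * Sp * S * Sp^-1
//
// where T is the translation, Roff/Rp are the rotation offset and pivot,
// Rpre/R/Rpost are the pre-, main and post-rotations, and Soff/Sp/S are the
// scale offset, pivot and scale. The translation and rotation extracted by
// `calculate_animation_transforms_for()` below mirror the same chain.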
/// Returns (Translation, Rotation)
fn calculate_animation_transforms_for(
    fbx: &SimpleFbx,
    obj: &Object,
    flip_fix_list: &Vec<String>,
) -> (Vector3<f32>, Vector3<f32>) {
    let properties = ModelProperties::from_generic(&obj.properties);

    // Get the bone's translation
    let parent_after_rot_translation = calculate_parent_after_rot_translation(fbx, obj);
    let prop_translation: Vector3<_> = properties.translation.into();
    let prop_rot_offset: Vector3<_> = properties.rotation_offset.into();
    let prop_rot_pivot: Vector3<_> = properties.rotation_pivot.into();
    let translation = parent_after_rot_translation
        + prop_translation + prop_rot_offset + prop_rot_pivot;

    // Check if this bone's in the flip fix list
    // TODO: Get an actual fix instead of this dirty manual hack
    let flip = flip_fix_list.iter().any(|n| n == &id_name(&obj.name).unwrap());

    // We want the rotation, but we've got multiple rotations, so combine them
    let pre_rotation = Quaternion::from(Euler::new(
        Deg(properties.pre_rotation[0]),
        Deg(properties.pre_rotation[1]),
        Deg(properties.pre_rotation[2])
    ));
    let rotation = Quaternion::from(Euler::new(
        Deg(properties.rotation[0]),
        Deg(properties.rotation[1]),
        Deg(properties.rotation[2])
    ));
    let post_rotation = Quaternion::from(Euler::new(
        Deg(properties.post_rotation[0]),
        Deg(properties.post_rotation[1]),
        Deg(properties.post_rotation[2])
    ));
    let total_rotation = if !flip {
        Euler::from(post_rotation.invert() * rotation * pre_rotation)
    } else {
        Euler::from(post_rotation.invert() * rotation.invert() * pre_rotation)
    };
    let rotation = Vector3::new(
        total_rotation.x.0,
        total_rotation.y.0,
        total_rotation.z.0,
    );

    (translation, rotation)
}

fn calculate_parent_after_rot_translation(fbx: &SimpleFbx, obj: &Object) -> Vector3<f32> {
    // First actually get the parent's model data
    let parent_obj = if let Some(v) = fbx.parent_of(obj.id) {
        if v == 0 {
            // At root, no extra translation
            return Vector3::new(0.0, 0.0, 0.0)
        }
        v
    } else {
        // No parent, no extra translation
        return Vector3::new(0.0, 0.0, 0.0)
    };
    let props = ModelProperties::from_generic(&fbx.objects[&parent_obj].properties);

    // Now add up all the translations applied after rotation
    let rotation_pivot: Vector3<_> = props.rotation_pivot.into();
    let scale_offset: Vector3<_> = props.scale_offset.into();
    let translation = -rotation_pivot + scale_offset;

    translation
}

fn local_matrices(properties: &ModelProperties) -> Matrix4<f32> {
    // Create various matrices
    let rotation_offset = properties.rotation_offset.into();
    let rotation_offset_mat = Matrix4::from_translation(rotation_offset);
    let rotation_pivot: Vector3<_> = properties.rotation_pivot.into();
    let rotation_pivot_mat = Matrix4::from_translation(rotation_pivot);
    let pre_rotation = euler_rotation_to_matrix(properties.pre_rotation);
    let rotation = euler_rotation_to_matrix(properties.rotation);
    let post_rotation = euler_rotation_to_matrix(properties.post_rotation);
    let scale_offset = properties.scale_offset.into();
    let scale_offset_mat = Matrix4::from_translation(scale_offset);
    let scale_pivot: Vector3<_> = properties.scale_pivot.into();
    let scale_pivot_mat = Matrix4::from_translation(scale_pivot);
    let scale = Matrix4::from_nonuniform_scale(
        properties.scale[0],
        properties.scale[1],
        properties.scale[2]
    );

    let local_matrix_for_vertices =
        Matrix4::from_translation(properties.translation.into()) *
        // Rotation
        rotation_offset_mat *
        rotation_pivot_mat *
        pre_rotation *
        rotation *
        post_rotation.invert().unwrap() *
        rotation_pivot_mat.invert().unwrap() *
        // Scale
        scale_offset_mat *
        scale_pivot_mat *
        scale *
        scale_pivot_mat.invert().unwrap();

    local_matrix_for_vertices
}

fn euler_rotation_to_matrix(rot_degs: [f32; 3]) -> Matrix4<f32> {
    Matrix4::from_angle_z(Deg(rot_degs[2])) *
    Matrix4::from_angle_y(Deg(rot_degs[1])) *
    Matrix4::from_angle_x(Deg(rot_degs[0]))
}
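// A minimal illustrative check (an addition, assuming only the cgmath API
// already used above): with the Z * Y * X composition in
// `euler_rotation_to_matrix`, a 90 degree rotation about Z maps the X axis
// onto the Y axis.
#[cfg(test)]
mod rotation_order_example {
    use super::*;

    #[test]
    fn z_rotation_maps_x_onto_y() {
        let m = euler_rotation_to_matrix([0.0, 0.0, 90.0]);
        let v = m * cgmath::Vector4::new(1.0f32, 0.0, 0.0, 0.0);
        assert!(v.x.abs() < 1e-5);
        assert!((v.y - 1.0).abs() < 1e-5);
        assert!(v.z.abs() < 1e-5);
    }
}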
// in_memory.rs
// Copyright (c) SimpleStaking, Viable Systems and Tezedge Contributors
// SPDX-License-Identifier: MIT

//! Implementation of an in-memory repository.

use std::{
    borrow::Cow,
    collections::{hash_map::DefaultHasher, BTreeMap, VecDeque},
    hash::Hasher,
    mem::size_of,
    sync::{atomic::Ordering, Arc},
    thread::JoinHandle,
};

use crossbeam_channel::Sender;
use crypto::hash::ContextHash;
use tezos_timing::RepositoryMemoryUsage;

use crate::{
    gc::{
        worker::{Command, Cycles, GCThread, GC_PENDING_HASHIDS, PRESERVE_CYCLE_COUNT},
        GarbageCollectionError, GarbageCollector,
    },
    hash::ObjectHash,
    persistent::{DBError, Flushable, KeyValueStoreBackend, Persistable},
    working_tree::{
        shape::{DirectoryShapeId, DirectoryShapes, ShapeStrings},
        storage::DirEntryId,
        string_interner::{StringId, StringInterner},
    },
    Map,
};

use tezos_spsc::Consumer;

use super::{index_map::IndexMap, HashIdError};
use super::{HashId, VacantObjectHash};

#[derive(Debug)]
pub struct HashValueStore {
    hashes: IndexMap<HashId, ObjectHash>,
    values: IndexMap<HashId, Option<Arc<[u8]>>>,
    free_ids: Option<Consumer<HashId>>,
    new_ids: Vec<HashId>,
    values_bytes: usize,
}

impl HashValueStore {
    pub(crate) fn new<T>(consumer: T) -> Self
    where
        T: Into<Option<Consumer<HashId>>>,
    {
        Self {
            hashes: IndexMap::new(),
            values: IndexMap::new(),
            free_ids: consumer.into(),
            new_ids: Vec::with_capacity(1024),
            values_bytes: 0,
        }
    }

    pub fn get_memory_usage(&self) -> RepositoryMemoryUsage {
        let values_bytes = self.values_bytes;
        let values_capacity = self.values.capacity();
        let hashes_capacity = self.hashes.capacity();
        let total_bytes = values_bytes
            .saturating_add(values_capacity * size_of::<Option<Arc<[u8]>>>())
            .saturating_add(values_capacity * 16) // Each `Arc` has 16 extra bytes for the counters
            .saturating_add(hashes_capacity * size_of::<ObjectHash>());

        RepositoryMemoryUsage {
            values_bytes,
            values_capacity,
            values_length: self.values.len(),
            hashes_capacity,
            hashes_length: self.hashes.len(),
            total_bytes,
            npending_free_ids: self.free_ids.as_ref().map(|c| c.len()).unwrap_or(0),
            gc_npending_free_ids: GC_PENDING_HASHIDS.load(Ordering::Acquire),
            nshapes: 0,
        }
    }

    pub(crate) fn clear(&mut self) {
        *self = Self {
            hashes: IndexMap::new(),
            values: IndexMap::new(),
            free_ids: self.free_ids.take(),
            new_ids: Vec::new(),
            values_bytes: 0,
        }
    }

    pub(crate) fn get_vacant_object_hash(&mut self) -> Result<VacantObjectHash, HashIdError> {
        let (hash_id, entry) = if let Some(free_id) = self.get_free_id() {
            if let Some(old_value) = self.values.set(free_id, None)? {
                self.values_bytes = self.values_bytes.saturating_sub(old_value.len());
            }
            (free_id, self.hashes.get_mut(free_id)?.ok_or(HashIdError)?)
        } else {
            self.hashes.get_vacant_entry()?
        };
        self.new_ids.push(hash_id);

        Ok(VacantObjectHash {
            entry: Some(entry),
            hash_id,
        })
    }
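    // Pops a recycled `HashId` off the free list that the GC worker refills,
    // if one is available.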
self.free_ids.as_mut()?.pop().ok() } pub(crate) fn insert_value_at( &mut self, hash_id: HashId, value: Arc<[u8]>, ) -> Result<(), HashIdError> { self.values_bytes = self.values_bytes.saturating_add(value.len()); if let Some(old) = self.values.insert_at(hash_id, Some(value))? { self.values_bytes = self.values_bytes.saturating_sub(old.len()); } Ok(()) } pub(crate) fn get_hash(&self, hash_id: HashId) -> Result<Option<&ObjectHash>, HashIdError> { self.hashes.get(hash_id) } pub(crate) fn get_value(&self, hash_id: HashId) -> Result<Option<&[u8]>, HashIdError> { match self.values.get(hash_id)? { Some(value) => Ok(value.as_ref().map(|v| v.as_ref())), None => Ok(None), } } pub(crate) fn contains(&self, hash_id: HashId) -> Result<bool, HashIdError> { Ok(self.values.get(hash_id)?.unwrap_or(&None).is_some()) } fn take_new_ids(&mut self) -> Vec<HashId> { let new_ids = self.new_ids.clone(); self.new_ids.clear(); new_ids } } pub struct InMemory { current_cycle: BTreeMap<HashId, Option<Arc<[u8]>>>, pub hashes: HashValueStore, sender: Option<Sender<Command>>, pub context_hashes: Map<u64, HashId>, context_hashes_cycles: VecDeque<Vec<u64>>, thread_handle: Option<JoinHandle<()>>, shapes: DirectoryShapes, string_interner: StringInterner, } impl GarbageCollector for InMemory { fn new_cycle_started(&mut self) -> Result<(), GarbageCollectionError> { self.new_cycle_started(); Ok(()) } fn block_applied( &mut self, referenced_older_objects: Vec<HashId>, ) -> Result<(), GarbageCollectionError> { self.block_applied(referenced_older_objects); Ok(()) } } impl Flushable for InMemory { fn flush(&self) -> Result<(), anyhow::Error> { Ok(()) } } impl Persistable for InMemory { fn is_persistent(&self) -> bool { false } } impl KeyValueStoreBackend for InMemory { fn write_batch(&mut self, batch: Vec<(HashId, Arc<[u8]>)>) -> Result<(), DBError> { self.write_batch(batch) } fn contains(&self, hash_id: HashId) -> Result<bool, DBError> { self.contains(hash_id) } fn put_context_hash(&mut self, hash_id: HashId) -> Result<(), DBError> { self.put_context_hash_impl(hash_id) } fn get_context_hash(&self, context_hash: &ContextHash) -> Result<Option<HashId>, DBError> { Ok(self.get_context_hash_impl(context_hash)) } fn get_hash(&self, hash_id: HashId) -> Result<Option<Cow<ObjectHash>>, DBError> { Ok(self.get_hash(hash_id)?.map(Cow::Borrowed)) } fn get_value(&self, hash_id: HashId) -> Result<Option<Cow<[u8]>>, DBError> { Ok(self.get_value(hash_id)?.map(Cow::Borrowed)) } fn get_vacant_object_hash(&mut self) -> Result<VacantObjectHash, DBError> { self.get_vacant_entry_hash() } fn clear_objects(&mut self) -> Result<(), DBError> { // `InMemory` has its own garbage collection Ok(()) } fn memory_usage(&self) -> RepositoryMemoryUsage { let mut mem = self.hashes.get_memory_usage(); mem.nshapes = self.shapes.nshapes(); mem } fn get_shape(&self, shape_id: DirectoryShapeId) -> Result<ShapeStrings, DBError> { self.shapes .get_shape(shape_id) .map(ShapeStrings::SliceIds) .map_err(Into::into) } fn make_shape( &mut self, dir: &[(StringId, DirEntryId)], ) -> Result<Option<DirectoryShapeId>, DBError> { self.shapes.make_shape(dir).map_err(Into::into) } fn synchronize_strings(&mut self, string_interner: &StringInterner) -> Result<(), DBError> { self.string_interner.extend_from(string_interner); Ok(()) } fn get_str(&self, string_id: StringId) -> Option<&str> { self.string_interner.get(string_id) } } impl InMemory { pub fn try_new() -> Result<Self, std::io::Error> { // TODO - TE-210: Remove once we hace proper support for history modes. 
let garbage_collector_disabled = std::env::var("DISABLE_INMEM_CONTEXT_GC") .unwrap_or_else(|_| "false".to_string()) .parse::<bool>() .expect("Provided `DISABLE_INMEM_CONTEXT_GC` value cannot be converted to bool"); let (sender, cons, thread_handle) = if garbage_collector_disabled { (None, None, None) } else { let (sender, recv) = crossbeam_channel::unbounded(); let (prod, cons) = tezos_spsc::bounded(2_000_000); let thread_handle = std::thread::Builder::new() .name("ctx-inmem-gc-thread".to_string()) .spawn(move || { GCThread { cycles: Cycles::default(), recv, free_ids: prod, pending: Vec::new(), } .run() })?; (Some(sender), Some(cons), Some(thread_handle)) }; let current_cycle = Default::default(); let hashes = HashValueStore::new(cons); let context_hashes = Default::default(); let mut context_hashes_cycles = VecDeque::with_capacity(PRESERVE_CYCLE_COUNT); for _ in 0..PRESERVE_CYCLE_COUNT { context_hashes_cycles.push_back(Default::default()) } Ok(Self { current_cycle, hashes, sender, context_hashes, context_hashes_cycles, thread_handle, shapes: DirectoryShapes::default(), string_interner: StringInterner::default(), }) } pub(crate) fn get_vacant_entry_hash(&mut self) -> Result<VacantObjectHash, DBError> { self.hashes.get_vacant_object_hash().map_err(Into::into) } pub(crate) fn get_hash(&self, hash_id: HashId) -> Result<Option<&ObjectHash>, DBError> { self.hashes.get_hash(hash_id).map_err(Into::into) } pub(crate) fn get_value(&self, hash_id: HashId) -> Result<Option<&[u8]>, DBError> { self.hashes.get_value(hash_id).map_err(Into::into) } fn contains(&self, hash_id: HashId) -> Result<bool, DBError> { self.hashes.contains(hash_id).map_err(Into::into) } pub fn write_batch(&mut self, batch: Vec<(HashId, Arc<[u8]>)>) -> Result<(), DBError> { for (hash_id, value) in batch { self.hashes.insert_value_at(hash_id, Arc::clone(&value))?; self.current_cycle.insert(hash_id, Some(value)); } Ok(()) } pub fn new_cycle_started(&mut self) { if let Some(sender) = &self.sender { let values_in_cycle = std::mem::take(&mut self.current_cycle); let new_ids = self.hashes.take_new_ids(); if let Err(e) = sender.try_send(Command::StartNewCycle { values_in_cycle, new_ids, }) { eprintln!("Fail to send Command::StartNewCycle to GC worker: {:?}", e); } if let Some(unused) = self.context_hashes_cycles.pop_front() { for hash in unused { self.context_hashes.remove(&hash); } } self.context_hashes_cycles.push_back(Default::default()); } } pub fn block_applied(&mut self, reused: Vec<HashId>) { if let Some(sender) = &self.sender { if let Err(e) = sender.send(Command::MarkReused { reused }) { eprintln!("Fail to send Command::MarkReused to GC worker: {:?}", e); } } } pub fn get_context_hash_impl(&self, context_hash: &ContextHash) -> Option<HashId> { let mut hasher = DefaultHasher::new(); hasher.write(context_hash.as_ref()); let hashed = hasher.finish(); self.context_hashes.get(&hashed).cloned() } pub fn put_context_hash_impl(&mut self, commit_hash_id: HashId) -> Result<(), DBError> { let commit_hash = self .hashes .get_hash(commit_hash_id)? 
.ok_or(DBError::MissingObject { hash_id: commit_hash_id, })?; let mut hasher = DefaultHasher::new(); hasher.write(&commit_hash[..]); let hashed = hasher.finish(); self.context_hashes.insert(hashed, commit_hash_id); if let Some(back) = self.context_hashes_cycles.back_mut() { back.push(hashed); }; Ok(()) } #[cfg(test)] pub(crate) fn put_object_hash(&mut self, entry_hash: ObjectHash) -> HashId { let vacant = self.get_vacant_entry_hash().unwrap(); vacant.write_with(|entry| *entry = entry_hash) } } impl Drop for InMemory { fn drop(&mut self) { let sender = match self.sender.take() { Some(sender) => sender, None => return, }; if let Err(e) = sender.send(Command::Close) { eprintln!("Fail to send Command::Close to GC worker: {:?}", e); return; } let thread_handle = match self.thread_handle.take() { Some(thread_handle) => thread_handle, None => return, }; if let Err(e) = thread_handle.join() { eprintln!("Fail to join GC worker thread: {:?}", e); } } }
fn get_free_id(&mut self) -> Option<HashId> {
random_line_split
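The record above recycles `HashId`s through an SPSC free list: `get_vacant_object_hash` first asks `get_free_id` for an id reclaimed by the GC and only falls back to `get_vacant_entry` for a fresh slot when none is pending. A minimal single-threaded sketch of that allocation order, with a `VecDeque` standing in for the `tezos_spsc` consumer (`IdAllocator` is a hypothetical name, not part of the crate):

use std::collections::VecDeque;

struct IdAllocator {
    next_fresh: u64,
    free: VecDeque<u64>, // ids reclaimed by a GC, ready for reuse
}

impl IdAllocator {
    fn alloc(&mut self) -> u64 {
        // Prefer a recycled id, mirroring `get_free_id` being consulted first.
        if let Some(id) = self.free.pop_front() {
            return id;
        }
        // Otherwise mint a fresh id, mirroring `get_vacant_entry`.
        let id = self.next_fresh;
        self.next_fresh += 1;
        id
    }
}

fn main() {
    let mut ids = IdAllocator { next_fresh: 0, free: VecDeque::new() };
    assert_eq!(ids.alloc(), 0);
    assert_eq!(ids.alloc(), 1);
    ids.free.push_back(0); // the GC returned id 0
    assert_eq!(ids.alloc(), 0); // reused before another fresh id is minted
}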
in_memory.rs
// Copyright (c) SimpleStaking, Viable Systems and Tezedge Contributors // SPDX-License-Identifier: MIT //! Implementation of an in-memory repository. use std::{ borrow::Cow, collections::{hash_map::DefaultHasher, BTreeMap, VecDeque}, hash::Hasher, mem::size_of, sync::{atomic::Ordering, Arc}, thread::JoinHandle, }; use crossbeam_channel::Sender; use crypto::hash::ContextHash; use tezos_timing::RepositoryMemoryUsage; use crate::{ gc::{ worker::{Command, Cycles, GCThread, GC_PENDING_HASHIDS, PRESERVE_CYCLE_COUNT}, GarbageCollectionError, GarbageCollector, }, hash::ObjectHash, persistent::{DBError, Flushable, KeyValueStoreBackend, Persistable}, working_tree::{ shape::{DirectoryShapeId, DirectoryShapes, ShapeStrings}, storage::DirEntryId, string_interner::{StringId, StringInterner}, }, Map, }; use tezos_spsc::Consumer; use super::{index_map::IndexMap, HashIdError}; use super::{HashId, VacantObjectHash}; #[derive(Debug)] pub struct HashValueStore { hashes: IndexMap<HashId, ObjectHash>, values: IndexMap<HashId, Option<Arc<[u8]>>>, free_ids: Option<Consumer<HashId>>, new_ids: Vec<HashId>, values_bytes: usize, } impl HashValueStore { pub(crate) fn new<T>(consumer: T) -> Self where T: Into<Option<Consumer<HashId>>>, { Self { hashes: IndexMap::new(), values: IndexMap::new(), free_ids: consumer.into(), new_ids: Vec::with_capacity(1024), values_bytes: 0, } } pub fn get_memory_usage(&self) -> RepositoryMemoryUsage { let values_bytes = self.values_bytes; let values_capacity = self.values.capacity(); let hashes_capacity = self.hashes.capacity(); let total_bytes = values_bytes .saturating_add(values_capacity * size_of::<Option<Arc<[u8]>>>()) .saturating_add(values_capacity * 16) // Each `Arc` has 16 extra bytes for the counters .saturating_add(hashes_capacity * size_of::<ObjectHash>()); RepositoryMemoryUsage { values_bytes, values_capacity, values_length: self.values.len(), hashes_capacity, hashes_length: self.hashes.len(), total_bytes, npending_free_ids: self.free_ids.as_ref().map(|c| c.len()).unwrap_or(0), gc_npending_free_ids: GC_PENDING_HASHIDS.load(Ordering::Acquire), nshapes: 0, } } pub(crate) fn clear(&mut self) { *self = Self { hashes: IndexMap::new(), values: IndexMap::new(), free_ids: self.free_ids.take(), new_ids: Vec::new(), values_bytes: 0, } } pub(crate) fn get_vacant_object_hash(&mut self) -> Result<VacantObjectHash, HashIdError> { let (hash_id, entry) = if let Some(free_id) = self.get_free_id() { if let Some(old_value) = self.values.set(free_id, None)? { self.values_bytes = self.values_bytes.saturating_sub(old_value.len()); } (free_id, self.hashes.get_mut(free_id)?.ok_or(HashIdError)?) } else { self.hashes.get_vacant_entry()? }; self.new_ids.push(hash_id); Ok(VacantObjectHash { entry: Some(entry), hash_id, }) } fn get_free_id(&mut self) -> Option<HashId> { self.free_ids.as_mut()?.pop().ok() } pub(crate) fn insert_value_at( &mut self, hash_id: HashId, value: Arc<[u8]>, ) -> Result<(), HashIdError> { self.values_bytes = self.values_bytes.saturating_add(value.len()); if let Some(old) = self.values.insert_at(hash_id, Some(value))? { self.values_bytes = self.values_bytes.saturating_sub(old.len()); } Ok(()) } pub(crate) fn get_hash(&self, hash_id: HashId) -> Result<Option<&ObjectHash>, HashIdError> { self.hashes.get(hash_id) } pub(crate) fn get_value(&self, hash_id: HashId) -> Result<Option<&[u8]>, HashIdError> { match self.values.get(hash_id)? 
{ Some(value) => Ok(value.as_ref().map(|v| v.as_ref())), None => Ok(None), } } pub(crate) fn contains(&self, hash_id: HashId) -> Result<bool, HashIdError> { Ok(self.values.get(hash_id)?.unwrap_or(&None).is_some()) } fn take_new_ids(&mut self) -> Vec<HashId> { let new_ids = self.new_ids.clone(); self.new_ids.clear(); new_ids } } pub struct InMemory { current_cycle: BTreeMap<HashId, Option<Arc<[u8]>>>, pub hashes: HashValueStore, sender: Option<Sender<Command>>, pub context_hashes: Map<u64, HashId>, context_hashes_cycles: VecDeque<Vec<u64>>, thread_handle: Option<JoinHandle<()>>, shapes: DirectoryShapes, string_interner: StringInterner, } impl GarbageCollector for InMemory { fn new_cycle_started(&mut self) -> Result<(), GarbageCollectionError> { self.new_cycle_started(); Ok(()) } fn block_applied( &mut self, referenced_older_objects: Vec<HashId>, ) -> Result<(), GarbageCollectionError> { self.block_applied(referenced_older_objects); Ok(()) } } impl Flushable for InMemory { fn flush(&self) -> Result<(), anyhow::Error> { Ok(()) } } impl Persistable for InMemory { fn is_persistent(&self) -> bool { false } } impl KeyValueStoreBackend for InMemory { fn write_batch(&mut self, batch: Vec<(HashId, Arc<[u8]>)>) -> Result<(), DBError> { self.write_batch(batch) } fn contains(&self, hash_id: HashId) -> Result<bool, DBError> { self.contains(hash_id) } fn put_context_hash(&mut self, hash_id: HashId) -> Result<(), DBError> { self.put_context_hash_impl(hash_id) } fn get_context_hash(&self, context_hash: &ContextHash) -> Result<Option<HashId>, DBError> { Ok(self.get_context_hash_impl(context_hash)) } fn get_hash(&self, hash_id: HashId) -> Result<Option<Cow<ObjectHash>>, DBError> { Ok(self.get_hash(hash_id)?.map(Cow::Borrowed)) } fn get_value(&self, hash_id: HashId) -> Result<Option<Cow<[u8]>>, DBError> { Ok(self.get_value(hash_id)?.map(Cow::Borrowed)) } fn get_vacant_object_hash(&mut self) -> Result<VacantObjectHash, DBError> { self.get_vacant_entry_hash() } fn clear_objects(&mut self) -> Result<(), DBError> { // `InMemory` has its own garbage collection Ok(()) } fn memory_usage(&self) -> RepositoryMemoryUsage { let mut mem = self.hashes.get_memory_usage(); mem.nshapes = self.shapes.nshapes(); mem } fn get_shape(&self, shape_id: DirectoryShapeId) -> Result<ShapeStrings, DBError> { self.shapes .get_shape(shape_id) .map(ShapeStrings::SliceIds) .map_err(Into::into) } fn make_shape( &mut self, dir: &[(StringId, DirEntryId)], ) -> Result<Option<DirectoryShapeId>, DBError> { self.shapes.make_shape(dir).map_err(Into::into) } fn synchronize_strings(&mut self, string_interner: &StringInterner) -> Result<(), DBError> { self.string_interner.extend_from(string_interner); Ok(()) } fn get_str(&self, string_id: StringId) -> Option<&str> { self.string_interner.get(string_id) } } impl InMemory { pub fn try_new() -> Result<Self, std::io::Error> { // TODO - TE-210: Remove once we hace proper support for history modes. 
let garbage_collector_disabled = std::env::var("DISABLE_INMEM_CONTEXT_GC") .unwrap_or_else(|_| "false".to_string()) .parse::<bool>() .expect("Provided `DISABLE_INMEM_CONTEXT_GC` value cannot be converted to bool"); let (sender, cons, thread_handle) = if garbage_collector_disabled { (None, None, None) } else { let (sender, recv) = crossbeam_channel::unbounded(); let (prod, cons) = tezos_spsc::bounded(2_000_000); let thread_handle = std::thread::Builder::new() .name("ctx-inmem-gc-thread".to_string()) .spawn(move || { GCThread { cycles: Cycles::default(), recv, free_ids: prod, pending: Vec::new(), } .run() })?; (Some(sender), Some(cons), Some(thread_handle)) }; let current_cycle = Default::default(); let hashes = HashValueStore::new(cons); let context_hashes = Default::default(); let mut context_hashes_cycles = VecDeque::with_capacity(PRESERVE_CYCLE_COUNT); for _ in 0..PRESERVE_CYCLE_COUNT { context_hashes_cycles.push_back(Default::default()) } Ok(Self { current_cycle, hashes, sender, context_hashes, context_hashes_cycles, thread_handle, shapes: DirectoryShapes::default(), string_interner: StringInterner::default(), }) } pub(crate) fn get_vacant_entry_hash(&mut self) -> Result<VacantObjectHash, DBError> { self.hashes.get_vacant_object_hash().map_err(Into::into) } pub(crate) fn get_hash(&self, hash_id: HashId) -> Result<Option<&ObjectHash>, DBError> { self.hashes.get_hash(hash_id).map_err(Into::into) } pub(crate) fn get_value(&self, hash_id: HashId) -> Result<Option<&[u8]>, DBError> { self.hashes.get_value(hash_id).map_err(Into::into) } fn contains(&self, hash_id: HashId) -> Result<bool, DBError> { self.hashes.contains(hash_id).map_err(Into::into) } pub fn write_batch(&mut self, batch: Vec<(HashId, Arc<[u8]>)>) -> Result<(), DBError> { for (hash_id, value) in batch { self.hashes.insert_value_at(hash_id, Arc::clone(&value))?; self.current_cycle.insert(hash_id, Some(value)); } Ok(()) } pub fn new_cycle_started(&mut self) { if let Some(sender) = &self.sender { let values_in_cycle = std::mem::take(&mut self.current_cycle); let new_ids = self.hashes.take_new_ids(); if let Err(e) = sender.try_send(Command::StartNewCycle { values_in_cycle, new_ids, }) { eprintln!("Fail to send Command::StartNewCycle to GC worker: {:?}", e); } if let Some(unused) = self.context_hashes_cycles.pop_front() { for hash in unused { self.context_hashes.remove(&hash); } } self.context_hashes_cycles.push_back(Default::default()); } } pub fn block_applied(&mut self, reused: Vec<HashId>) { if let Some(sender) = &self.sender { if let Err(e) = sender.send(Command::MarkReused { reused }) { eprintln!("Fail to send Command::MarkReused to GC worker: {:?}", e); } } } pub fn get_context_hash_impl(&self, context_hash: &ContextHash) -> Option<HashId>
pub fn put_context_hash_impl(&mut self, commit_hash_id: HashId) -> Result<(), DBError> { let commit_hash = self .hashes .get_hash(commit_hash_id)? .ok_or(DBError::MissingObject { hash_id: commit_hash_id, })?; let mut hasher = DefaultHasher::new(); hasher.write(&commit_hash[..]); let hashed = hasher.finish(); self.context_hashes.insert(hashed, commit_hash_id); if let Some(back) = self.context_hashes_cycles.back_mut() { back.push(hashed); }; Ok(()) } #[cfg(test)] pub(crate) fn put_object_hash(&mut self, entry_hash: ObjectHash) -> HashId { let vacant = self.get_vacant_entry_hash().unwrap(); vacant.write_with(|entry| *entry = entry_hash) } } impl Drop for InMemory { fn drop(&mut self) { let sender = match self.sender.take() { Some(sender) => sender, None => return, }; if let Err(e) = sender.send(Command::Close) { eprintln!("Fail to send Command::Close to GC worker: {:?}", e); return; } let thread_handle = match self.thread_handle.take() { Some(thread_handle) => thread_handle, None => return, }; if let Err(e) = thread_handle.join() { eprintln!("Fail to join GC worker thread: {:?}", e); } } }
{ let mut hasher = DefaultHasher::new(); hasher.write(context_hash.as_ref()); let hashed = hasher.finish(); self.context_hashes.get(&hashed).cloned() }
identifier_body
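`get_context_hash_impl` and `put_context_hash_impl` above never store the full `ContextHash` as a map key; both sides derive a 64-bit `DefaultHasher` digest of its bytes and key the map by that, trading the full hash for a fixed 8-byte key. A std-only sketch of the scheme, with `HashMap` standing in for the crate's `Map` alias and `u32` standing in for `HashId`:

use std::collections::{hash_map::DefaultHasher, HashMap};
use std::hash::Hasher;

// Same digest on the write path and the read path, as in the record above.
fn key_of(bytes: &[u8]) -> u64 {
    let mut h = DefaultHasher::new();
    h.write(bytes);
    h.finish()
}

fn main() {
    let mut context_hashes: HashMap<u64, u32> = HashMap::new();
    let commit = b"some 32-byte context hash bytes";
    context_hashes.insert(key_of(commit), 42);
    assert_eq!(context_hashes.get(&key_of(commit)).copied(), Some(42));
}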
in_memory.rs
// Copyright (c) SimpleStaking, Viable Systems and Tezedge Contributors // SPDX-License-Identifier: MIT //! Implementation of an in-memory repository. use std::{ borrow::Cow, collections::{hash_map::DefaultHasher, BTreeMap, VecDeque}, hash::Hasher, mem::size_of, sync::{atomic::Ordering, Arc}, thread::JoinHandle, }; use crossbeam_channel::Sender; use crypto::hash::ContextHash; use tezos_timing::RepositoryMemoryUsage; use crate::{ gc::{ worker::{Command, Cycles, GCThread, GC_PENDING_HASHIDS, PRESERVE_CYCLE_COUNT}, GarbageCollectionError, GarbageCollector, }, hash::ObjectHash, persistent::{DBError, Flushable, KeyValueStoreBackend, Persistable}, working_tree::{ shape::{DirectoryShapeId, DirectoryShapes, ShapeStrings}, storage::DirEntryId, string_interner::{StringId, StringInterner}, }, Map, }; use tezos_spsc::Consumer; use super::{index_map::IndexMap, HashIdError}; use super::{HashId, VacantObjectHash}; #[derive(Debug)] pub struct HashValueStore { hashes: IndexMap<HashId, ObjectHash>, values: IndexMap<HashId, Option<Arc<[u8]>>>, free_ids: Option<Consumer<HashId>>, new_ids: Vec<HashId>, values_bytes: usize, } impl HashValueStore { pub(crate) fn new<T>(consumer: T) -> Self where T: Into<Option<Consumer<HashId>>>, { Self { hashes: IndexMap::new(), values: IndexMap::new(), free_ids: consumer.into(), new_ids: Vec::with_capacity(1024), values_bytes: 0, } } pub fn
(&self) -> RepositoryMemoryUsage { let values_bytes = self.values_bytes; let values_capacity = self.values.capacity(); let hashes_capacity = self.hashes.capacity(); let total_bytes = values_bytes .saturating_add(values_capacity * size_of::<Option<Arc<[u8]>>>()) .saturating_add(values_capacity * 16) // Each `Arc` has 16 extra bytes for the counters .saturating_add(hashes_capacity * size_of::<ObjectHash>()); RepositoryMemoryUsage { values_bytes, values_capacity, values_length: self.values.len(), hashes_capacity, hashes_length: self.hashes.len(), total_bytes, npending_free_ids: self.free_ids.as_ref().map(|c| c.len()).unwrap_or(0), gc_npending_free_ids: GC_PENDING_HASHIDS.load(Ordering::Acquire), nshapes: 0, } } pub(crate) fn clear(&mut self) { *self = Self { hashes: IndexMap::new(), values: IndexMap::new(), free_ids: self.free_ids.take(), new_ids: Vec::new(), values_bytes: 0, } } pub(crate) fn get_vacant_object_hash(&mut self) -> Result<VacantObjectHash, HashIdError> { let (hash_id, entry) = if let Some(free_id) = self.get_free_id() { if let Some(old_value) = self.values.set(free_id, None)? { self.values_bytes = self.values_bytes.saturating_sub(old_value.len()); } (free_id, self.hashes.get_mut(free_id)?.ok_or(HashIdError)?) } else { self.hashes.get_vacant_entry()? }; self.new_ids.push(hash_id); Ok(VacantObjectHash { entry: Some(entry), hash_id, }) } fn get_free_id(&mut self) -> Option<HashId> { self.free_ids.as_mut()?.pop().ok() } pub(crate) fn insert_value_at( &mut self, hash_id: HashId, value: Arc<[u8]>, ) -> Result<(), HashIdError> { self.values_bytes = self.values_bytes.saturating_add(value.len()); if let Some(old) = self.values.insert_at(hash_id, Some(value))? { self.values_bytes = self.values_bytes.saturating_sub(old.len()); } Ok(()) } pub(crate) fn get_hash(&self, hash_id: HashId) -> Result<Option<&ObjectHash>, HashIdError> { self.hashes.get(hash_id) } pub(crate) fn get_value(&self, hash_id: HashId) -> Result<Option<&[u8]>, HashIdError> { match self.values.get(hash_id)? 
{ Some(value) => Ok(value.as_ref().map(|v| v.as_ref())), None => Ok(None), } } pub(crate) fn contains(&self, hash_id: HashId) -> Result<bool, HashIdError> { Ok(self.values.get(hash_id)?.unwrap_or(&None).is_some()) } fn take_new_ids(&mut self) -> Vec<HashId> { let new_ids = self.new_ids.clone(); self.new_ids.clear(); new_ids } } pub struct InMemory { current_cycle: BTreeMap<HashId, Option<Arc<[u8]>>>, pub hashes: HashValueStore, sender: Option<Sender<Command>>, pub context_hashes: Map<u64, HashId>, context_hashes_cycles: VecDeque<Vec<u64>>, thread_handle: Option<JoinHandle<()>>, shapes: DirectoryShapes, string_interner: StringInterner, } impl GarbageCollector for InMemory { fn new_cycle_started(&mut self) -> Result<(), GarbageCollectionError> { self.new_cycle_started(); Ok(()) } fn block_applied( &mut self, referenced_older_objects: Vec<HashId>, ) -> Result<(), GarbageCollectionError> { self.block_applied(referenced_older_objects); Ok(()) } } impl Flushable for InMemory { fn flush(&self) -> Result<(), anyhow::Error> { Ok(()) } } impl Persistable for InMemory { fn is_persistent(&self) -> bool { false } } impl KeyValueStoreBackend for InMemory { fn write_batch(&mut self, batch: Vec<(HashId, Arc<[u8]>)>) -> Result<(), DBError> { self.write_batch(batch) } fn contains(&self, hash_id: HashId) -> Result<bool, DBError> { self.contains(hash_id) } fn put_context_hash(&mut self, hash_id: HashId) -> Result<(), DBError> { self.put_context_hash_impl(hash_id) } fn get_context_hash(&self, context_hash: &ContextHash) -> Result<Option<HashId>, DBError> { Ok(self.get_context_hash_impl(context_hash)) } fn get_hash(&self, hash_id: HashId) -> Result<Option<Cow<ObjectHash>>, DBError> { Ok(self.get_hash(hash_id)?.map(Cow::Borrowed)) } fn get_value(&self, hash_id: HashId) -> Result<Option<Cow<[u8]>>, DBError> { Ok(self.get_value(hash_id)?.map(Cow::Borrowed)) } fn get_vacant_object_hash(&mut self) -> Result<VacantObjectHash, DBError> { self.get_vacant_entry_hash() } fn clear_objects(&mut self) -> Result<(), DBError> { // `InMemory` has its own garbage collection Ok(()) } fn memory_usage(&self) -> RepositoryMemoryUsage { let mut mem = self.hashes.get_memory_usage(); mem.nshapes = self.shapes.nshapes(); mem } fn get_shape(&self, shape_id: DirectoryShapeId) -> Result<ShapeStrings, DBError> { self.shapes .get_shape(shape_id) .map(ShapeStrings::SliceIds) .map_err(Into::into) } fn make_shape( &mut self, dir: &[(StringId, DirEntryId)], ) -> Result<Option<DirectoryShapeId>, DBError> { self.shapes.make_shape(dir).map_err(Into::into) } fn synchronize_strings(&mut self, string_interner: &StringInterner) -> Result<(), DBError> { self.string_interner.extend_from(string_interner); Ok(()) } fn get_str(&self, string_id: StringId) -> Option<&str> { self.string_interner.get(string_id) } } impl InMemory { pub fn try_new() -> Result<Self, std::io::Error> { // TODO - TE-210: Remove once we hace proper support for history modes. 
let garbage_collector_disabled = std::env::var("DISABLE_INMEM_CONTEXT_GC") .unwrap_or_else(|_| "false".to_string()) .parse::<bool>() .expect("Provided `DISABLE_INMEM_CONTEXT_GC` value cannot be converted to bool"); let (sender, cons, thread_handle) = if garbage_collector_disabled { (None, None, None) } else { let (sender, recv) = crossbeam_channel::unbounded(); let (prod, cons) = tezos_spsc::bounded(2_000_000); let thread_handle = std::thread::Builder::new() .name("ctx-inmem-gc-thread".to_string()) .spawn(move || { GCThread { cycles: Cycles::default(), recv, free_ids: prod, pending: Vec::new(), } .run() })?; (Some(sender), Some(cons), Some(thread_handle)) }; let current_cycle = Default::default(); let hashes = HashValueStore::new(cons); let context_hashes = Default::default(); let mut context_hashes_cycles = VecDeque::with_capacity(PRESERVE_CYCLE_COUNT); for _ in 0..PRESERVE_CYCLE_COUNT { context_hashes_cycles.push_back(Default::default()) } Ok(Self { current_cycle, hashes, sender, context_hashes, context_hashes_cycles, thread_handle, shapes: DirectoryShapes::default(), string_interner: StringInterner::default(), }) } pub(crate) fn get_vacant_entry_hash(&mut self) -> Result<VacantObjectHash, DBError> { self.hashes.get_vacant_object_hash().map_err(Into::into) } pub(crate) fn get_hash(&self, hash_id: HashId) -> Result<Option<&ObjectHash>, DBError> { self.hashes.get_hash(hash_id).map_err(Into::into) } pub(crate) fn get_value(&self, hash_id: HashId) -> Result<Option<&[u8]>, DBError> { self.hashes.get_value(hash_id).map_err(Into::into) } fn contains(&self, hash_id: HashId) -> Result<bool, DBError> { self.hashes.contains(hash_id).map_err(Into::into) } pub fn write_batch(&mut self, batch: Vec<(HashId, Arc<[u8]>)>) -> Result<(), DBError> { for (hash_id, value) in batch { self.hashes.insert_value_at(hash_id, Arc::clone(&value))?; self.current_cycle.insert(hash_id, Some(value)); } Ok(()) } pub fn new_cycle_started(&mut self) { if let Some(sender) = &self.sender { let values_in_cycle = std::mem::take(&mut self.current_cycle); let new_ids = self.hashes.take_new_ids(); if let Err(e) = sender.try_send(Command::StartNewCycle { values_in_cycle, new_ids, }) { eprintln!("Fail to send Command::StartNewCycle to GC worker: {:?}", e); } if let Some(unused) = self.context_hashes_cycles.pop_front() { for hash in unused { self.context_hashes.remove(&hash); } } self.context_hashes_cycles.push_back(Default::default()); } } pub fn block_applied(&mut self, reused: Vec<HashId>) { if let Some(sender) = &self.sender { if let Err(e) = sender.send(Command::MarkReused { reused }) { eprintln!("Fail to send Command::MarkReused to GC worker: {:?}", e); } } } pub fn get_context_hash_impl(&self, context_hash: &ContextHash) -> Option<HashId> { let mut hasher = DefaultHasher::new(); hasher.write(context_hash.as_ref()); let hashed = hasher.finish(); self.context_hashes.get(&hashed).cloned() } pub fn put_context_hash_impl(&mut self, commit_hash_id: HashId) -> Result<(), DBError> { let commit_hash = self .hashes .get_hash(commit_hash_id)? 
.ok_or(DBError::MissingObject { hash_id: commit_hash_id, })?; let mut hasher = DefaultHasher::new(); hasher.write(&commit_hash[..]); let hashed = hasher.finish(); self.context_hashes.insert(hashed, commit_hash_id); if let Some(back) = self.context_hashes_cycles.back_mut() { back.push(hashed); }; Ok(()) } #[cfg(test)] pub(crate) fn put_object_hash(&mut self, entry_hash: ObjectHash) -> HashId { let vacant = self.get_vacant_entry_hash().unwrap(); vacant.write_with(|entry| *entry = entry_hash) } } impl Drop for InMemory { fn drop(&mut self) { let sender = match self.sender.take() { Some(sender) => sender, None => return, }; if let Err(e) = sender.send(Command::Close) { eprintln!("Fail to send Command::Close to GC worker: {:?}", e); return; } let thread_handle = match self.thread_handle.take() { Some(thread_handle) => thread_handle, None => return, }; if let Err(e) = thread_handle.join() { eprintln!("Fail to join GC worker thread: {:?}", e); } } }
get_memory_usage
identifier_name
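`get_memory_usage` above builds its total with saturating arithmetic: the stored value bytes, plus capacity times slot size for the value table, plus 16 bytes per slot for each `Arc`'s out-of-line strong and weak counters, plus the hash table. A sketch of that estimate as a free function (`total_bytes` and its parameters are illustrative names):

use std::mem::size_of;
use std::sync::Arc;

fn total_bytes(
    values_bytes: usize,
    values_capacity: usize,
    hashes_capacity: usize,
    hash_size: usize,
) -> usize {
    values_bytes
        .saturating_add(values_capacity * size_of::<Option<Arc<[u8]>>>())
        .saturating_add(values_capacity * 16) // two usize counters per `Arc` allocation
        .saturating_add(hashes_capacity * hash_size)
}

fn main() {
    // e.g. 1 MiB of stored values in 1024 slots, plus 1024 32-byte hashes
    let bytes = total_bytes(1 << 20, 1024, 1024, 32);
    assert!(bytes >= 1 << 20);
    println!("approx. {} bytes", bytes);
}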
lib.rs
#![deny(missing_docs)] //! An append-only, on-disk key-value index with lockless reads use std::cell::UnsafeCell; use std::fs::OpenOptions; use std::hash::{Hash, Hasher}; use std::io; use std::marker::PhantomData; use std::mem; use std::ops::{Deref, DerefMut}; use std::path::{Path, PathBuf}; use arrayvec::ArrayVec; use lazy_static::lazy_static; use memmap::MmapMut; use parking_lot::{Mutex, MutexGuard}; use seahash::SeaHasher; const NUM_LANES: usize = 64; const NUM_SHARDS: usize = 1024; const PAGE_SIZE: usize = 4096; const FIRST_LANE_PAGES: usize = 64; // marker struct for shard-mutexes struct Shard; lazy_static! { static ref SHARDS: ArrayVec<[Mutex<Shard>; NUM_SHARDS]> = { let mut locks = ArrayVec::new(); for _ in 0..NUM_SHARDS { locks.push(Mutex::new(Shard)) } locks }; } #[inline(always)] fn hash_val<T: Hash>(t: &T) -> u64 { let mut hasher = SeaHasher::new(); t.hash(&mut hasher); hasher.finish() } enum Found<'a, K, V> { Some(&'a Entry<K, V>), None(usize, usize, usize), Invalid(usize, usize, usize), } /// Marker type telling you your update was a no-op pub type AlreadyThere = bool; /// On-disk index structure mapping keys to values pub struct Index<K, V> { lanes: UnsafeCell<ArrayVec<[MmapMut; NUM_LANES]>>, path: PathBuf, pages: Mutex<u64>, _marker: PhantomData<(K, V)>, } unsafe impl<K, V> Send for Index<K, V> {} unsafe impl<K, V> Sync for Index<K, V> {} #[derive(Debug)] struct Entry<K, V> { key: K, val: V, next: u64, kv_checksum: u64, next_checksum: u64, } // Wrapper reference for mutating entries, carrying a mutex guard struct EntryMut<'a, K, V> { entry: &'a mut Entry<K, V>, _lock: MutexGuard<'a, Shard>, } impl<'a, K, V> Deref for EntryMut<'a, K, V> { type Target = Entry<K, V>; fn deref(&self) -> &Self::Target { &self.entry } } impl<'a, K, V> DerefMut for EntryMut<'a, K, V> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.entry } } impl<K: Hash, V: Hash> Entry<K, V> { fn new(key: K, val: V) -> Self { let kv_checksum = hash_val(&key).wrapping_add(hash_val(&val)); let entry = Entry { key, val, kv_checksum, next: 0, next_checksum: 0 + 1, }; debug_assert!(entry.valid()); entry } fn valid(&self) -> bool { if hash_val(&self.key).wrapping_add(hash_val(&self.val)) == self.kv_checksum && self.next + 1 == self.next_checksum { true } else { false } } fn set_next<I: Into<u64>>(&mut self, next: I) { let next = next.into(); self.next = next; self.next_checksum = next + 1; } } impl<K: Hash + Copy + PartialEq, V: Hash + Copy> Index<K, V> { /// Create or load an index at `path` pub fn new<P: AsRef<Path>>(path: &P) -> io::Result<Self> { let mut lanes = ArrayVec::new(); // check for lane files already on disk for n in 0..NUM_LANES { let mut pathbuf = PathBuf::from(path.as_ref()); pathbuf.push(&format!("{:02x}", n)); if pathbuf.exists() { let file = OpenOptions::new().read(true).write(true).open(&pathbuf)?; let lane_pages = Self::lane_pages(n); let file_len = PAGE_SIZE as u64 * lane_pages as u64; file.set_len(file_len)?; unsafe { lanes.push(MmapMut::map_mut(&file)?) }; } } // find the number of already occupied pages let mut num_pages = 0; if let Some(last) = lanes.last() { // help the type inferance along a bit. 
let last: &MmapMut = last; // add up pages of all but the last lane, since they must all be full let mut full_pages = 0; for n in 0..lanes.len().saturating_sub(1) { println!("lane {}, pages {}", n, Self::lane_pages(n)); full_pages += Self::lane_pages(n) } // do a binary search to find the last populated page in the last lane let mut low_bound = 0; let mut high_bound = Self::lane_pages(lanes.len() - 1) - 1; while low_bound + 1!= high_bound { let check = low_bound + (high_bound - low_bound) / 2; println!( "low bound: {}, high bound: {}, check {}", low_bound, high_bound, check, ); let page_ofs = PAGE_SIZE * check; // is there a valid entry in this page? for slot in 0..Self::entries_per_page() { let slot_ofs = page_ofs + slot * mem::size_of::<Entry<K, V>>(); let ptr = last.as_ptr(); let entry: &Entry<K, V> = unsafe { mem::transmute(ptr.offset(slot_ofs as isize)) }; if entry.valid() { low_bound = check; break; } } if low_bound!= check { high_bound = check } } num_pages = full_pages + high_bound; } // create the index let index = Index { lanes: UnsafeCell::new(lanes), path: PathBuf::from(path.as_ref()), pages: Mutex::new(num_pages as u64), _marker: PhantomData, }; // initialize index with at least one page if num_pages == 0 { assert_eq!(index.new_page()?, 0); } Ok(index) } /// Returns how many pages have been allocated so far pub fn pages(&self) -> usize { *self.pages.lock() as usize } /// Returns how many pages fit into one lane #[inline(always)] fn lane_pages(n: usize) -> usize { 2_usize.pow(n as u32) * FIRST_LANE_PAGES } #[inline(always)] fn entries_per_page() -> usize { PAGE_SIZE / mem::size_of::<Entry<K, V>>() } // calculates the slot in the page this hashed key would // occupy at a certain depth #[inline(always)] fn slot(key_hash: u64, depth: usize) -> usize { (hash_val(&(key_hash + depth as u64)) % Self::entries_per_page() as u64) as usize } // produces following output over page with FIRST_LANE_PAGES = 2 // (0, 0), (0, 1), // (1, 0), (1, 1), (1, 2), (1, 3), // (2, 0), (2, 1), (2, 2), (2, 3), (2, 4), (2, 5), (2, 6), (2, 7), //... and so on and so forth... #[inline(always)] fn lane_page(page: usize) -> (usize, usize) { let usize_bits = mem::size_of::<usize>() * 8; let i = page / FIRST_LANE_PAGES + 1; let lane = usize_bits - i.leading_zeros() as usize - 1; let page = page - (2usize.pow(lane as u32) - 1) * FIRST_LANE_PAGES; (lane, page) } fn new_lane(&self) -> io::Result<()> { let lanes_ptr = self.lanes.get(); let lane_nr = unsafe { (*lanes_ptr).len() }; let num_pages = Self::lane_pages(lane_nr); let mut path = self.path.clone(); path.push(format!("{:02x}", lane_nr)); let file_len = PAGE_SIZE as u64 * num_pages as u64; let file = OpenOptions::new() .read(true) .write(true) .create(true) .open(&path)?; file.set_len(file_len)?; unsafe { (*lanes_ptr).push(MmapMut::map_mut(&file)?) } Ok(()) } fn new_page(&self) -> io::Result<u64> { let mut page_nr = self.pages.lock(); let (_, offset) = Self::lane_page(*page_nr as usize); if offset == 0 { // create new lane self.new_lane()? } let new_page_nr = *page_nr; *page_nr += 1; Ok(new_page_nr) } // Get a mutable reference to the `Entry`, fn entry(&self, lane: usize, page: usize, slot: usize) -> &Entry<K, V>
// Get a mutable reference to the `Entry`, // locking the corresponding shard. fn entry_mut( &self, lane: usize, page: usize, slot: usize, ) -> EntryMut<K, V> { let shard = (page ^ slot) % NUM_SHARDS; // Lock the entry for writing let lock = SHARDS[shard].lock(); let page_ofs = PAGE_SIZE * page; let slot_ofs = page_ofs + slot * mem::size_of::<Entry<K, V>>(); EntryMut { entry: unsafe { mem::transmute( (*self.lanes.get())[lane] .as_ptr() .offset(slot_ofs as isize), ) }, _lock: lock, } } // Traverse the tree to find the entry for this key fn find_key(&self, k: &K) -> io::Result<Found<K, V>> { let mut depth = 0; let mut abs_page = 0; loop { let hash = hash_val(&k); let slot = Self::slot(hash, depth); let (lane, page) = Self::lane_page(abs_page); let entry = self.entry(lane, page, slot); if!entry.valid() { return Ok(Found::Invalid(lane, page, slot)); } if &entry.key == k { return Ok(Found::Some(entry)); } else if entry.next == 0 { return Ok(Found::None(lane, page, slot)); } else { abs_page = entry.next as usize; } depth += 1; } } /// Inserts a key-value pair into the index, if the key is already /// present, this is a no-op pub fn insert(&self, key: K, val: V) -> io::Result<AlreadyThere> { match self.find_key(&key)? { Found::Some(_) => { // no-op Ok(true) } Found::Invalid(lane, page, slot) => { let mut entry = self.entry_mut(lane, page, slot); if entry.valid() && entry.next!= 0 { // Someone already wrote here, recurse! // We accept the performance hit of re-traversing // the whole tree, since this case is uncommon, // and makes the implementation simpler. mem::drop(entry); self.insert(key, val) } else { *entry = Entry::new(key, val); return Ok(false); } } Found::None(lane, page, slot) => { let mut entry = self.entry_mut(lane, page, slot); if entry.next!= 0 { // again, another thread was here before us } else { entry.set_next(self.new_page()?); } // recurse mem::drop(entry); self.insert(key, val) } } } /// Looks up a value with `key` in the index pub fn get(&self, key: &K) -> io::Result<Option<&V>> { match self.find_key(key)? { Found::Some(entry) => Ok(Some(&entry.val)), _ => Ok(None), } } } #[cfg(test)] mod tests { use std::sync::Arc; use std::thread; use rand::{seq::SliceRandom, thread_rng}; use tempfile::tempdir; use super::*; #[test] fn simple() { let dir = tempdir().unwrap(); let index = Index::new(&dir).unwrap(); index.insert(0, 0).unwrap(); assert_eq!(index.get(&0).unwrap(), Some(&0)); } const N: u64 = 1024 * 256; #[test] fn multiple() { let dir = tempdir().unwrap(); let index = Index::new(&dir).unwrap(); for i in 0..N { index.insert(i, i).unwrap(); } for i in 0..N { assert_eq!(index.get(&i).unwrap(), Some(&i)); } } #[test] fn reload() { let dir = tempdir().unwrap(); let mut pages; { { let index_a = Index::new(&dir).unwrap(); for i in 0..N { index_a.insert(i, i).unwrap(); } pages = index_a.pages(); mem::drop(index_a); } let index_b = Index::new(&dir).unwrap(); // make sure the page count matches assert_eq!(pages, index_b.pages()); for i in 0..N { assert_eq!(index_b.get(&i).unwrap(), Some(&i)); } for i in N..N * 2 { index_b.insert(i, i).unwrap(); } pages = index_b.pages(); mem::drop(index_b); } let index_c = Index::new(&dir).unwrap(); // make sure the page count matches assert_eq!(pages, index_c.pages()); for i in 0..N * 2 { assert_eq!(index_c.get(&i).unwrap(), Some(&i)); } } const N_THREADS: usize = 8; // The stress test creates an index, and simultaneously writes // entries in random order from `N_THREADS` threads, // while at the same time reading from an equal amount of threads. 
//
// When all threads are finished, a final read-through is made to see
// that all key value pairs are present.
#[test]
fn stress() {
    let dir = tempdir().unwrap();
    let index = Arc::new(Index::new(&dir).unwrap());

    let mut all_indices = vec![];
    for i in 0..N {
        all_indices.push(i);
    }

    let mut rng = thread_rng();

    // shuffle the order of the writes
    let mut shuffles_write = vec![];
    for _ in 0..N_THREADS {
        let mut new = all_indices.clone();
        SliceRandom::shuffle(&mut new[..], &mut rng);
        shuffles_write.push(new);
    }

    // shuffle the order of the reads
    let mut shuffles_read = vec![];
    for _ in 0..N_THREADS {
        let mut new = all_indices.clone();
        SliceRandom::shuffle(&mut new[..], &mut rng);
        shuffles_read.push(new);
    }

    let mut threads_running = vec![];

    for i in 0..N_THREADS {
        // shuffled write
        let shuffle_write = mem::replace(&mut shuffles_write[i], vec![]);
        let index_write = index.clone();

        // write threads
        threads_running.push(thread::spawn(move || {
            for write in shuffle_write {
                index_write.insert(write, write).unwrap();
            }
        }));

        // shuffled reads
        let shuffle_read = mem::replace(&mut shuffles_read[i], vec![]);
        let index_read = index.clone();

        // read threads
        threads_running.push(thread::spawn(move || {
            for read in shuffle_read {
                match index_read.get(&read).unwrap() {
                    Some(val) => assert_eq!(val, &read),
                    None => (),
                }
            }
        }));
    }

    // make sure all threads finish successfully
    for thread in threads_running {
        thread.join().unwrap()
    }

    for i in 0..N {
        assert_eq!(index.get(&i).unwrap(), Some(&i));
    }
}
}
{
        // Get a reference to the `Entry`
        let page_ofs = PAGE_SIZE * page;
        let slot_ofs = page_ofs + slot * mem::size_of::<Entry<K, V>>();
        unsafe {
            mem::transmute(
                (*self.lanes.get())[lane].as_ptr().offset(slot_ofs as isize),
            )
        }
    }
identifier_body
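The index above grows by lanes whose page counts double (`lane_pages(n) = 2^n * FIRST_LANE_PAGES`), and `lane_page` converts an absolute page number into a (lane, page-within-lane) pair with a leading-zeros trick. A self-contained sketch that replays the mapping table from the source comment, using FIRST_LANE_PAGES = 2 as that comment does (the real index uses 64):

const FIRST_LANE_PAGES: usize = 2;

// Same arithmetic as `Index::lane_page` above, extracted as a free function.
fn lane_page(page: usize) -> (usize, usize) {
    let usize_bits = std::mem::size_of::<usize>() * 8;
    let i = page / FIRST_LANE_PAGES + 1;
    let lane = usize_bits - i.leading_zeros() as usize - 1;
    let page = page - (2usize.pow(lane as u32) - 1) * FIRST_LANE_PAGES;
    (lane, page)
}

fn main() {
    let got: Vec<_> = (0..8).map(lane_page).collect();
    // Lane 0 holds 2 pages, lane 1 holds 4, lane 2 holds 8, and so on.
    assert_eq!(
        got,
        vec![(0, 0), (0, 1), (1, 0), (1, 1), (1, 2), (1, 3), (2, 0), (2, 1)]
    );
}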
lib.rs
#![deny(missing_docs)] //! An append-only, on-disk key-value index with lockless reads use std::cell::UnsafeCell; use std::fs::OpenOptions; use std::hash::{Hash, Hasher}; use std::io; use std::marker::PhantomData; use std::mem; use std::ops::{Deref, DerefMut}; use std::path::{Path, PathBuf}; use arrayvec::ArrayVec; use lazy_static::lazy_static; use memmap::MmapMut; use parking_lot::{Mutex, MutexGuard}; use seahash::SeaHasher; const NUM_LANES: usize = 64; const NUM_SHARDS: usize = 1024; const PAGE_SIZE: usize = 4096; const FIRST_LANE_PAGES: usize = 64; // marker struct for shard-mutexes struct Shard; lazy_static! { static ref SHARDS: ArrayVec<[Mutex<Shard>; NUM_SHARDS]> = { let mut locks = ArrayVec::new(); for _ in 0..NUM_SHARDS { locks.push(Mutex::new(Shard)) } locks }; } #[inline(always)] fn hash_val<T: Hash>(t: &T) -> u64 { let mut hasher = SeaHasher::new(); t.hash(&mut hasher); hasher.finish() } enum Found<'a, K, V> { Some(&'a Entry<K, V>), None(usize, usize, usize), Invalid(usize, usize, usize), } /// Marker type telling you your update was a no-op pub type AlreadyThere = bool; /// On-disk index structure mapping keys to values pub struct Index<K, V> { lanes: UnsafeCell<ArrayVec<[MmapMut; NUM_LANES]>>, path: PathBuf, pages: Mutex<u64>, _marker: PhantomData<(K, V)>, } unsafe impl<K, V> Send for Index<K, V> {} unsafe impl<K, V> Sync for Index<K, V> {} #[derive(Debug)] struct Entry<K, V> { key: K, val: V, next: u64, kv_checksum: u64, next_checksum: u64, } // Wrapper reference for mutating entries, carrying a mutex guard struct EntryMut<'a, K, V> { entry: &'a mut Entry<K, V>, _lock: MutexGuard<'a, Shard>, } impl<'a, K, V> Deref for EntryMut<'a, K, V> { type Target = Entry<K, V>; fn deref(&self) -> &Self::Target { &self.entry } } impl<'a, K, V> DerefMut for EntryMut<'a, K, V> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.entry } } impl<K: Hash, V: Hash> Entry<K, V> { fn new(key: K, val: V) -> Self { let kv_checksum = hash_val(&key).wrapping_add(hash_val(&val)); let entry = Entry { key, val, kv_checksum, next: 0, next_checksum: 0 + 1, }; debug_assert!(entry.valid()); entry } fn valid(&self) -> bool { if hash_val(&self.key).wrapping_add(hash_val(&self.val)) == self.kv_checksum && self.next + 1 == self.next_checksum { true } else { false } } fn set_next<I: Into<u64>>(&mut self, next: I) { let next = next.into(); self.next = next; self.next_checksum = next + 1; } } impl<K: Hash + Copy + PartialEq, V: Hash + Copy> Index<K, V> { /// Create or load an index at `path` pub fn new<P: AsRef<Path>>(path: &P) -> io::Result<Self> { let mut lanes = ArrayVec::new(); // check for lane files already on disk for n in 0..NUM_LANES { let mut pathbuf = PathBuf::from(path.as_ref()); pathbuf.push(&format!("{:02x}", n)); if pathbuf.exists() { let file = OpenOptions::new().read(true).write(true).open(&pathbuf)?; let lane_pages = Self::lane_pages(n); let file_len = PAGE_SIZE as u64 * lane_pages as u64; file.set_len(file_len)?; unsafe { lanes.push(MmapMut::map_mut(&file)?) }; } } // find the number of already occupied pages let mut num_pages = 0; if let Some(last) = lanes.last() { // help the type inferance along a bit. 
let last: &MmapMut = last; // add up pages of all but the last lane, since they must all be full let mut full_pages = 0; for n in 0..lanes.len().saturating_sub(1) { println!("lane {}, pages {}", n, Self::lane_pages(n)); full_pages += Self::lane_pages(n) } // do a binary search to find the last populated page in the last lane let mut low_bound = 0; let mut high_bound = Self::lane_pages(lanes.len() - 1) - 1; while low_bound + 1!= high_bound { let check = low_bound + (high_bound - low_bound) / 2; println!( "low bound: {}, high bound: {}, check {}", low_bound, high_bound, check, ); let page_ofs = PAGE_SIZE * check; // is there a valid entry in this page? for slot in 0..Self::entries_per_page() { let slot_ofs = page_ofs + slot * mem::size_of::<Entry<K, V>>(); let ptr = last.as_ptr(); let entry: &Entry<K, V> = unsafe { mem::transmute(ptr.offset(slot_ofs as isize)) }; if entry.valid() { low_bound = check; break; } } if low_bound!= check { high_bound = check } } num_pages = full_pages + high_bound; }
pages: Mutex::new(num_pages as u64), _marker: PhantomData, }; // initialize index with at least one page if num_pages == 0 { assert_eq!(index.new_page()?, 0); } Ok(index) } /// Returns how many pages have been allocated so far pub fn pages(&self) -> usize { *self.pages.lock() as usize } /// Returns how many pages fit into one lane #[inline(always)] fn lane_pages(n: usize) -> usize { 2_usize.pow(n as u32) * FIRST_LANE_PAGES } #[inline(always)] fn entries_per_page() -> usize { PAGE_SIZE / mem::size_of::<Entry<K, V>>() } // calculates the slot in the page this hashed key would // occupy at a certain depth #[inline(always)] fn slot(key_hash: u64, depth: usize) -> usize { (hash_val(&(key_hash + depth as u64)) % Self::entries_per_page() as u64) as usize } // produces following output over page with FIRST_LANE_PAGES = 2 // (0, 0), (0, 1), // (1, 0), (1, 1), (1, 2), (1, 3), // (2, 0), (2, 1), (2, 2), (2, 3), (2, 4), (2, 5), (2, 6), (2, 7), //... and so on and so forth... #[inline(always)] fn lane_page(page: usize) -> (usize, usize) { let usize_bits = mem::size_of::<usize>() * 8; let i = page / FIRST_LANE_PAGES + 1; let lane = usize_bits - i.leading_zeros() as usize - 1; let page = page - (2usize.pow(lane as u32) - 1) * FIRST_LANE_PAGES; (lane, page) } fn new_lane(&self) -> io::Result<()> { let lanes_ptr = self.lanes.get(); let lane_nr = unsafe { (*lanes_ptr).len() }; let num_pages = Self::lane_pages(lane_nr); let mut path = self.path.clone(); path.push(format!("{:02x}", lane_nr)); let file_len = PAGE_SIZE as u64 * num_pages as u64; let file = OpenOptions::new() .read(true) .write(true) .create(true) .open(&path)?; file.set_len(file_len)?; unsafe { (*lanes_ptr).push(MmapMut::map_mut(&file)?) } Ok(()) } fn new_page(&self) -> io::Result<u64> { let mut page_nr = self.pages.lock(); let (_, offset) = Self::lane_page(*page_nr as usize); if offset == 0 { // create new lane self.new_lane()? } let new_page_nr = *page_nr; *page_nr += 1; Ok(new_page_nr) } // Get a mutable reference to the `Entry`, fn entry(&self, lane: usize, page: usize, slot: usize) -> &Entry<K, V> { // Get a reference to the `Entry` let page_ofs = PAGE_SIZE * page; let slot_ofs = page_ofs + slot * mem::size_of::<Entry<K, V>>(); unsafe { mem::transmute( (*self.lanes.get())[lane].as_ptr().offset(slot_ofs as isize), ) } } // Get a mutable reference to the `Entry`, // locking the corresponding shard. fn entry_mut( &self, lane: usize, page: usize, slot: usize, ) -> EntryMut<K, V> { let shard = (page ^ slot) % NUM_SHARDS; // Lock the entry for writing let lock = SHARDS[shard].lock(); let page_ofs = PAGE_SIZE * page; let slot_ofs = page_ofs + slot * mem::size_of::<Entry<K, V>>(); EntryMut { entry: unsafe { mem::transmute( (*self.lanes.get())[lane] .as_ptr() .offset(slot_ofs as isize), ) }, _lock: lock, } } // Traverse the tree to find the entry for this key fn find_key(&self, k: &K) -> io::Result<Found<K, V>> { let mut depth = 0; let mut abs_page = 0; loop { let hash = hash_val(&k); let slot = Self::slot(hash, depth); let (lane, page) = Self::lane_page(abs_page); let entry = self.entry(lane, page, slot); if!entry.valid() { return Ok(Found::Invalid(lane, page, slot)); } if &entry.key == k { return Ok(Found::Some(entry)); } else if entry.next == 0 { return Ok(Found::None(lane, page, slot)); } else { abs_page = entry.next as usize; } depth += 1; } } /// Inserts a key-value pair into the index, if the key is already /// present, this is a no-op pub fn insert(&self, key: K, val: V) -> io::Result<AlreadyThere> { match self.find_key(&key)? 
{ Found::Some(_) => { // no-op Ok(true) } Found::Invalid(lane, page, slot) => { let mut entry = self.entry_mut(lane, page, slot); if entry.valid() && entry.next!= 0 { // Someone already wrote here, recurse! // We accept the performance hit of re-traversing // the whole tree, since this case is uncommon, // and makes the implementation simpler. mem::drop(entry); self.insert(key, val) } else { *entry = Entry::new(key, val); return Ok(false); } } Found::None(lane, page, slot) => { let mut entry = self.entry_mut(lane, page, slot); if entry.next!= 0 { // again, another thread was here before us } else { entry.set_next(self.new_page()?); } // recurse mem::drop(entry); self.insert(key, val) } } } /// Looks up a value with `key` in the index pub fn get(&self, key: &K) -> io::Result<Option<&V>> { match self.find_key(key)? { Found::Some(entry) => Ok(Some(&entry.val)), _ => Ok(None), } } } #[cfg(test)] mod tests { use std::sync::Arc; use std::thread; use rand::{seq::SliceRandom, thread_rng}; use tempfile::tempdir; use super::*; #[test] fn simple() { let dir = tempdir().unwrap(); let index = Index::new(&dir).unwrap(); index.insert(0, 0).unwrap(); assert_eq!(index.get(&0).unwrap(), Some(&0)); } const N: u64 = 1024 * 256; #[test] fn multiple() { let dir = tempdir().unwrap(); let index = Index::new(&dir).unwrap(); for i in 0..N { index.insert(i, i).unwrap(); } for i in 0..N { assert_eq!(index.get(&i).unwrap(), Some(&i)); } } #[test] fn reload() { let dir = tempdir().unwrap(); let mut pages; { { let index_a = Index::new(&dir).unwrap(); for i in 0..N { index_a.insert(i, i).unwrap(); } pages = index_a.pages(); mem::drop(index_a); } let index_b = Index::new(&dir).unwrap(); // make sure the page count matches assert_eq!(pages, index_b.pages()); for i in 0..N { assert_eq!(index_b.get(&i).unwrap(), Some(&i)); } for i in N..N * 2 { index_b.insert(i, i).unwrap(); } pages = index_b.pages(); mem::drop(index_b); } let index_c = Index::new(&dir).unwrap(); // make sure the page count matches assert_eq!(pages, index_c.pages()); for i in 0..N * 2 { assert_eq!(index_c.get(&i).unwrap(), Some(&i)); } } const N_THREADS: usize = 8; // The stress test creates an index, and simultaneously writes // entries in random order from `N_THREADS` threads, // while at the same time reading from an equal amount of threads. // // When all threads are finished, a final read-through is made to see // that all key value pairs are present. 
#[test] fn stress() { let dir = tempdir().unwrap(); let index = Arc::new(Index::new(&dir).unwrap()); let mut all_indicies = vec![]; for i in 0..N { all_indicies.push(i); } let mut rng = thread_rng(); // shuffle the order of the writes let mut shuffles_write = vec![]; for _ in 0..N_THREADS { let mut new = all_indicies.clone(); SliceRandom::shuffle(&mut new[..], &mut rng); shuffles_write.push(new); } // shuffle the order of the reads let mut shuffles_read = vec![]; for _ in 0..N_THREADS { let mut new = all_indicies.clone(); SliceRandom::shuffle(&mut new[..], &mut rng); shuffles_read.push(new); } let mut threads_running = vec![]; for i in 0..N_THREADS { // shuffled write let shuffle_write = mem::replace(&mut shuffles_write[i], vec![]); let index_write = index.clone(); // write threads threads_running.push(thread::spawn(move || { for write in shuffle_write { index_write.insert(write, write).unwrap(); } })); // shuffled reads let shuffle_read = mem::replace(&mut shuffles_read[i], vec![]); let index_read = index.clone(); // read threads threads_running.push(thread::spawn(move || { for read in shuffle_read { match index_read.get(&read).unwrap() { Some(val) => assert_eq!(val, &read), None => (), } } })); } // make sure all threads finish successfully for thread in threads_running { thread.join().unwrap() } for i in 0..N { assert_eq!(index.get(&i).unwrap(), Some(&i)); } } }
// create the index
        let index = Index {
            lanes: UnsafeCell::new(lanes),
            path: PathBuf::from(path.as_ref()),
random_line_split
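Entry validity above rests on two checksums written next to the data: the key hash plus the value hash (wrapping), and `next + 1`. An all-zero slot in a freshly mapped lane file fails the `next` check, so lockless readers can tell blank or torn slots from real entries. A sketch of the check with std's `DefaultHasher` standing in for `SeaHasher` and `u64` key/value types chosen for illustration:

use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

fn hash_val<T: Hash>(t: &T) -> u64 {
    let mut h = DefaultHasher::new();
    t.hash(&mut h);
    h.finish()
}

struct Entry { key: u64, val: u64, next: u64, kv_checksum: u64, next_checksum: u64 }

impl Entry {
    fn valid(&self) -> bool {
        hash_val(&self.key).wrapping_add(hash_val(&self.val)) == self.kv_checksum
            && self.next + 1 == self.next_checksum
    }
}

fn main() {
    // A freshly mapped page is all zeroes: next_checksum (0) != next + 1 (1),
    // so the slot is reported invalid rather than read as a real entry.
    let blank = Entry { key: 0, val: 0, next: 0, kv_checksum: 0, next_checksum: 0 };
    assert!(!blank.valid());

    let written = Entry {
        key: 7,
        val: 9,
        next: 0,
        kv_checksum: hash_val(&7u64).wrapping_add(hash_val(&9u64)),
        next_checksum: 1,
    };
    assert!(written.valid());
}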
lib.rs
#![deny(missing_docs)] //! An append-only, on-disk key-value index with lockless reads use std::cell::UnsafeCell; use std::fs::OpenOptions; use std::hash::{Hash, Hasher}; use std::io; use std::marker::PhantomData; use std::mem; use std::ops::{Deref, DerefMut}; use std::path::{Path, PathBuf}; use arrayvec::ArrayVec; use lazy_static::lazy_static; use memmap::MmapMut; use parking_lot::{Mutex, MutexGuard}; use seahash::SeaHasher; const NUM_LANES: usize = 64; const NUM_SHARDS: usize = 1024; const PAGE_SIZE: usize = 4096; const FIRST_LANE_PAGES: usize = 64; // marker struct for shard-mutexes struct Shard; lazy_static! { static ref SHARDS: ArrayVec<[Mutex<Shard>; NUM_SHARDS]> = { let mut locks = ArrayVec::new(); for _ in 0..NUM_SHARDS { locks.push(Mutex::new(Shard)) } locks }; } #[inline(always)] fn hash_val<T: Hash>(t: &T) -> u64 { let mut hasher = SeaHasher::new(); t.hash(&mut hasher); hasher.finish() } enum Found<'a, K, V> { Some(&'a Entry<K, V>), None(usize, usize, usize), Invalid(usize, usize, usize), } /// Marker type telling you your update was a no-op pub type AlreadyThere = bool; /// On-disk index structure mapping keys to values pub struct Index<K, V> { lanes: UnsafeCell<ArrayVec<[MmapMut; NUM_LANES]>>, path: PathBuf, pages: Mutex<u64>, _marker: PhantomData<(K, V)>, } unsafe impl<K, V> Send for Index<K, V> {} unsafe impl<K, V> Sync for Index<K, V> {} #[derive(Debug)] struct Entry<K, V> { key: K, val: V, next: u64, kv_checksum: u64, next_checksum: u64, } // Wrapper reference for mutating entries, carrying a mutex guard struct EntryMut<'a, K, V> { entry: &'a mut Entry<K, V>, _lock: MutexGuard<'a, Shard>, } impl<'a, K, V> Deref for EntryMut<'a, K, V> { type Target = Entry<K, V>; fn
(&self) -> &Self::Target { &self.entry } } impl<'a, K, V> DerefMut for EntryMut<'a, K, V> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.entry } } impl<K: Hash, V: Hash> Entry<K, V> { fn new(key: K, val: V) -> Self { let kv_checksum = hash_val(&key).wrapping_add(hash_val(&val)); let entry = Entry { key, val, kv_checksum, next: 0, next_checksum: 0 + 1, }; debug_assert!(entry.valid()); entry } fn valid(&self) -> bool { if hash_val(&self.key).wrapping_add(hash_val(&self.val)) == self.kv_checksum && self.next + 1 == self.next_checksum { true } else { false } } fn set_next<I: Into<u64>>(&mut self, next: I) { let next = next.into(); self.next = next; self.next_checksum = next + 1; } } impl<K: Hash + Copy + PartialEq, V: Hash + Copy> Index<K, V> { /// Create or load an index at `path` pub fn new<P: AsRef<Path>>(path: &P) -> io::Result<Self> { let mut lanes = ArrayVec::new(); // check for lane files already on disk for n in 0..NUM_LANES { let mut pathbuf = PathBuf::from(path.as_ref()); pathbuf.push(&format!("{:02x}", n)); if pathbuf.exists() { let file = OpenOptions::new().read(true).write(true).open(&pathbuf)?; let lane_pages = Self::lane_pages(n); let file_len = PAGE_SIZE as u64 * lane_pages as u64; file.set_len(file_len)?; unsafe { lanes.push(MmapMut::map_mut(&file)?) }; } } // find the number of already occupied pages let mut num_pages = 0; if let Some(last) = lanes.last() { // help the type inferance along a bit. let last: &MmapMut = last; // add up pages of all but the last lane, since they must all be full let mut full_pages = 0; for n in 0..lanes.len().saturating_sub(1) { println!("lane {}, pages {}", n, Self::lane_pages(n)); full_pages += Self::lane_pages(n) } // do a binary search to find the last populated page in the last lane let mut low_bound = 0; let mut high_bound = Self::lane_pages(lanes.len() - 1) - 1; while low_bound + 1!= high_bound { let check = low_bound + (high_bound - low_bound) / 2; println!( "low bound: {}, high bound: {}, check {}", low_bound, high_bound, check, ); let page_ofs = PAGE_SIZE * check; // is there a valid entry in this page? for slot in 0..Self::entries_per_page() { let slot_ofs = page_ofs + slot * mem::size_of::<Entry<K, V>>(); let ptr = last.as_ptr(); let entry: &Entry<K, V> = unsafe { mem::transmute(ptr.offset(slot_ofs as isize)) }; if entry.valid() { low_bound = check; break; } } if low_bound!= check { high_bound = check } } num_pages = full_pages + high_bound; } // create the index let index = Index { lanes: UnsafeCell::new(lanes), path: PathBuf::from(path.as_ref()), pages: Mutex::new(num_pages as u64), _marker: PhantomData, }; // initialize index with at least one page if num_pages == 0 { assert_eq!(index.new_page()?, 0); } Ok(index) } /// Returns how many pages have been allocated so far pub fn pages(&self) -> usize { *self.pages.lock() as usize } /// Returns how many pages fit into one lane #[inline(always)] fn lane_pages(n: usize) -> usize { 2_usize.pow(n as u32) * FIRST_LANE_PAGES } #[inline(always)] fn entries_per_page() -> usize { PAGE_SIZE / mem::size_of::<Entry<K, V>>() } // calculates the slot in the page this hashed key would // occupy at a certain depth #[inline(always)] fn slot(key_hash: u64, depth: usize) -> usize { (hash_val(&(key_hash + depth as u64)) % Self::entries_per_page() as u64) as usize } // produces following output over page with FIRST_LANE_PAGES = 2 // (0, 0), (0, 1), // (1, 0), (1, 1), (1, 2), (1, 3), // (2, 0), (2, 1), (2, 2), (2, 3), (2, 4), (2, 5), (2, 6), (2, 7), //... and so on and so forth... 
#[inline(always)]
    fn lane_page(page: usize) -> (usize, usize) {
        let usize_bits = mem::size_of::<usize>() * 8;
        let i = page / FIRST_LANE_PAGES + 1;
        let lane = usize_bits - i.leading_zeros() as usize - 1;
        let page = page - (2usize.pow(lane as u32) - 1) * FIRST_LANE_PAGES;
        (lane, page)
    }

    fn new_lane(&self) -> io::Result<()> {
        let lanes_ptr = self.lanes.get();
        let lane_nr = unsafe { (*lanes_ptr).len() };

        let num_pages = Self::lane_pages(lane_nr);

        let mut path = self.path.clone();
        path.push(format!("{:02x}", lane_nr));

        let file_len = PAGE_SIZE as u64 * num_pages as u64;

        let file = OpenOptions::new()
            .read(true)
            .write(true)
            .create(true)
            .open(&path)?;

        file.set_len(file_len)?;

        unsafe { (*lanes_ptr).push(MmapMut::map_mut(&file)?) }

        Ok(())
    }

    fn new_page(&self) -> io::Result<u64> {
        let mut page_nr = self.pages.lock();

        let (_, offset) = Self::lane_page(*page_nr as usize);

        if offset == 0 {
            // create new lane
            self.new_lane()?
        }

        let new_page_nr = *page_nr;
        *page_nr += 1;

        Ok(new_page_nr)
    }

    // Get a reference to the `Entry`
    fn entry(&self, lane: usize, page: usize, slot: usize) -> &Entry<K, V> {
        let page_ofs = PAGE_SIZE * page;
        let slot_ofs = page_ofs + slot * mem::size_of::<Entry<K, V>>();
        unsafe {
            mem::transmute(
                (*self.lanes.get())[lane].as_ptr().offset(slot_ofs as isize),
            )
        }
    }

    // Get a mutable reference to the `Entry`,
    // locking the corresponding shard.
    fn entry_mut(
        &self,
        lane: usize,
        page: usize,
        slot: usize,
    ) -> EntryMut<K, V> {
        let shard = (page ^ slot) % NUM_SHARDS;
        // Lock the entry for writing
        let lock = SHARDS[shard].lock();

        let page_ofs = PAGE_SIZE * page;
        let slot_ofs = page_ofs + slot * mem::size_of::<Entry<K, V>>();
        EntryMut {
            entry: unsafe {
                mem::transmute(
                    (*self.lanes.get())[lane]
                        .as_ptr()
                        .offset(slot_ofs as isize),
                )
            },
            _lock: lock,
        }
    }

    // Traverse the tree to find the entry for this key
    fn find_key(&self, k: &K) -> io::Result<Found<K, V>> {
        let mut depth = 0;
        let mut abs_page = 0;
        loop {
            let hash = hash_val(&k);
            let slot = Self::slot(hash, depth);

            let (lane, page) = Self::lane_page(abs_page);
            let entry = self.entry(lane, page, slot);

            if !entry.valid() {
                return Ok(Found::Invalid(lane, page, slot));
            }

            if &entry.key == k {
                return Ok(Found::Some(entry));
            } else if entry.next == 0 {
                return Ok(Found::None(lane, page, slot));
            } else {
                abs_page = entry.next as usize;
            }
            depth += 1;
        }
    }

    /// Inserts a key-value pair into the index, if the key is already
    /// present, this is a no-op
    pub fn insert(&self, key: K, val: V) -> io::Result<AlreadyThere> {
        match self.find_key(&key)? {
            Found::Some(_) => {
                // no-op
                Ok(true)
            }
            Found::Invalid(lane, page, slot) => {
                let mut entry = self.entry_mut(lane, page, slot);

                if entry.valid() && entry.next != 0 {
                    // Someone already wrote here, recurse!
                    // We accept the performance hit of re-traversing
                    // the whole tree, since this case is uncommon,
                    // and makes the implementation simpler.
                    mem::drop(entry);
                    self.insert(key, val)
                } else {
                    *entry = Entry::new(key, val);
                    return Ok(false);
                }
            }
            Found::None(lane, page, slot) => {
                let mut entry = self.entry_mut(lane, page, slot);
                if entry.next != 0 {
                    // again, another thread was here before us
                } else {
                    entry.set_next(self.new_page()?);
                }
                // recurse
                mem::drop(entry);
                self.insert(key, val)
            }
        }
    }

    /// Looks up a value with `key` in the index
    pub fn get(&self, key: &K) -> io::Result<Option<&V>> {
        match self.find_key(key)?
{
            Found::Some(entry) => Ok(Some(&entry.val)),
            _ => Ok(None),
        }
    }
}

#[cfg(test)]
mod tests {
    use std::sync::Arc;
    use std::thread;

    use rand::{seq::SliceRandom, thread_rng};
    use tempfile::tempdir;

    use super::*;

    #[test]
    fn simple() {
        let dir = tempdir().unwrap();

        let index = Index::new(&dir).unwrap();

        index.insert(0, 0).unwrap();
        assert_eq!(index.get(&0).unwrap(), Some(&0));
    }

    const N: u64 = 1024 * 256;

    #[test]
    fn multiple() {
        let dir = tempdir().unwrap();

        let index = Index::new(&dir).unwrap();
        for i in 0..N {
            index.insert(i, i).unwrap();
        }
        for i in 0..N {
            assert_eq!(index.get(&i).unwrap(), Some(&i));
        }
    }

    #[test]
    fn reload() {
        let dir = tempdir().unwrap();
        let mut pages;

        {
            {
                let index_a = Index::new(&dir).unwrap();
                for i in 0..N {
                    index_a.insert(i, i).unwrap();
                }
                pages = index_a.pages();
                mem::drop(index_a);
            }

            let index_b = Index::new(&dir).unwrap();

            // make sure the page count matches
            assert_eq!(pages, index_b.pages());

            for i in 0..N {
                assert_eq!(index_b.get(&i).unwrap(), Some(&i));
            }

            for i in N..N * 2 {
                index_b.insert(i, i).unwrap();
            }
            pages = index_b.pages();
            mem::drop(index_b);
        }

        let index_c = Index::new(&dir).unwrap();

        // make sure the page count matches
        assert_eq!(pages, index_c.pages());

        for i in 0..N * 2 {
            assert_eq!(index_c.get(&i).unwrap(), Some(&i));
        }
    }

    const N_THREADS: usize = 8;

    // The stress test creates an index, and simultaneously writes
    // entries in random order from `N_THREADS` threads,
    // while at the same time reading from an equal amount of threads.
    //
    // When all threads are finished, a final read-through is made to see
    // that all key value pairs are present.
    #[test]
    fn stress() {
        let dir = tempdir().unwrap();

        let index = Arc::new(Index::new(&dir).unwrap());

        let mut all_indices = vec![];
        for i in 0..N {
            all_indices.push(i);
        }

        let mut rng = thread_rng();

        // shuffle the order of the writes
        let mut shuffles_write = vec![];
        for _ in 0..N_THREADS {
            let mut new = all_indices.clone();
            SliceRandom::shuffle(&mut new[..], &mut rng);
            shuffles_write.push(new);
        }

        // shuffle the order of the reads
        let mut shuffles_read = vec![];
        for _ in 0..N_THREADS {
            let mut new = all_indices.clone();
            SliceRandom::shuffle(&mut new[..], &mut rng);
            shuffles_read.push(new);
        }

        let mut threads_running = vec![];

        for i in 0..N_THREADS {
            // shuffled write
            let shuffle_write = mem::replace(&mut shuffles_write[i], vec![]);
            let index_write = index.clone();

            // write threads
            threads_running.push(thread::spawn(move || {
                for write in shuffle_write {
                    index_write.insert(write, write).unwrap();
                }
            }));

            // shuffled reads
            let shuffle_read = mem::replace(&mut shuffles_read[i], vec![]);
            let index_read = index.clone();

            // read threads
            threads_running.push(thread::spawn(move || {
                for read in shuffle_read {
                    match index_read.get(&read).unwrap() {
                        Some(val) => assert_eq!(val, &read),
                        None => (),
                    }
                }
            }));
        }

        // make sure all threads finish successfully
        for thread in threads_running {
            thread.join().unwrap()
        }

        for i in 0..N {
            assert_eq!(index.get(&i).unwrap(), Some(&i));
        }
    }
}
deref
identifier_name
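For orientation, here is a minimal usage sketch of the index above. It assumes this module is compiled as a crate named `index` and that `tempfile` is available as a dependency; both names are illustrative and not part of the original source.

```rust
// Minimal usage sketch (assumed crate name `index`, assumed dependency
// `tempfile`); it mirrors the `simple` test above.
use index::Index;

fn main() -> std::io::Result<()> {
    let dir = tempfile::tempdir()?;
    // K and V must be Hash + Copy, and K additionally PartialEq.
    let idx: Index<u64, u64> = Index::new(&dir)?;

    // The first insert writes the pair and returns Ok(false).
    assert_eq!(idx.insert(1, 100)?, false);
    // Re-inserting the same key is a no-op and returns Ok(true),
    // the `AlreadyThere` marker; the stored value is unchanged.
    assert_eq!(idx.insert(1, 200)?, true);
    assert_eq!(idx.get(&1)?, Some(&100));
    Ok(())
}
```

Note that `get` takes no locks at all; only `insert` locks one of the `NUM_SHARDS` shard mutexes, which is what makes reads lockless.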
fmt.rs
// Copyright 2018-2021 the Deno authors. All rights reserved. MIT license.

//! This module provides file formatting utilities using
//! [`dprint-plugin-typescript`](https://github.com/dprint/dprint-plugin-typescript).
//!
//! At the moment it is only consumed using CLI but in
//! the future it can be easily extended to provide
//! the same functions as ops available in JS runtime.

use crate::colors;
use crate::diff::diff;
use crate::file_watcher;
use crate::fs_util::{collect_files, get_extension, is_supported_ext_fmt};
use crate::text_encoding;
use deno_core::error::generic_error;
use deno_core::error::AnyError;
use deno_core::futures;
use deno_core::futures::FutureExt;
use std::fs;
use std::io::stdin;
use std::io::stdout;
use std::io::Read;
use std::io::Write;
use std::path::Path;
use std::path::PathBuf;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, Mutex};

const BOM_CHAR: char = '\u{FEFF}';

/// Format JavaScript/TypeScript files.
pub async fn format(
  args: Vec<PathBuf>,
  ignore: Vec<PathBuf>,
  check: bool,
  watch: bool,
) -> Result<(), AnyError> {
  let target_file_resolver = || {
    // collect the files that are to be formatted
    collect_files(&args, &ignore, is_supported_ext_fmt).and_then(|files| {
      if files.is_empty() {
        Err(generic_error("No target files found."))
      } else {
        Ok(files)
      }
    })
  };

  let operation = |paths: Vec<PathBuf>| {
    let config = get_typescript_config();
    async move {
      if check {
        check_source_files(config, paths).await?;
      } else {
        format_source_files(config, paths).await?;
      }
      Ok(())
    }
    .boxed_local()
  };

  if watch {
    file_watcher::watch_func(target_file_resolver, operation, "Fmt").await?;
  } else {
    operation(target_file_resolver()?).await?;
  }

  Ok(())
}

/// Formats markdown (using https://github.com/dprint/dprint-plugin-markdown) and its code blocks
/// (ts/tsx, js/jsx).
fn format_markdown(
  file_text: &str,
  ts_config: dprint_plugin_typescript::configuration::Configuration,
) -> Result<String, String> {
  let md_config = get_markdown_config();
  dprint_plugin_markdown::format_text(
    &file_text,
    &md_config,
    Box::new(move |tag, text, line_width| {
      let tag = tag.to_lowercase();
      if matches!(
        tag.as_str(),
        "ts" | "tsx" | "js" | "jsx" | "javascript" | "typescript" | "json" | "jsonc"
      ) {
        // It's important to tell dprint the proper file extension, otherwise
        // it might parse the file twice.
        let extension = match tag.as_str() {
          "javascript" => "js",
          "typescript" => "ts",
          rest => rest,
        };

        if matches!(extension, "json" | "jsonc") {
          let mut json_config = get_json_config();
          json_config.line_width = line_width;
          dprint_plugin_json::format_text(&text, &json_config)
        } else {
          let fake_filename =
            PathBuf::from(format!("deno_fmt_stdin.{}", extension));
          let mut codeblock_config = ts_config.clone();
          codeblock_config.line_width = line_width;
          dprint_plugin_typescript::format_text(
            &fake_filename,
            &text,
            &codeblock_config,
          )
        }
      } else {
        Ok(text.to_string())
      }
    }),
  )
}

/// Formats JSON and JSONC using the rules provided by .deno()
/// of the configuration builder of https://github.com/dprint/dprint-plugin-json.
/// See https://git.io/Jt4ht for configuration.
fn format_json(file_text: &str) -> Result<String, String> {
  let json_config = get_json_config();
  dprint_plugin_json::format_text(&file_text, &json_config)
}

async fn check_source_files(
  config: dprint_plugin_typescript::configuration::Configuration,
  paths: Vec<PathBuf>,
) -> Result<(), AnyError> {
  let not_formatted_files_count = Arc::new(AtomicUsize::new(0));
  let checked_files_count = Arc::new(AtomicUsize::new(0));

  // prevent threads outputting at the same time
  let output_lock = Arc::new(Mutex::new(0));

  run_parallelized(paths, {
    let not_formatted_files_count = not_formatted_files_count.clone();
    let checked_files_count = checked_files_count.clone();
    move |file_path| {
      checked_files_count.fetch_add(1, Ordering::Relaxed);
      let file_text = read_file_contents(&file_path)?.text;
      let ext = get_extension(&file_path).unwrap_or_else(String::new);
      let r = if ext == "md" {
        format_markdown(&file_text, config.clone())
      } else if matches!(ext.as_str(), "json" | "jsonc") {
        format_json(&file_text)
      } else {
        dprint_plugin_typescript::format_text(&file_path, &file_text, &config)
      };
      match r {
        Ok(formatted_text) => {
          if formatted_text != file_text {
            not_formatted_files_count.fetch_add(1, Ordering::Relaxed);
            let _g = output_lock.lock().unwrap();
            let diff = diff(&file_text, &formatted_text);
            info!("");
            info!("{} {}:", colors::bold("from"), file_path.display());
            info!("{}", diff);
          }
        }
        Err(e) => {
          let _g = output_lock.lock().unwrap();
          eprintln!("Error checking: {}", file_path.to_string_lossy());
          eprintln!("   {}", e);
        }
      }
      Ok(())
    }
  })
  .await?;

  let not_formatted_files_count =
    not_formatted_files_count.load(Ordering::Relaxed);
  let checked_files_count = checked_files_count.load(Ordering::Relaxed);
  let checked_files_str =
    format!("{} {}", checked_files_count, files_str(checked_files_count));
  if not_formatted_files_count == 0 {
    info!("Checked {}", checked_files_str);
    Ok(())
  } else {
    let not_formatted_files_str = files_str(not_formatted_files_count);
    Err(generic_error(format!(
      "Found {} not formatted {} in {}",
      not_formatted_files_count, not_formatted_files_str, checked_files_str,
    )))
  }
}

async fn format_source_files(
  config: dprint_plugin_typescript::configuration::Configuration,
  paths: Vec<PathBuf>,
) -> Result<(), AnyError> {
  let formatted_files_count = Arc::new(AtomicUsize::new(0));
  let checked_files_count = Arc::new(AtomicUsize::new(0));
  let output_lock = Arc::new(Mutex::new(0)); // prevent threads outputting at the same time

  run_parallelized(paths, {
    let formatted_files_count = formatted_files_count.clone();
    let checked_files_count = checked_files_count.clone();
    move |file_path| {
      checked_files_count.fetch_add(1, Ordering::Relaxed);
      let file_contents = read_file_contents(&file_path)?;
      let ext = get_extension(&file_path).unwrap_or_else(String::new);
      let r = if ext == "md" {
        format_markdown(&file_contents.text, config.clone())
      } else if matches!(ext.as_str(), "json" | "jsonc") {
        format_json(&file_contents.text)
      } else {
        dprint_plugin_typescript::format_text(
          &file_path,
          &file_contents.text,
          &config,
        )
      };
      match r {
        Ok(formatted_text) => {
          if formatted_text != file_contents.text {
            write_file_contents(
              &file_path,
              FileContents {
                had_bom: file_contents.had_bom,
                text: formatted_text,
              },
            )?;
            formatted_files_count.fetch_add(1, Ordering::Relaxed);
            let _g = output_lock.lock().unwrap();
            info!("{}", file_path.to_string_lossy());
          }
        }
        Err(e) => {
          let _g = output_lock.lock().unwrap();
          eprintln!("Error formatting: {}", file_path.to_string_lossy());
          eprintln!("   {}", e);
        }
      }
      Ok(())
    }
  })
  .await?;

  let formatted_files_count =
formatted_files_count.load(Ordering::Relaxed);
  debug!(
    "Formatted {} {}",
    formatted_files_count,
    files_str(formatted_files_count),
  );

  let checked_files_count = checked_files_count.load(Ordering::Relaxed);
  info!(
    "Checked {} {}",
    checked_files_count,
    files_str(checked_files_count)
  );
  Ok(())
}

/// Format stdin and write result to stdout.
/// Treats input as TypeScript or as set by `--ext` flag.
/// Compatible with `--check` flag.
pub fn format_stdin(check: bool, ext: String) -> Result<(), AnyError> {
  let mut source = String::new();
  if stdin().read_to_string(&mut source).is_err() {
    return Err(generic_error("Failed to read from stdin"));
  }
  let config = get_typescript_config();

  let r = if ext.as_str() == "md" {
    format_markdown(&source, config)
  } else if matches!(ext.as_str(), "json" | "jsonc") {
    format_json(&source)
  } else {
    // dprint will fall back to jsx parsing if parsing this as a .ts file doesn't work
    dprint_plugin_typescript::format_text(
      &PathBuf::from("_stdin.ts"),
      &source,
      &config,
    )
  };
  match r {
    Ok(formatted_text) => {
      if check {
        if formatted_text != source {
          println!("Not formatted stdin");
        }
      } else {
        stdout().write_all(formatted_text.as_bytes())?;
      }
    }
    Err(e) => {
      return Err(generic_error(e));
    }
  }

  Ok(())
}

fn files_str(len: usize) -> &'static str {
  if len <= 1 {
    "file"
  } else {
    "files"
  }
}

fn get_typescript_config(
) -> dprint_plugin_typescript::configuration::Configuration {
  dprint_plugin_typescript::configuration::ConfigurationBuilder::new()
    .deno()
    .build()
}

fn get_markdown_config() -> dprint_plugin_markdown::configuration::Configuration
{
  dprint_plugin_markdown::configuration::ConfigurationBuilder::new()
    .deno()
    .build()
}

fn
() -> dprint_plugin_json::configuration::Configuration {
  dprint_plugin_json::configuration::ConfigurationBuilder::new()
    .deno()
    .build()
}

struct FileContents {
  text: String,
  had_bom: bool,
}

fn read_file_contents(file_path: &Path) -> Result<FileContents, AnyError> {
  let file_bytes = fs::read(&file_path)?;
  let charset = text_encoding::detect_charset(&file_bytes);
  let file_text = text_encoding::convert_to_utf8(&file_bytes, charset)?;
  let had_bom = file_text.starts_with(BOM_CHAR);
  let text = if had_bom {
    // remove the BOM
    String::from(&file_text[BOM_CHAR.len_utf8()..])
  } else {
    String::from(file_text)
  };

  Ok(FileContents { text, had_bom })
}

fn write_file_contents(
  file_path: &Path,
  file_contents: FileContents,
) -> Result<(), AnyError> {
  let file_text = if file_contents.had_bom {
    // add back the BOM
    format!("{}{}", BOM_CHAR, file_contents.text)
  } else {
    file_contents.text
  };

  Ok(fs::write(file_path, file_text)?)
}

pub async fn run_parallelized<F>(
  file_paths: Vec<PathBuf>,
  f: F,
) -> Result<(), AnyError>
where
  F: FnOnce(PathBuf) -> Result<(), AnyError> + Send + 'static + Clone,
{
  let handles = file_paths.iter().map(|file_path| {
    let f = f.clone();
    let file_path = file_path.clone();
    tokio::task::spawn_blocking(move || f(file_path))
  });
  let join_results = futures::future::join_all(handles).await;

  // find the tasks that panicked and let the user know which files
  let panic_file_paths = join_results
    .iter()
    .enumerate()
    .filter_map(|(i, join_result)| {
      join_result
        .as_ref()
        .err()
        .map(|_| file_paths[i].to_string_lossy())
    })
    .collect::<Vec<_>>();
  if !panic_file_paths.is_empty() {
    panic!("Panic formatting: {}", panic_file_paths.join(", "))
  }

  // check for any errors and if so return the first one
  let mut errors = join_results.into_iter().filter_map(|join_result| {
    join_result
      .ok()
      .map(|handle_result| handle_result.err())
      .flatten()
  });
  if let Some(e) = errors.next() {
    Err(e)
  } else {
    Ok(())
  }
}
get_json_config
identifier_name
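One detail of the module worth isolating is the BOM round-trip performed by `read_file_contents` and `write_file_contents`: the U+FEFF marker is stripped before formatting and re-added on write, so the file's encoding signature survives the rewrite. A standalone sketch follows; only `BOM_CHAR` is taken from the module above, and `strip_bom` is a hypothetical helper.

```rust
const BOM_CHAR: char = '\u{FEFF}';

// Hypothetical helper: split a leading BOM off the text, if present.
fn strip_bom(text: &str) -> (bool, &str) {
    match text.strip_prefix(BOM_CHAR) {
        Some(rest) => (true, rest),
        None => (false, text),
    }
}

fn main() {
    let (had_bom, text) = strip_bom("\u{FEFF}let x = 1;");
    assert!(had_bom);
    assert_eq!(text, "let x = 1;");

    // After formatting, the BOM is prepended again so the file
    // round-trips with its encoding signature intact.
    let restored = format!("{}{}", BOM_CHAR, text);
    assert!(restored.starts_with(BOM_CHAR));
}
```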
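`run_parallelized` follows a common fan-out pattern: clone the closure once per file, run each copy on tokio's blocking thread pool, then join all handles and surface panics and per-file errors. Below is a condensed sketch of the same pattern, assuming `tokio` and `futures` are on the dependency list; `process` is a stand-in for the per-file closure, not part of the original source.

```rust
use std::path::PathBuf;

// Stand-in for the per-file work (formatting, in the module above).
fn process(path: PathBuf) -> Result<(), String> {
    println!("formatting {}", path.display());
    Ok(())
}

async fn run_all(paths: Vec<PathBuf>) -> Result<(), String> {
    // One blocking task per file; the work function is cheap to clone.
    let handles = paths
        .into_iter()
        .map(|path| tokio::task::spawn_blocking(move || process(path)));

    for join_result in futures::future::join_all(handles).await {
        // Outer Result: did the task panic? Inner Result: per-file error.
        join_result.map_err(|e| e.to_string())??;
    }
    Ok(())
}
```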
global_stats.rs
current = kitchen_sink.total_year_downloads(current_year)?; } let n = current[day.ordinal0() as usize]; match day.weekday() { // this sucks a bit due to mon/fri being UTC, and overlapping with the weekend // in the rest of the world. Weekday::Sat | Weekday::Sun => weekend_sum += n, _ => weekday_sum += n, }; day -= chrono::Duration::days(1); } dl.push((weekday_sum, weekend_sum)); } dl.reverse(); Ok(dl) } pub async fn render_global_stats(out: &mut impl Write, kitchen_sink: &KitchenSink, _renderer: &Renderer) -> Result<(), anyhow::Error> { let (categories, recent_crates) = try_join( category_stats(kitchen_sink), kitchen_sink.notable_recently_updated_crates(4100)).await?; let urler = Urler::new(None); let start = Utc.ymd(2015, 5, 15); // Rust 1.0 let start_week_offset = start.ordinal0()/7; let end = Utc::today() - chrono::Duration::days(2); let latest_rustc_version = end.signed_duration_since(start).num_weeks()/6; let mut compat_data = tokio::task::block_in_place(|| kitchen_sink.all_crate_compat())?; let rustc_stats_all = rustc_stats(&compat_data, latest_rustc_version as u16)?; let mut recent_compat = HashMap::with_capacity(recent_crates.len()); let mut rustc_stats_recent_num = 0; for (o, _) in recent_crates { if let Some(v) = compat_data.remove(&o) { recent_compat.insert(o, v); rustc_stats_recent_num += 1; if rustc_stats_recent_num >= 4000 { break; } } } let rustc_stats_recent = rustc_stats(&recent_compat, latest_rustc_version as u16)?; let dl = downloads_over_time(start, end, kitchen_sink)?; let (total_owners_at_month, mut hs_owner_crates) = owner_stats(kitchen_sink, start).await?; hs_owner_crates.buckets.iter_mut().take(4).for_each(|c| c.examples.truncate(6)); // normal amount of crates is boring assert!(dl.len() >= 52*2); let this_year = &dl[dl.len()-52..]; let last_year = &dl[dl.len()-52*2..dl.len()-52]; fn sum2(s: &[(u64, u64)]) -> (u64, u64) { let mut a_sum = 0; let mut b_sum = 0; s.iter().for_each(|&(a, b)| { a_sum += a; b_sum += b; }); (a_sum, b_sum) } let max_daily_downloads_rate = this_year.iter().map(move |(d, e)| (d/5).max(e/2)).max().unwrap_or(0) as u32; let mut tmp_sum = 0; let downloads_this_year = sum2(this_year); let downloads_last_year = sum2(last_year); let max_downloads_per_week = dl.iter().map(|(a, b)| a + b).max().unwrap_or(0); let max_total_owners = total_owners_at_month.iter().copied().max().unwrap_or(0); let dl_grid_line_every = (max_downloads_per_week / 6_000_000) * 1_000_000; let mut hs_deps1 = Histogram::new(kitchen_sink.get_stats_histogram("deps")?.expect("hs_deps"), true, &[0,1,2,3,4,5,6,7,8,9,10,11,12,14,16,18,20,25,30,40,60,80,100,120,150], |n| if n > 11 {format!("≥{n}")} else {n.to_string()}); let hs_deps2 = Histogram { max: hs_deps1.max, buckets: hs_deps1.buckets.split_off(10), bucket_labels: hs_deps1.bucket_labels.split_off(10), }; let rev_deps = kitchen_sink.crates_io_all_rev_deps_counts().await?; let mut hs_rev_deps = Histogram::new(rev_deps, true, &[0,1,2,5,15,50,100,250,500,750,1000,2500,5000,10000,15000,20000,50000], |n| if n > 2 {format!("≥{n}")} else {n.to_string()}); hs_rev_deps.buckets.iter_mut().take(5).for_each(|b| b.examples.truncate(5)); let age_label = |n| match n { 0..=1 => "≤1 week".to_string(), 2..=4 => format!("≤{n} weeks"), 5 => "≤1 month".to_string(), 6..=51 => format!("≤{} months", (n as f64 / (365./12./7.)).round()), 52 => "≤1 year".to_string(), _ => format!("≤{} years", (n as f64 / 52.).round()), }; let total_crate_num = kitchen_sink.all_crates().count() as u32; let stats = GlobalStats { total_crate_num, total_owners_at_month, 
max_total_owners, max_daily_downloads_rate, start_week_offset, weeks_to_reach_max_downloads: dl.iter().copied().take_while(move |(d, e)| { tmp_sum += (d + e) as u32; tmp_sum < max_daily_downloads_rate }).count() as u32, dl_per_day_this_year: (downloads_this_year.0 / 5, downloads_this_year.1 / 2), dl_per_day_last_year: (downloads_last_year.0 / 5, downloads_last_year.1 / 2), max_downloads_per_week, dl_grid_line_every, hs_releases: Histogram::new(kitchen_sink.get_stats_histogram("releases")?.expect("hs_releases"), true, &[1,2,4,8,16,32,50,100,500], |n| if n > 2 {format!("≥{n}")} else {n.to_string()}), hs_sizes: Histogram::new(kitchen_sink.get_stats_histogram("sizes")?.expect("hs_sizes"), true, &[1,10,50,100,500,1_000,5_000,10_000,20_000], |n| { let mut t = format_bytes(n*1024); t.insert(0, '≤'); t }), hs_deps1, hs_deps2, hs_maintenance: Histogram::new(kitchen_sink.get_stats_histogram("maintenance")?.expect("hs_maintenance"), false, &[0, 1, 5, 26, 52, 52*2, 52*3, 52*5, 52*6, 52*8], |n| match n { 0 => "one-off".to_string(), 1 => "≤1 week".to_string(), 2..=4 => format!("≤{n} weeks"), 5 => "≤1 month".to_string(), 6..=51 => format!("≤{} months", (n as f64 / (365./12./7.)).round()), 52 => "≤1 year".to_string(), _ => format!("≤{} years", (n as f64 / 52.).round()), }), hs_age: Histogram::new(kitchen_sink.get_stats_histogram("age")?.expect("hs_age"), false, &[5, 13, 26, 52, 52*2, 52*3, 52*4, 52*5, 52*6, 52*8], age_label), hs_languish: Histogram::new(kitchen_sink.get_stats_histogram("languish")?.expect("hs_languish"), false, &[5, 13, 26, 52, 52*2, 52*3, 52*4, 52*5, 52*6, 52*8], age_label), hs_owner_crates, categories, rustc_stats_all, rustc_stats_recent, rustc_stats_recent_num, hs_rev_deps, }; templates::global_stats(out, &Page { title: "State of the Rust/Cargo crates ecosystem".to_owned(), description: Some("How many packages there are? How many dependencies they have? Which crate is the oldest or biggest? 
Is Rust usage growing?".to_owned()),
        noindex: false,
        search_meta: true,
        critical_css_data: Some(include_str!("../../style/public/home.css")),
        critical_css_dev_url: Some("/home.css"),
        ..Default::default()
    }, &dl, &stats, &urler)?;
    Ok(())
}

#[derive(Default, Copy, Clone, Debug)]
pub struct Compat {
    pub(crate) bad: u32,
    pub(crate) maybe_bad: u32,
    pub(crate) unknown: u32,
    pub(crate) maybe_ok: u32,
    pub(crate) ok: u32,
}

impl Compat {
    pub fn sum(&self) -> u32 {
        self.bad + self.maybe_bad + self.unknown + self.maybe_ok + self.ok
    }
}

fn rustc_stats(compat: &HashMap<Origin, CompatByCrateVersion>, max_rust_version: u16) -> Result<Vec<Compat>, anyhow::Error> {
    // (ok, maybe, not), [0] is unused
    let mut rustc_versions = vec![Compat::default(); (max_rust_version + 1) as usize];
    for c in compat.values() {
        // can't compile at all
        if !c.iter().any(|(_, c)| c.has_ever_built()) {
            continue;
        }

        // stats for latest crate version only
        let latest_ver = match c.iter().rfind(|(v, _)| v.pre.is_empty()).or_else(|| c.iter().rev().next()) {
            Some((_, c)) => c,
            None => continue,
        };
        let latest_ver_bad = match c.iter().rfind(|(v, c)| v.pre.is_empty() && c.newest_bad_likely().is_some()) {
            Some((_, c)) => c,
            None => latest_ver,
        };

        let newest_bad_raw = latest_ver_bad.newest_bad_likely().unwrap_or(0);
        let newest_bad = latest_ver.newest_bad().unwrap_or(0);
        let oldest_ok = latest_ver.oldest_ok().unwrap_or(999);
        let oldest_ok_raw = latest_ver.oldest_ok_certain().unwrap_or(999);

        for (ver, c) in rustc_versions.iter_mut().enumerate() {
            let ver = ver as u16;
            if ver >= oldest_ok {
                if ver >= oldest_ok_raw {
                    c.ok += 1;
                } else {
                    c.maybe_ok += 1;
                }
            } else if ver <= newest_bad {
                if ver <= newest_bad_raw {
                    c.bad += 1;
                } else {
                    c.maybe_bad += 1;
                }
            } else {
                c.unknown += 1;
            }
        }
    }

    // resize to width
    let width = 330;
    for c in &mut rustc_versions {
        let sum = c.sum();
        c.bad = (c.bad * width + width / 2) / sum;
        c.ok = (c.ok * width + width / 2) / sum;
        c.maybe_bad = (c.maybe_bad * width + width / 2) / sum;
        c.maybe_ok = (c.maybe_ok * width + width / 2) / sum;
        c.unknown = width - c.bad - c.ok - c.maybe_bad - c.maybe_ok;
    }

    Ok(rustc_versions)
}

fn cat_slugs(sub: &'static CategoryMap) -> Vec<TreeBox> {
    let mut out = Vec::with_capacity(sub.len());
    for c in sub.values() {
        if c.slug == "uncategorized" {
            continue;
        }
        out.push(TreeBox {
            cat: c,
            label: c.name.clone(),
            title: c.name.clone(),
            count: 0,
            weight: 0.,
            bounds: treemap::Rect::new(),
            color: String::new(),
            font_size: 12.,
            sub: cat_slugs(&c.sub),
        });
    }
    out
}

#[derive(Debug, Clone)]
pub struct TreeBox {
    pub cat: &'static Category
, pub title: String, pub label: String, pub font_size: f64, /// SVG fill pub color: String, pub count: u32, pub weight: f64, pub bounds: treemap::Rect, pub sub: Vec<TreeBox>, } impl TreeBox { pub fn line_y(&self, nth: usize) -> f64 { self.bounds.y + 1. + self.font_size * 1.1 * (nth+1) as f64 } pub fn can_fit_count(&self) -> bool { self.line_y(self.label.lines().count()) + 1. - self.bounds.y < self.bounds.h } } impl treemap::Mappable for TreeBox { fn size(&self) -> f64 { self.weight } fn bounds(&self) -> &treemap::Rect { &self.bounds } fn set_bounds(&mut self, b: treemap::Rect) { self.bounds = b; } } async fn category_stats(kitchen_sink: &KitchenSink) -> Result<Vec<TreeBox>, anyhow::Error> { use treemap::*; let mut roots = cat_slugs(&CATEGORIES.root); #[track_caller] fn take_cat(slug: &str, items: &mut Vec<TreeBox>) -> TreeBox { let pos = items.iter().position(|i| i.cat.slug == slug).unwrap_or_else(|| panic!("{slug} in {items:?}")); items.swap_remove(pos) } #[track_caller] fn get_cat<'a>(slug: &str, items: &'a mut Vec<TreeBox>) -> &'a mut TreeBox { let pos = items.iter().position(|i| i.cat.slug == slug).unwrap_or_else(|| panic!("{slug} in {items:?}")); &mut items[pos] } fn new_cat(sub: Vec<TreeBox>) -> TreeBox { TreeBox { cat: CATEGORIES.root.values().next().unwrap(), title: String::new(), label: String::new(), font_size: 0., color: String::new(), count: 0, weight: 0., bounds: Rect::new(), sub, } } // names don't fit get_cat("database-implementations", &mut roots).label = "Database".into(); get_cat("simulation", &mut roots).label = "Sim".into(); get_cat("caching", &mut roots).label = "Cache".into(); get_cat("config", &mut roots).label = "Config".into(); get_cat("os", &mut roots).label = "OS".into(); get_cat("internationalization", &mut roots).label = "i18n".into(); get_cat("authentication", &mut roots).label = "Auth".into(); get_cat("visualization", &mut roots).label = "Visualize".into(); get_cat("accessibility", &mut roots).label = "a11y".into(); get_cat("compilers", &mut roots).label = "Lang".into(); get_cat("os::macos-apis", &mut get_cat("os", &mut roots).sub).label = "Apple".into(); get_cat("rendering::engine", &mut get_cat("rendering", &mut roots).sub).label = "Engine".into(); get_cat("rendering::data-formats", &mut get_cat("rendering", &mut roots).sub).label = "Formats".into(); // group them in a more sensible way let parsers = vec![take_cat("parsing", &mut roots), take_cat("parser-implementations", &mut roots)]; roots.push(new_cat(parsers)); let hw = vec![take_cat("embedded", &mut roots), take_cat("hardware-support", &mut roots), take_cat("no-std", &mut roots)]; roots.push(new_cat(hw)); let db = vec![take_cat("database", &mut roots), take_cat("database-implementations", &mut roots)]; roots.push(new_cat(db)); let gg = vec![take_cat("game-development", &mut roots), take_cat("games", &mut roots)]; roots.push(new_cat(gg)); let int = take_cat("command-line-interface", &mut roots); let cli = vec![int, take_cat("command-line-utilities", &mut roots)]; roots.push(new_cat(cli)); let mut editors = take_cat("text-editors", &mut roots); editors.label = "Editors".into(); let txt = vec![ take_cat("text-processing", &mut roots), editors, take_cat("template-engine", &mut roots), take_cat("value-formatting", &mut roots), ]; roots.push(new_cat(txt)); let wasm = take_cat("wasm", &mut roots); get_cat("web-programming", &mut roots).sub.push(wasm); let mut asyn = take_cat("asynchronous", &mut roots); asyn.label = "Async".into(); get_cat("network-programming", &mut roots).sub.push(asyn); let mut proc = 
take_cat("development-tools::procedural-macro-helpers", &mut get_cat("development-tools", &mut roots).sub); proc.label = "Proc macros".into(); get_cat("rust-patterns", &mut roots).sub.push(proc); let concurrency = take_cat("concurrency", &mut roots); get_cat("rust-patterns", &mut roots).sub.push(concurrency); let mut cr = get_cat("cryptography", &mut roots).sub.remove(0); cr.label = "Crypto Magic Beans".into(); roots.push(cr); // first layout of top-level boxes (won't be used for anything other than second layout) for top in roots.iter_mut() { let (count, weight) = if top.label.is_empty() { (0, 0.) } else { kitchen_sink.category_crate_count(&top.cat.slug).await? }; top.count = count; top.weight = weight; let mut top_copy = top.clone(); top_copy.sub = Vec::new(); for i in top.sub.iter_mut() { let (count, weight) = kitchen_sink.category_crate_count(&i.cat.slug).await?; i.count = count; i.weight = weight; top.count += i.count; top.weight += i.weight; assert!(i.sub.is_empty()); } if top_copy.count > 0 { top.sub.insert(0, top_copy); } } let mut items_flattened = Vec::new(); let layout = TreemapLayout::new(); layout.layout_items(&mut roots, Rect::from_points(0.0, 0.0, 1000., 600.)); for parent in roots.iter_mut() { let layout = TreemapLayout::new(); layout.layout_items(&mut parent.sub, parent.bounds); items_flattened.append(&mut parent.sub); } postprocess_treebox_items(&mut items_flattened); Ok(items_flattened) } fn postprocess_treebox_items(items: &mut Vec<TreeBox>) { let colors = [ [0xff, 0xf1, 0xe6], [0xe2, 0xec, 0xe9], [0xDC, 0xED, 0xC1], [0xcd, 0xda, 0xfd], [0xbe, 0xe1, 0xe6], [0xfd, 0xe2, 0xe4], [0xdf, 0xe7, 0xfd], [0xFF, 0xD3, 0xB6], [0xea, 0xe4, 0xe9], [0xd0, 0xd1, 0xff], [0xf4, 0xda, 0xe2], [0xde, 0xc3, 0xe1], [0xd4, 0xe0, 0xf9], [0xFF, 0xD3, 0xB6], [0xDF, 0xCB, 0xD2], ]; let len = items.len() as f32; for (i, item) in &mut items.iter_mut().enumerate() { let x = 0.8 + (i as f32 / len) * 0.2; let c = colors[i % colors.len()]; let c = [ (c[0] as f32 * x + (1. - x) * 200.) as u8, (c[1] as f32 * x + (1. - x) * 100.) as u8, (c[2] as f32 * x + (1. - x) * 200.) as u8 ]; let mut l = lab::Lab::from_rgb(&c); l.l = (l.l + 90.) * 0.5; // fix my bad palette let c = l.to_rgb(); item.color = format!("#{:02x}{:02x}{:02x}", c[0], c[1], c[2]); let ideal_max_width = (item.bounds.w * 1.2 / (item.font_size / 1.7)) as usize; let maybe_label = textwrap::wrap(&item.label, textwrap::Options::new(ideal_max_width).break_words(false)); let chars = maybe_label.iter().map(|w| w.len()).max().unwrap_or(1); let lines = maybe_label.len(); let try_font_size = item.font_size .min(item.bounds.h / (lines as f64 * 1.05) - 4.) .min(item.bounds.w * 1.6 / chars as f64) .max(4.); let max_width = (item.bounds.w / (try_font_size / 1.7)) as usize; let must_break = ideal_max_width < chars * 2 / 3 && item.bounds.h > item.font_size * 2.; let label = textwrap::wrap(&item.label, textwrap::Options::new(max_width).break_words(must_break)); let chars = label.iter().map(|w| w.len()).max().unwrap_or(1); let lines = label.len(); item.label = label.join("\n"); item.font_size = item.font_size .min(item.bounds.h / (lines as f64 * 1.05) - 4.) 
.min(item.bounds.w * 1.6 / chars as f64) .max(4.); } } async fn owner_stats(kitchen_sink: &KitchenSink, start: Date<Utc>) -> Result<(Vec<u32>, Histogram), anyhow::Error> { let all_owners = kitchen_sink.crate_all_owners().await?; eprintln!("got {} owners", all_owners.len()); assert!(all_owners.len() > 1000); let mut owner_crates_with_ids = HashMap::new(); let mut total_owners_at_month = vec![0u32; (Utc::today().signed_duration_since(
ategory
identifier_name
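A note on the "resize to width" step in rustc_stats above: each Compat bucket is rescaled so that its five segments add up to a fixed bar width in pixels, with a `width / 2` bias added before the integer division and the `unknown` segment absorbing whatever rounding error remains, so every bar is exactly `width` wide. A minimal standalone sketch of the same idea (three segments instead of five; the function name and numbers are mine, not from the source, and like the original it assumes the rounded shares don't overshoot the total):

fn scale_to_width(counts: [u32; 3], width: u32) -> [u32; 3] {
    let sum: u32 = counts.iter().sum();
    // Integer shares, mirroring `(c.bad * width + width / 2) / sum` above.
    let a = (counts[0] * width + width / 2) / sum;
    let b = (counts[1] * width + width / 2) / sum;
    // The last segment absorbs the rounding error, as `unknown` does above.
    [a, b, width - (a + b)]
}

fn main() {
    let bar = scale_to_width([50, 20, 30], 330);
    assert_eq!(bar.iter().sum::<u32>(), 330); // always exactly `width`
    println!("{bar:?}"); // [166, 67, 97]
}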
global_stats.rs
current = kitchen_sink.total_year_downloads(current_year)?; } let n = current[day.ordinal0() as usize]; match day.weekday() { // this sucks a bit due to mon/fri being UTC, and overlapping with the weekend // in the rest of the world. Weekday::Sat | Weekday::Sun => weekend_sum += n, _ => weekday_sum += n, }; day -= chrono::Duration::days(1); } dl.push((weekday_sum, weekend_sum)); } dl.reverse(); Ok(dl) } pub async fn render_global_stats(out: &mut impl Write, kitchen_sink: &KitchenSink, _renderer: &Renderer) -> Result<(), anyhow::Error> { let (categories, recent_crates) = try_join( category_stats(kitchen_sink), kitchen_sink.notable_recently_updated_crates(4100)).await?; let urler = Urler::new(None); let start = Utc.ymd(2015, 5, 15); // Rust 1.0 let start_week_offset = start.ordinal0()/7; let end = Utc::today() - chrono::Duration::days(2); let latest_rustc_version = end.signed_duration_since(start).num_weeks()/6; let mut compat_data = tokio::task::block_in_place(|| kitchen_sink.all_crate_compat())?; let rustc_stats_all = rustc_stats(&compat_data, latest_rustc_version as u16)?; let mut recent_compat = HashMap::with_capacity(recent_crates.len()); let mut rustc_stats_recent_num = 0; for (o, _) in recent_crates { if let Some(v) = compat_data.remove(&o) { recent_compat.insert(o, v); rustc_stats_recent_num += 1; if rustc_stats_recent_num >= 4000 { break; } } } let rustc_stats_recent = rustc_stats(&recent_compat, latest_rustc_version as u16)?; let dl = downloads_over_time(start, end, kitchen_sink)?; let (total_owners_at_month, mut hs_owner_crates) = owner_stats(kitchen_sink, start).await?; hs_owner_crates.buckets.iter_mut().take(4).for_each(|c| c.examples.truncate(6)); // normal amount of crates is boring assert!(dl.len() >= 52*2); let this_year = &dl[dl.len()-52..]; let last_year = &dl[dl.len()-52*2..dl.len()-52]; fn sum2(s: &[(u64, u64)]) -> (u64, u64) { let mut a_sum = 0; let mut b_sum = 0; s.iter().for_each(|&(a, b)| { a_sum += a; b_sum += b; }); (a_sum, b_sum) } let max_daily_downloads_rate = this_year.iter().map(move |(d, e)| (d/5).max(e/2)).max().unwrap_or(0) as u32; let mut tmp_sum = 0; let downloads_this_year = sum2(this_year); let downloads_last_year = sum2(last_year); let max_downloads_per_week = dl.iter().map(|(a, b)| a + b).max().unwrap_or(0); let max_total_owners = total_owners_at_month.iter().copied().max().unwrap_or(0); let dl_grid_line_every = (max_downloads_per_week / 6_000_000) * 1_000_000; let mut hs_deps1 = Histogram::new(kitchen_sink.get_stats_histogram("deps")?.expect("hs_deps"), true, &[0,1,2,3,4,5,6,7,8,9,10,11,12,14,16,18,20,25,30,40,60,80,100,120,150], |n| if n > 11 {format!("≥{n}")} else {n.to_string()}); let hs_deps2 = Histogram { max: hs_deps1.max, buckets: hs_deps1.buckets.split_off(10), bucket_labels: hs_deps1.bucket_labels.split_off(10), }; let rev_deps = kitchen_sink.crates_io_all_rev_deps_counts().await?; let mut hs_rev_deps = Histogram::new(rev_deps, true, &[0,1,2,5,15,50,100,250,500,750,1000,2500,5000,10000,15000,20000,50000], |n| if n > 2 {format!("≥{n}")} else {n.to_string()}); hs_rev_deps.buckets.iter_mut().take(5).for_each(|b| b.examples.truncate(5)); let age_label = |n| match n { 0..=1 => "≤1 week".to_string(), 2..=4 => format!("≤{n} weeks"), 5 => "≤1 month".to_string(), 6..=51 => format!("≤{} months", (n as f64 / (365./12./7.)).round()), 52 => "≤1 year".to_string(), _ => format!("≤{} years", (n as f64 / 52.).round()), }; let total_crate_num = kitchen_sink.all_crates().count() as u32; let stats = GlobalStats { total_crate_num, total_owners_at_month, 
max_total_owners, max_daily_downloads_rate, start_week_offset, weeks_to_reach_max_downloads: dl.iter().copied().take_while(move |(d, e)| { tmp_sum += (d + e) as u32; tmp_sum < max_daily_downloads_rate }).count() as u32, dl_per_day_this_year: (downloads_this_year.0 / 5, downloads_this_year.1 / 2), dl_per_day_last_year: (downloads_last_year.0 / 5, downloads_last_year.1 / 2), max_downloads_per_week, dl_grid_line_every, hs_releases: Histogram::new(kitchen_sink.get_stats_histogram("releases")?.expect("hs_releases"), true, &[1,2,4,8,16,32,50,100,500], |n| if n > 2 {format!("≥{n}")} else {n.to_string()}), hs_sizes: Histogram::new(kitchen_sink.get_stats_histogram("sizes")?.expect("hs_sizes"), true, &[1,10,50,100,500,1_000,5_000,10_000,20_000], |n| { let mut t = format_bytes(n*1024); t.insert(0, '≤'); t }), hs_deps1, hs_deps2, hs_maintenance: Histogram::new(kitchen_sink.get_stats_histogram("maintenance")?.expect("hs_maintenance"), false, &[0, 1, 5, 26, 52, 52*2, 52*3, 52*5, 52*6, 52*8], |n| match n { 0 => "one-off".to_string(), 1 => "≤1 week".to_string(), 2..=4 => format!("≤{n} weeks"), 5 => "≤1 month".to_string(), 6..=51 => format!("≤{} months", (n as f64 / (365./12./7.)).round()), 52 => "≤1 year".to_string(), _ => format!("≤{} years", (n as f64 / 52.).round()), }), hs_age: Histogram::new(kitchen_sink.get_stats_histogram("age")?.expect("hs_age"), false, &[5, 13, 26, 52, 52*2, 52*3, 52*4, 52*5, 52*6, 52*8], age_label), hs_languish: Histogram::new(kitchen_sink.get_stats_histogram("languish")?.expect("hs_languish"), false, &[5, 13, 26, 52, 52*2, 52*3, 52*4, 52*5, 52*6, 52*8], age_label), hs_owner_crates, categories, rustc_stats_all, rustc_stats_recent, rustc_stats_recent_num, hs_rev_deps, }; templates::global_stats(out, &Page { title: "State of the Rust/Cargo crates ecosystem".to_owned(), description: Some("How many packages there are? How many dependencies they have? Which crate is the oldest or biggest? 
Is Rust usage growing?".to_owned()), noindex: false, search_meta: true, critical_css_data: Some(include_str!("../../style/public/home.css")), critical_css_dev_url: Some("/home.css"), ..Default::default() }, &dl, &stats, &urler)?; Ok(()) } #[derive(Default, Copy, Clone, Debug)] pub struct Compat { pub(crate) bad: u32, pub(crate) maybe_bad: u32, pub(crate) unknown: u32, pub(crate) maybe_ok: u32, pub(crate) ok: u32, } impl Compat { pub fn sum(&self) -> u32 { self.bad + self.maybe_bad + self.unknown + self.maybe_ok + self.ok } } fn rustc_stats(compat: &HashMap<Origin, CompatByCrateVersion>, max_rust_version: u16) -> Result<Vec<Compat>, anyhow::Error> { // (ok, maybe, not), [0] is unused let mut rustc_versions = vec![Compat::default(); (max_rust_version+1) as usize]; for c in compat.values() { // can't compile at all if!c.iter().any(|(_, c)| c.has_ever_built()) { continue; } // stats for latest crate version only let latest_ver = match c.iter().rfind(|(v, _)| v.pre.is_empty()).or_else(|| c.iter().rev().next()) { Some((_, c)) => c, None => continue, }; let latest_ver_bad = match c.iter().rfind(|(v, c)| v.pre.is_empty() && c.newest_bad_likely().is_some()) { Some((_, c)) => c, None => latest_ver, }; let newest_bad_raw = latest_ver_bad.newest_bad_likely().unwrap_or(0); let newest_bad = latest_ver.newest_bad().unwrap_or(0); let oldest_ok = latest_ver.oldest_ok().unwrap_or(999); let oldest_ok_raw = latest_ver.oldest_ok_certain().unwrap_or(999); for (ver, c) in rustc_versions.iter_mut().enumerate() { let ver = ver as u16; if ver >= oldest_ok { if ver >= oldest_ok_raw { c.ok += 1; } else { c.maybe_ok += 1; } } else if ver <= newest_bad { if ver <= newest_bad_raw { c.bad += 1; } else { c.maybe_bad += 1; } } else { c.unknown += 1; } } } // resize to width let width = 330; for c in &mut rustc_versions { let sum = c.sum(); c.bad = (c.bad * width + width / 2) / sum; c.ok = (c.ok * width + width / 2) / sum; c.maybe_bad = (c.maybe_bad * width + width / 2) / sum; c.maybe_ok = (c.maybe_ok * width + width / 2) / sum; c.unknown = width - c.bad - c.ok - c.maybe_bad - c.maybe_ok; } Ok(rustc_versions) } fn cat_slugs(sub: &'static CategoryMap) -> Vec<TreeBox> { let mut out = Vec::with_capacity(sub.len()); for c in sub.values() { if c.slug == "uncategorized" { continue; } out.push(TreeBox { cat: c, label: c.name.clone(), title: c.name.clone(), count: 0, weight: 0., bounds: treemap::Rect::new(), color: String::new(), font_size: 12., sub: cat_slugs(&c.sub), }); } out } #[derive(Debug, Clone)] pub struct TreeBox { pub cat: &'static Category, pub title: String, pub label: String, pub font_size: f64, /// SVG fill pub color: String, pub count: u32, pub weight: f64, pub bounds: treemap::Rect, pub sub: Vec<TreeBox>, } impl TreeBox { pub fn line_y(&self, nth: usize) -> f64 { self.bounds.y + 1. + self.font_size * 1.1 * (nth+1) as f64 } pub fn can_fit_count(&self) -> bool { self.line_y(self.label.lines().count()) + 1. 
- self.bounds.y < self.bounds.h } } impl treemap::Mappable for TreeBox { fn size(&self) -> f64 { self.weight } fn bounds(&self) -> &treemap::Rect { &self.bounds } fn set_bounds(&mut self, b: treemap::Rect) { self.bounds = b; } } async fn category_stats(kitchen_sink: &KitchenSink) -> Result<Vec<TreeBox>, anyhow::Error> { use treemap::*; let mut roots = cat_slugs(&CATEGORIES.root); #[track_caller] fn take_cat(slug: &str, items: &mut Vec<TreeBox>) -> TreeBox { let pos = items.iter().position(|i| i.cat.slug == slug).unwrap_or_else(|| panic!("{slug} in {items:?}")); items.swap_remove(pos) } #[track_caller] fn get_cat<'a>(slug: &str, items: &'a mut Vec<TreeBox>) -> &'a mut TreeBox { let pos = items.iter().position(|i| i.cat.slug == slug).unwrap_or_else(|| panic!("{slug} in {items:?}")); &mut items[pos] } fn new_cat(sub: Vec<TreeBox>) -> TreeBox { TreeBox { c
cat("database-implementations", &mut roots).label = "Database".into(); get_cat("simulation", &mut roots).label = "Sim".into(); get_cat("caching", &mut roots).label = "Cache".into(); get_cat("config", &mut roots).label = "Config".into(); get_cat("os", &mut roots).label = "OS".into(); get_cat("internationalization", &mut roots).label = "i18n".into(); get_cat("authentication", &mut roots).label = "Auth".into(); get_cat("visualization", &mut roots).label = "Visualize".into(); get_cat("accessibility", &mut roots).label = "a11y".into(); get_cat("compilers", &mut roots).label = "Lang".into(); get_cat("os::macos-apis", &mut get_cat("os", &mut roots).sub).label = "Apple".into(); get_cat("rendering::engine", &mut get_cat("rendering", &mut roots).sub).label = "Engine".into(); get_cat("rendering::data-formats", &mut get_cat("rendering", &mut roots).sub).label = "Formats".into(); // group them in a more sensible way let parsers = vec![take_cat("parsing", &mut roots), take_cat("parser-implementations", &mut roots)]; roots.push(new_cat(parsers)); let hw = vec![take_cat("embedded", &mut roots), take_cat("hardware-support", &mut roots), take_cat("no-std", &mut roots)]; roots.push(new_cat(hw)); let db = vec![take_cat("database", &mut roots), take_cat("database-implementations", &mut roots)]; roots.push(new_cat(db)); let gg = vec![take_cat("game-development", &mut roots), take_cat("games", &mut roots)]; roots.push(new_cat(gg)); let int = take_cat("command-line-interface", &mut roots); let cli = vec![int, take_cat("command-line-utilities", &mut roots)]; roots.push(new_cat(cli)); let mut editors = take_cat("text-editors", &mut roots); editors.label = "Editors".into(); let txt = vec![ take_cat("text-processing", &mut roots), editors, take_cat("template-engine", &mut roots), take_cat("value-formatting", &mut roots), ]; roots.push(new_cat(txt)); let wasm = take_cat("wasm", &mut roots); get_cat("web-programming", &mut roots).sub.push(wasm); let mut asyn = take_cat("asynchronous", &mut roots); asyn.label = "Async".into(); get_cat("network-programming", &mut roots).sub.push(asyn); let mut proc = take_cat("development-tools::procedural-macro-helpers", &mut get_cat("development-tools", &mut roots).sub); proc.label = "Proc macros".into(); get_cat("rust-patterns", &mut roots).sub.push(proc); let concurrency = take_cat("concurrency", &mut roots); get_cat("rust-patterns", &mut roots).sub.push(concurrency); let mut cr = get_cat("cryptography", &mut roots).sub.remove(0); cr.label = "Crypto Magic Beans".into(); roots.push(cr); // first layout of top-level boxes (won't be used for anything other than second layout) for top in roots.iter_mut() { let (count, weight) = if top.label.is_empty() { (0, 0.) } else { kitchen_sink.category_crate_count(&top.cat.slug).await? 
}; top.count = count; top.weight = weight; let mut top_copy = top.clone(); top_copy.sub = Vec::new(); for i in top.sub.iter_mut() { let (count, weight) = kitchen_sink.category_crate_count(&i.cat.slug).await?; i.count = count; i.weight = weight; top.count += i.count; top.weight += i.weight; assert!(i.sub.is_empty()); } if top_copy.count > 0 { top.sub.insert(0, top_copy); } } let mut items_flattened = Vec::new(); let layout = TreemapLayout::new(); layout.layout_items(&mut roots, Rect::from_points(0.0, 0.0, 1000., 600.)); for parent in roots.iter_mut() { let layout = TreemapLayout::new(); layout.layout_items(&mut parent.sub, parent.bounds); items_flattened.append(&mut parent.sub); } postprocess_treebox_items(&mut items_flattened); Ok(items_flattened) } fn postprocess_treebox_items(items: &mut Vec<TreeBox>) { let colors = [ [0xff, 0xf1, 0xe6], [0xe2, 0xec, 0xe9], [0xDC, 0xED, 0xC1], [0xcd, 0xda, 0xfd], [0xbe, 0xe1, 0xe6], [0xfd, 0xe2, 0xe4], [0xdf, 0xe7, 0xfd], [0xFF, 0xD3, 0xB6], [0xea, 0xe4, 0xe9], [0xd0, 0xd1, 0xff], [0xf4, 0xda, 0xe2], [0xde, 0xc3, 0xe1], [0xd4, 0xe0, 0xf9], [0xFF, 0xD3, 0xB6], [0xDF, 0xCB, 0xD2], ]; let len = items.len() as f32; for (i, item) in &mut items.iter_mut().enumerate() { let x = 0.8 + (i as f32 / len) * 0.2; let c = colors[i % colors.len()]; let c = [ (c[0] as f32 * x + (1. - x) * 200.) as u8, (c[1] as f32 * x + (1. - x) * 100.) as u8, (c[2] as f32 * x + (1. - x) * 200.) as u8 ]; let mut l = lab::Lab::from_rgb(&c); l.l = (l.l + 90.) * 0.5; // fix my bad palette let c = l.to_rgb(); item.color = format!("#{:02x}{:02x}{:02x}", c[0], c[1], c[2]); let ideal_max_width = (item.bounds.w * 1.2 / (item.font_size / 1.7)) as usize; let maybe_label = textwrap::wrap(&item.label, textwrap::Options::new(ideal_max_width).break_words(false)); let chars = maybe_label.iter().map(|w| w.len()).max().unwrap_or(1); let lines = maybe_label.len(); let try_font_size = item.font_size .min(item.bounds.h / (lines as f64 * 1.05) - 4.) .min(item.bounds.w * 1.6 / chars as f64) .max(4.); let max_width = (item.bounds.w / (try_font_size / 1.7)) as usize; let must_break = ideal_max_width < chars * 2 / 3 && item.bounds.h > item.font_size * 2.; let label = textwrap::wrap(&item.label, textwrap::Options::new(max_width).break_words(must_break)); let chars = label.iter().map(|w| w.len()).max().unwrap_or(1); let lines = label.len(); item.label = label.join("\n"); item.font_size = item.font_size .min(item.bounds.h / (lines as f64 * 1.05) - 4.) .min(item.bounds.w * 1.6 / chars as f64) .max(4.); } } async fn owner_stats(kitchen_sink: &KitchenSink, start: Date<Utc>) -> Result<(Vec<u32>, Histogram), anyhow::Error> { let all_owners = kitchen_sink.crate_all_owners().await?; eprintln!("got {} owners", all_owners.len()); assert!(all_owners.len() > 1000); let mut owner_crates_with_ids = HashMap::new(); let mut total_owners_at_month = vec![0u32; (Utc::today().signed_duration_since(
at: CATEGORIES.root.values().next().unwrap(), title: String::new(), label: String::new(), font_size: 0., color: String::new(), count: 0, weight: 0., bounds: Rect::new(), sub, } } // names don't fit get_
identifier_body
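The nested treemap in category_stats above is laid out in two passes: one layout_items call splits the whole canvas between the top-level boxes, then one call per parent splits that parent's rectangle between its children. A stripped-down sketch of the pattern, using only the treemap-crate calls already visible above (the Item type is hypothetical; note the parent's weight equals the sum of its children, as category_stats arranges):

use treemap::{Mappable, Rect, TreemapLayout};

struct Item {
    weight: f64,
    bounds: Rect,
    sub: Vec<Item>,
}

impl Mappable for Item {
    fn size(&self) -> f64 { self.weight }
    fn bounds(&self) -> &Rect { &self.bounds }
    fn set_bounds(&mut self, b: Rect) { self.bounds = b; }
}

fn main() {
    let leaf = |weight| Item { weight, bounds: Rect::new(), sub: Vec::new() };
    let mut roots = vec![
        Item { weight: 3.0, bounds: Rect::new(), sub: vec![leaf(2.0), leaf(1.0)] },
        leaf(1.0),
    ];
    // Pass 1: split the canvas between top-level items by weight.
    TreemapLayout::new().layout_items(&mut roots, Rect::from_points(0.0, 0.0, 1000., 600.));
    // Pass 2: split each parent's rectangle between its children.
    for parent in roots.iter_mut() {
        TreemapLayout::new().layout_items(&mut parent.sub, parent.bounds);
    }
}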
global_stats.rs
current = kitchen_sink.total_year_downloads(current_year)?; } let n = current[day.ordinal0() as usize]; match day.weekday() { // this sucks a bit due to mon/fri being UTC, and overlapping with the weekend // in the rest of the world. Weekday::Sat | Weekday::Sun => weekend_sum += n, _ => weekday_sum += n, }; day -= chrono::Duration::days(1); } dl.push((weekday_sum, weekend_sum)); } dl.reverse(); Ok(dl) } pub async fn render_global_stats(out: &mut impl Write, kitchen_sink: &KitchenSink, _renderer: &Renderer) -> Result<(), anyhow::Error> { let (categories, recent_crates) = try_join( category_stats(kitchen_sink), kitchen_sink.notable_recently_updated_crates(4100)).await?; let urler = Urler::new(None); let start = Utc.ymd(2015, 5, 15); // Rust 1.0 let start_week_offset = start.ordinal0()/7; let end = Utc::today() - chrono::Duration::days(2); let latest_rustc_version = end.signed_duration_since(start).num_weeks()/6; let mut compat_data = tokio::task::block_in_place(|| kitchen_sink.all_crate_compat())?; let rustc_stats_all = rustc_stats(&compat_data, latest_rustc_version as u16)?; let mut recent_compat = HashMap::with_capacity(recent_crates.len()); let mut rustc_stats_recent_num = 0; for (o, _) in recent_crates { if let Some(v) = compat_data.remove(&o) { recent_compat.insert(o, v); rustc_stats_recent_num += 1; if rustc_stats_recent_num >= 4000 { break; } } } let rustc_stats_recent = rustc_stats(&recent_compat, latest_rustc_version as u16)?; let dl = downloads_over_time(start, end, kitchen_sink)?; let (total_owners_at_month, mut hs_owner_crates) = owner_stats(kitchen_sink, start).await?; hs_owner_crates.buckets.iter_mut().take(4).for_each(|c| c.examples.truncate(6)); // normal amount of crates is boring assert!(dl.len() >= 52*2); let this_year = &dl[dl.len()-52..]; let last_year = &dl[dl.len()-52*2..dl.len()-52]; fn sum2(s: &[(u64, u64)]) -> (u64, u64) { let mut a_sum = 0; let mut b_sum = 0; s.iter().for_each(|&(a, b)| { a_sum += a; b_sum += b; }); (a_sum, b_sum) } let max_daily_downloads_rate = this_year.iter().map(move |(d, e)| (d/5).max(e/2)).max().unwrap_or(0) as u32; let mut tmp_sum = 0; let downloads_this_year = sum2(this_year); let downloads_last_year = sum2(last_year); let max_downloads_per_week = dl.iter().map(|(a, b)| a + b).max().unwrap_or(0); let max_total_owners = total_owners_at_month.iter().copied().max().unwrap_or(0); let dl_grid_line_every = (max_downloads_per_week / 6_000_000) * 1_000_000; let mut hs_deps1 = Histogram::new(kitchen_sink.get_stats_histogram("deps")?.expect("hs_deps"), true, &[0,1,2,3,4,5,6,7,8,9,10,11,12,14,16,18,20,25,30,40,60,80,100,120,150], |n| if n > 11 {format!("≥{n}")} else {n.to_string()}); let hs_deps2 = Histogram { max: hs_deps1.max, buckets: hs_deps1.buckets.split_off(10), bucket_labels: hs_deps1.bucket_labels.split_off(10), }; let rev_deps = kitchen_sink.crates_io_all_rev_deps_counts().await?; let mut hs_rev_deps = Histogram::new(rev_deps, true, &[0,1,2,5,15,50,100,250,500,750,1000,2500,5000,10000,15000,20000,50000], |n| if n > 2 {format!("≥{n}")} else {n.to_string()}); hs_rev_deps.buckets.iter_mut().take(5).for_each(|b| b.examples.truncate(5)); let age_label = |n| match n { 0..=1 => "≤1 week".to_string(), 2..=4 => format!("≤{n} weeks"), 5 => "≤1 month".to_string(), 6..=51 => format!("≤{} months", (n as f64 / (365./12./7.)).round()), 52 => "≤1 year".to_string(), _ => format!("≤{} years", (n as f64 / 52.).round()), }; let total_crate_num = kitchen_sink.all_crates().count() as u32; let stats = GlobalStats { total_crate_num, total_owners_at_month, 
max_total_owners, max_daily_downloads_rate, start_week_offset, weeks_to_reach_max_downloads: dl.iter().copied().take_while(move |(d, e)| { tmp_sum += (d + e) as u32; tmp_sum < max_daily_downloads_rate }).count() as u32, dl_per_day_this_year: (downloads_this_year.0 / 5, downloads_this_year.1 / 2), dl_per_day_last_year: (downloads_last_year.0 / 5, downloads_last_year.1 / 2), max_downloads_per_week, dl_grid_line_every, hs_releases: Histogram::new(kitchen_sink.get_stats_histogram("releases")?.expect("hs_releases"), true, &[1,2,4,8,16,32,50,100,500], |n| if n > 2 {format!("≥{n}")} else {n.to_string()}), hs_sizes: Histogram::new(kitchen_sink.get_stats_histogram("sizes")?.expect("hs_sizes"), true, &[1,10,50,100,500,1_000,5_000,10_000,20_000], |n| { let mut t = format_bytes(n*1024); t.insert(0, '≤'); t }), hs_deps1, hs_deps2, hs_maintenance: Histogram::new(kitchen_sink.get_stats_histogram("maintenance")?.expect("hs_maintenance"), false, &[0, 1, 5, 26, 52, 52*2, 52*3, 52*5, 52*6, 52*8], |n| match n { 0 => "one-off".to_string(), 1 => "≤1 week".to_string(), 2..=4 => format!("≤{n} weeks"), 5 => "≤1 month".to_string(), 6..=51 => format!("≤{} months", (n as f64 / (365./12./7.)).round()), 52 => "≤1 year".to_string(), _ => format!("≤{} years", (n as f64 / 52.).round()), }), hs_age: Histogram::new(kitchen_sink.get_stats_histogram("age")?.expect("hs_age"), false, &[5, 13, 26, 52, 52*2, 52*3, 52*4, 52*5, 52*6, 52*8], age_label), hs_languish: Histogram::new(kitchen_sink.get_stats_histogram("languish")?.expect("hs_languish"), false, &[5, 13, 26, 52, 52*2, 52*3, 52*4, 52*5, 52*6, 52*8], age_label), hs_owner_crates, categories, rustc_stats_all, rustc_stats_recent, rustc_stats_recent_num, hs_rev_deps, }; templates::global_stats(out, &Page { title: "State of the Rust/Cargo crates ecosystem".to_owned(), description: Some("How many packages there are? How many dependencies they have? Which crate is the oldest or biggest? 
Is Rust usage growing?".to_owned()), noindex: false, search_meta: true, critical_css_data: Some(include_str!("../../style/public/home.css")), critical_css_dev_url: Some("/home.css"), ..Default::default() }, &dl, &stats, &urler)?; Ok(()) } #[derive(Default, Copy, Clone, Debug)] pub struct Compat { pub(crate) bad: u32, pub(crate) maybe_bad: u32, pub(crate) unknown: u32, pub(crate) maybe_ok: u32, pub(crate) ok: u32, } impl Compat { pub fn sum(&self) -> u32 { self.bad + self.maybe_bad + self.unknown + self.maybe_ok + self.ok } } fn rustc_stats(compat: &HashMap<Origin, CompatByCrateVersion>, max_rust_version: u16) -> Result<Vec<Compat>, anyhow::Error> { // (ok, maybe, not), [0] is unused let mut rustc_versions = vec![Compat::default(); (max_rust_version+1) as usize]; for c in compat.values() { // can't compile at all if!c.iter().any(|(_, c)| c.has_ever_built()) { continue; } // stats for latest crate version only let latest_ver = match c.iter().rfind(|(v, _)| v.pre.is_empty()).or_else(|| c.iter().rev().next()) { Some((_, c)) => c, None => continue, }; let latest_ver_bad = match c.iter().rfind(|(v, c)| v.pre.is_empty() && c.newest_bad_likely().is_some()) { Some((_, c)) => c, None => latest_ver, }; let newest_bad_raw = latest_ver_bad.newest_bad_likely().unwrap_or(0); let newest_bad = latest_ver.newest_bad().unwrap_or(0); let oldest_ok = latest_ver.oldest_ok().unwrap_or(999); let oldest_ok_raw = latest_ver.oldest_ok_certain().unwrap_or(999); for (ver, c) in rustc_versions.iter_mut().enumerate() { let ver = ver as u16; if ver >= oldest_ok { if ver >= oldest_ok_raw { c.ok += 1; } else { c.maybe_ok += 1; } } else if ver <= newest_bad { if ver <= newest_bad_raw { c.bad += 1; } else { c.maybe_bad += 1; } } else { c.unknown += 1; } } } // resize to width let width = 330; for c in &mut rustc_versions { let sum = c.sum(); c.bad = (c.bad * width + width / 2) / sum; c.ok = (c.ok * width + width / 2) / sum; c.maybe_bad = (c.maybe_bad * width + width / 2) / sum; c.maybe_ok = (c.maybe_ok * width + width / 2) / sum; c.unknown = width - c.bad - c.ok - c.maybe_bad - c.maybe_ok; } Ok(rustc_versions) } fn cat_slugs(sub: &'static CategoryMap) -> Vec<TreeBox> { let mut out = Vec::with_capacity(sub.len()); for c in sub.values() { if c.slug == "uncategorized" { continue; } out.push(TreeBox { cat: c, label: c.name.clone(), title: c.name.clone(), count: 0, weight: 0., bounds: treemap::Rect::new(), color: String::new(), font_size: 12., sub: cat_slugs(&c.sub), }); } out } #[derive(Debug, Clone)] pub struct TreeBox { pub cat: &'static Category, pub title: String, pub label: String, pub font_size: f64, /// SVG fill pub color: String, pub count: u32, pub weight: f64, pub bounds: treemap::Rect, pub sub: Vec<TreeBox>, } impl TreeBox { pub fn line_y(&self, nth: usize) -> f64 { self.bounds.y + 1. + self.font_size * 1.1 * (nth+1) as f64 } pub fn can_fit_count(&self) -> bool { self.line_y(self.label.lines().count()) + 1. 
- self.bounds.y < self.bounds.h } } impl treemap::Mappable for TreeBox { fn size(&self) -> f64 { self.weight } fn bounds(&self) -> &treemap::Rect { &self.bounds } fn set_bounds(&mut self, b: treemap::Rect) { self.bounds = b; } } async fn category_stats(kitchen_sink: &KitchenSink) -> Result<Vec<TreeBox>, anyhow::Error> { use treemap::*; let mut roots = cat_slugs(&CATEGORIES.root); #[track_caller] fn take_cat(slug: &str, items: &mut Vec<TreeBox>) -> TreeBox { let pos = items.iter().position(|i| i.cat.slug == slug).unwrap_or_else(|| panic!("{slug} in {items:?}")); items.swap_remove(pos) } #[track_caller] fn get_cat<'a>(slug: &str, items: &'a mut Vec<TreeBox>) -> &'a mut TreeBox { let pos = items.iter().position(|i| i.cat.slug == slug).unwrap_or_else(|| panic!("{slug} in {items:?}")); &mut items[pos] } fn new_cat(sub: Vec<TreeBox>) -> TreeBox { TreeBox { cat: CATEGORIES.root.values().next().unwrap(), title: String::new(), label: String::new(), font_size: 0., color: String::new(), count: 0, weight: 0., bounds: Rect::new(), sub, } } // names don't fit get_cat("database-implementations", &mut roots).label = "Database".into(); get_cat("simulation", &mut roots).label = "Sim".into(); get_cat("caching", &mut roots).label = "Cache".into(); get_cat("config", &mut roots).label = "Config".into(); get_cat("os", &mut roots).label = "OS".into(); get_cat("internationalization", &mut roots).label = "i18n".into(); get_cat("authentication", &mut roots).label = "Auth".into(); get_cat("visualization", &mut roots).label = "Visualize".into(); get_cat("accessibility", &mut roots).label = "a11y".into(); get_cat("compilers", &mut roots).label = "Lang".into(); get_cat("os::macos-apis", &mut get_cat("os", &mut roots).sub).label = "Apple".into(); get_cat("rendering::engine", &mut get_cat("rendering", &mut roots).sub).label = "Engine".into(); get_cat("rendering::data-formats", &mut get_cat("rendering", &mut roots).sub).label = "Formats".into(); // group them in a more sensible way let parsers = vec![take_cat("parsing", &mut roots), take_cat("parser-implementations", &mut roots)]; roots.push(new_cat(parsers)); let hw = vec![take_cat("embedded", &mut roots), take_cat("hardware-support", &mut roots), take_cat("no-std", &mut roots)]; roots.push(new_cat(hw)); let db = vec![take_cat("database", &mut roots), take_cat("database-implementations", &mut roots)]; roots.push(new_cat(db)); let gg = vec![take_cat("game-development", &mut roots), take_cat("games", &mut roots)]; roots.push(new_cat(gg)); let int = take_cat("command-line-interface", &mut roots); let cli = vec![int, take_cat("command-line-utilities", &mut roots)]; roots.push(new_cat(cli)); let mut editors = take_cat("text-editors", &mut roots); editors.label = "Editors".into(); let txt = vec![ take_cat("text-processing", &mut roots), editors, take_cat("template-engine", &mut roots), take_cat("value-formatting", &mut roots), ]; roots.push(new_cat(txt)); let wasm = take_cat("wasm", &mut roots); get_cat("web-programming", &mut roots).sub.push(wasm); let mut asyn = take_cat("asynchronous", &mut roots); asyn.label = "Async".into(); get_cat("network-programming", &mut roots).sub.push(asyn); let mut proc = take_cat("development-tools::procedural-macro-helpers", &mut get_cat("development-tools", &mut roots).sub); proc.label = "Proc macros".into(); get_cat("rust-patterns", &mut roots).sub.push(proc); let concurrency = take_cat("concurrency", &mut roots); get_cat("rust-patterns", &mut roots).sub.push(concurrency); let mut cr = get_cat("cryptography", &mut roots).sub.remove(0); 
cr.label = "Crypto Magic Beans".into(); roots.push(cr); // first layout of top-level boxes (won't be used for anything other than second layout) for top in roots.iter_mut() { let (count, weight) = if top.label.is_empty() { (0, 0.) } else { kitchen_sink.category_crate_count(&top.cat.slug).await? }; top.count = count; top.weight = weight; let mut top_copy = top.clone(); top_copy.sub = Vec::new(); for i in top.sub.iter_mut() { let (count, weight) = kitchen_sink.category_crate_count(&i.cat.slug).await?; i.count = count; i.weight = weight; top.count += i.count; top.weight += i.weight; assert!(i.sub.is_empty()); } if top_copy.count > 0 { top.sub.insert(0, top_copy); } } let mut items_flattened = Vec::new(); let layout = TreemapLayout::new(); layout.layout_items(&mut roots, Rect::from_points(0.0, 0.0, 1000., 600.)); for parent in roots.iter_mut() { let layout = TreemapLayout::new(); layout.layout_items(&mut parent.sub, parent.bounds);
} postprocess_treebox_items(&mut items_flattened); Ok(items_flattened) } fn postprocess_treebox_items(items: &mut Vec<TreeBox>) { let colors = [ [0xff, 0xf1, 0xe6], [0xe2, 0xec, 0xe9], [0xDC, 0xED, 0xC1], [0xcd, 0xda, 0xfd], [0xbe, 0xe1, 0xe6], [0xfd, 0xe2, 0xe4], [0xdf, 0xe7, 0xfd], [0xFF, 0xD3, 0xB6], [0xea, 0xe4, 0xe9], [0xd0, 0xd1, 0xff], [0xf4, 0xda, 0xe2], [0xde, 0xc3, 0xe1], [0xd4, 0xe0, 0xf9], [0xFF, 0xD3, 0xB6], [0xDF, 0xCB, 0xD2], ]; let len = items.len() as f32; for (i, item) in &mut items.iter_mut().enumerate() { let x = 0.8 + (i as f32 / len) * 0.2; let c = colors[i % colors.len()]; let c = [ (c[0] as f32 * x + (1. - x) * 200.) as u8, (c[1] as f32 * x + (1. - x) * 100.) as u8, (c[2] as f32 * x + (1. - x) * 200.) as u8 ]; let mut l = lab::Lab::from_rgb(&c); l.l = (l.l + 90.) * 0.5; // fix my bad palette let c = l.to_rgb(); item.color = format!("#{:02x}{:02x}{:02x}", c[0], c[1], c[2]); let ideal_max_width = (item.bounds.w * 1.2 / (item.font_size / 1.7)) as usize; let maybe_label = textwrap::wrap(&item.label, textwrap::Options::new(ideal_max_width).break_words(false)); let chars = maybe_label.iter().map(|w| w.len()).max().unwrap_or(1); let lines = maybe_label.len(); let try_font_size = item.font_size .min(item.bounds.h / (lines as f64 * 1.05) - 4.) .min(item.bounds.w * 1.6 / chars as f64) .max(4.); let max_width = (item.bounds.w / (try_font_size / 1.7)) as usize; let must_break = ideal_max_width < chars * 2 / 3 && item.bounds.h > item.font_size * 2.; let label = textwrap::wrap(&item.label, textwrap::Options::new(max_width).break_words(must_break)); let chars = label.iter().map(|w| w.len()).max().unwrap_or(1); let lines = label.len(); item.label = label.join("\n"); item.font_size = item.font_size .min(item.bounds.h / (lines as f64 * 1.05) - 4.) .min(item.bounds.w * 1.6 / chars as f64) .max(4.); } } async fn owner_stats(kitchen_sink: &KitchenSink, start: Date<Utc>) -> Result<(Vec<u32>, Histogram), anyhow::Error> { let all_owners = kitchen_sink.crate_all_owners().await?; eprintln!("got {} owners", all_owners.len()); assert!(all_owners.len() > 1000); let mut owner_crates_with_ids = HashMap::new(); let mut total_owners_at_month = vec![0u32; (Utc::today().signed_duration_since(start).num
items_flattened.append(&mut parent.sub);
random_line_split
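postprocess_treebox_items above fits each label to its box in two passes: wrap at an ideal width, shrink the font until both the line count and the widest line fit, then re-wrap at the width the smaller font allows. One such pass in isolation, keeping the same rough constants (glyph width ≈ font_size / 1.7, line height ≈ 1.05 × font size, 4px floor); the helper itself is mine, not from the source:

fn fit_label(label: &str, box_w: f64, box_h: f64, font_size: f64) -> (String, f64) {
    // Approximate how many characters fit across the box at this font size.
    let max_cols = ((box_w / (font_size / 1.7)) as usize).max(1);
    let wrapped = textwrap::wrap(label, textwrap::Options::new(max_cols).break_words(false));
    let widest = wrapped.iter().map(|line| line.len()).max().unwrap_or(1);
    let lines = wrapped.len() as f64;
    // Shrink until the lines fit vertically and the widest line fits horizontally.
    let fitted = font_size
        .min(box_h / (lines * 1.05) - 4.)
        .min(box_w * 1.6 / widest as f64)
        .max(4.);
    (wrapped.join("\n"), fitted)
}

fn main() {
    let (label, px) = fit_label("Procedural macro helpers", 90.0, 36.0, 12.0);
    println!("{px:.1}px\n{label}");
}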
global_stats.rs
current = kitchen_sink.total_year_downloads(current_year)?; } let n = current[day.ordinal0() as usize]; match day.weekday() { // this sucks a bit due to mon/fri being UTC, and overlapping with the weekend // in the rest of the world. Weekday::Sat | Weekday::Sun => weekend_sum += n, _ => weekday_sum += n, }; day -= chrono::Duration::days(1); } dl.push((weekday_sum, weekend_sum)); } dl.reverse(); Ok(dl) } pub async fn render_global_stats(out: &mut impl Write, kitchen_sink: &KitchenSink, _renderer: &Renderer) -> Result<(), anyhow::Error> { let (categories, recent_crates) = try_join( category_stats(kitchen_sink), kitchen_sink.notable_recently_updated_crates(4100)).await?; let urler = Urler::new(None); let start = Utc.ymd(2015, 5, 15); // Rust 1.0 let start_week_offset = start.ordinal0()/7; let end = Utc::today() - chrono::Duration::days(2); let latest_rustc_version = end.signed_duration_since(start).num_weeks()/6; let mut compat_data = tokio::task::block_in_place(|| kitchen_sink.all_crate_compat())?; let rustc_stats_all = rustc_stats(&compat_data, latest_rustc_version as u16)?; let mut recent_compat = HashMap::with_capacity(recent_crates.len()); let mut rustc_stats_recent_num = 0; for (o, _) in recent_crates { if let Some(v) = compat_data.remove(&o) { recent_compat.insert(o, v); rustc_stats_recent_num += 1; if rustc_stats_recent_num >= 4000 { break; } } } let rustc_stats_recent = rustc_stats(&recent_compat, latest_rustc_version as u16)?; let dl = downloads_over_time(start, end, kitchen_sink)?; let (total_owners_at_month, mut hs_owner_crates) = owner_stats(kitchen_sink, start).await?; hs_owner_crates.buckets.iter_mut().take(4).for_each(|c| c.examples.truncate(6)); // normal amount of crates is boring assert!(dl.len() >= 52*2); let this_year = &dl[dl.len()-52..]; let last_year = &dl[dl.len()-52*2..dl.len()-52]; fn sum2(s: &[(u64, u64)]) -> (u64, u64) { let mut a_sum = 0; let mut b_sum = 0; s.iter().for_each(|&(a, b)| { a_sum += a; b_sum += b; }); (a_sum, b_sum) } let max_daily_downloads_rate = this_year.iter().map(move |(d, e)| (d/5).max(e/2)).max().unwrap_or(0) as u32; let mut tmp_sum = 0; let downloads_this_year = sum2(this_year); let downloads_last_year = sum2(last_year); let max_downloads_per_week = dl.iter().map(|(a, b)| a + b).max().unwrap_or(0); let max_total_owners = total_owners_at_month.iter().copied().max().unwrap_or(0); let dl_grid_line_every = (max_downloads_per_week / 6_000_000) * 1_000_000; let mut hs_deps1 = Histogram::new(kitchen_sink.get_stats_histogram("deps")?.expect("hs_deps"), true, &[0,1,2,3,4,5,6,7,8,9,10,11,12,14,16,18,20,25,30,40,60,80,100,120,150], |n| if n > 11 {format!("≥{n}")} else {n.
let hs_deps2 = Histogram { max: hs_deps1.max, buckets: hs_deps1.buckets.split_off(10), bucket_labels: hs_deps1.bucket_labels.split_off(10), }; let rev_deps = kitchen_sink.crates_io_all_rev_deps_counts().await?; let mut hs_rev_deps = Histogram::new(rev_deps, true, &[0,1,2,5,15,50,100,250,500,750,1000,2500,5000,10000,15000,20000,50000], |n| if n > 2 {format!("≥{n}")} else {n.to_string()}); hs_rev_deps.buckets.iter_mut().take(5).for_each(|b| b.examples.truncate(5)); let age_label = |n| match n { 0..=1 => "≤1 week".to_string(), 2..=4 => format!("≤{n} weeks"), 5 => "≤1 month".to_string(), 6..=51 => format!("≤{} months", (n as f64 / (365./12./7.)).round()), 52 => "≤1 year".to_string(), _ => format!("≤{} years", (n as f64 / 52.).round()), }; let total_crate_num = kitchen_sink.all_crates().count() as u32; let stats = GlobalStats { total_crate_num, total_owners_at_month, max_total_owners, max_daily_downloads_rate, start_week_offset, weeks_to_reach_max_downloads: dl.iter().copied().take_while(move |(d, e)| { tmp_sum += (d + e) as u32; tmp_sum < max_daily_downloads_rate }).count() as u32, dl_per_day_this_year: (downloads_this_year.0 / 5, downloads_this_year.1 / 2), dl_per_day_last_year: (downloads_last_year.0 / 5, downloads_last_year.1 / 2), max_downloads_per_week, dl_grid_line_every, hs_releases: Histogram::new(kitchen_sink.get_stats_histogram("releases")?.expect("hs_releases"), true, &[1,2,4,8,16,32,50,100,500], |n| if n > 2 {format!("≥{n}")} else {n.to_string()}), hs_sizes: Histogram::new(kitchen_sink.get_stats_histogram("sizes")?.expect("hs_sizes"), true, &[1,10,50,100,500,1_000,5_000,10_000,20_000], |n| { let mut t = format_bytes(n*1024); t.insert(0, '≤'); t }), hs_deps1, hs_deps2, hs_maintenance: Histogram::new(kitchen_sink.get_stats_histogram("maintenance")?.expect("hs_maintenance"), false, &[0, 1, 5, 26, 52, 52*2, 52*3, 52*5, 52*6, 52*8], |n| match n { 0 => "one-off".to_string(), 1 => "≤1 week".to_string(), 2..=4 => format!("≤{n} weeks"), 5 => "≤1 month".to_string(), 6..=51 => format!("≤{} months", (n as f64 / (365./12./7.)).round()), 52 => "≤1 year".to_string(), _ => format!("≤{} years", (n as f64 / 52.).round()), }), hs_age: Histogram::new(kitchen_sink.get_stats_histogram("age")?.expect("hs_age"), false, &[5, 13, 26, 52, 52*2, 52*3, 52*4, 52*5, 52*6, 52*8], age_label), hs_languish: Histogram::new(kitchen_sink.get_stats_histogram("languish")?.expect("hs_languish"), false, &[5, 13, 26, 52, 52*2, 52*3, 52*4, 52*5, 52*6, 52*8], age_label), hs_owner_crates, categories, rustc_stats_all, rustc_stats_recent, rustc_stats_recent_num, hs_rev_deps, }; templates::global_stats(out, &Page { title: "State of the Rust/Cargo crates ecosystem".to_owned(), description: Some("How many packages there are? How many dependencies they have? Which crate is the oldest or biggest? 
Is Rust usage growing?".to_owned()), noindex: false, search_meta: true, critical_css_data: Some(include_str!("../../style/public/home.css")), critical_css_dev_url: Some("/home.css"), ..Default::default() }, &dl, &stats, &urler)?; Ok(()) } #[derive(Default, Copy, Clone, Debug)] pub struct Compat { pub(crate) bad: u32, pub(crate) maybe_bad: u32, pub(crate) unknown: u32, pub(crate) maybe_ok: u32, pub(crate) ok: u32, } impl Compat { pub fn sum(&self) -> u32 { self.bad + self.maybe_bad + self.unknown + self.maybe_ok + self.ok } } fn rustc_stats(compat: &HashMap<Origin, CompatByCrateVersion>, max_rust_version: u16) -> Result<Vec<Compat>, anyhow::Error> { // (ok, maybe, not), [0] is unused let mut rustc_versions = vec![Compat::default(); (max_rust_version+1) as usize]; for c in compat.values() { // can't compile at all if!c.iter().any(|(_, c)| c.has_ever_built()) { continue; } // stats for latest crate version only let latest_ver = match c.iter().rfind(|(v, _)| v.pre.is_empty()).or_else(|| c.iter().rev().next()) { Some((_, c)) => c, None => continue, }; let latest_ver_bad = match c.iter().rfind(|(v, c)| v.pre.is_empty() && c.newest_bad_likely().is_some()) { Some((_, c)) => c, None => latest_ver, }; let newest_bad_raw = latest_ver_bad.newest_bad_likely().unwrap_or(0); let newest_bad = latest_ver.newest_bad().unwrap_or(0); let oldest_ok = latest_ver.oldest_ok().unwrap_or(999); let oldest_ok_raw = latest_ver.oldest_ok_certain().unwrap_or(999); for (ver, c) in rustc_versions.iter_mut().enumerate() { let ver = ver as u16; if ver >= oldest_ok { if ver >= oldest_ok_raw { c.ok += 1; } else { c.maybe_ok += 1; } } else if ver <= newest_bad { if ver <= newest_bad_raw { c.bad += 1; } else { c.maybe_bad += 1; } } else { c.unknown += 1; } } } // resize to width let width = 330; for c in &mut rustc_versions { let sum = c.sum(); c.bad = (c.bad * width + width / 2) / sum; c.ok = (c.ok * width + width / 2) / sum; c.maybe_bad = (c.maybe_bad * width + width / 2) / sum; c.maybe_ok = (c.maybe_ok * width + width / 2) / sum; c.unknown = width - c.bad - c.ok - c.maybe_bad - c.maybe_ok; } Ok(rustc_versions) } fn cat_slugs(sub: &'static CategoryMap) -> Vec<TreeBox> { let mut out = Vec::with_capacity(sub.len()); for c in sub.values() { if c.slug == "uncategorized" { continue; } out.push(TreeBox { cat: c, label: c.name.clone(), title: c.name.clone(), count: 0, weight: 0., bounds: treemap::Rect::new(), color: String::new(), font_size: 12., sub: cat_slugs(&c.sub), }); } out } #[derive(Debug, Clone)] pub struct TreeBox { pub cat: &'static Category, pub title: String, pub label: String, pub font_size: f64, /// SVG fill pub color: String, pub count: u32, pub weight: f64, pub bounds: treemap::Rect, pub sub: Vec<TreeBox>, } impl TreeBox { pub fn line_y(&self, nth: usize) -> f64 { self.bounds.y + 1. + self.font_size * 1.1 * (nth+1) as f64 } pub fn can_fit_count(&self) -> bool { self.line_y(self.label.lines().count()) + 1. 
- self.bounds.y < self.bounds.h } } impl treemap::Mappable for TreeBox { fn size(&self) -> f64 { self.weight } fn bounds(&self) -> &treemap::Rect { &self.bounds } fn set_bounds(&mut self, b: treemap::Rect) { self.bounds = b; } } async fn category_stats(kitchen_sink: &KitchenSink) -> Result<Vec<TreeBox>, anyhow::Error> { use treemap::*; let mut roots = cat_slugs(&CATEGORIES.root); #[track_caller] fn take_cat(slug: &str, items: &mut Vec<TreeBox>) -> TreeBox { let pos = items.iter().position(|i| i.cat.slug == slug).unwrap_or_else(|| panic!("{slug} in {items:?}")); items.swap_remove(pos) } #[track_caller] fn get_cat<'a>(slug: &str, items: &'a mut Vec<TreeBox>) -> &'a mut TreeBox { let pos = items.iter().position(|i| i.cat.slug == slug).unwrap_or_else(|| panic!("{slug} in {items:?}")); &mut items[pos] } fn new_cat(sub: Vec<TreeBox>) -> TreeBox { TreeBox { cat: CATEGORIES.root.values().next().unwrap(), title: String::new(), label: String::new(), font_size: 0., color: String::new(), count: 0, weight: 0., bounds: Rect::new(), sub, } } // names don't fit get_cat("database-implementations", &mut roots).label = "Database".into(); get_cat("simulation", &mut roots).label = "Sim".into(); get_cat("caching", &mut roots).label = "Cache".into(); get_cat("config", &mut roots).label = "Config".into(); get_cat("os", &mut roots).label = "OS".into(); get_cat("internationalization", &mut roots).label = "i18n".into(); get_cat("authentication", &mut roots).label = "Auth".into(); get_cat("visualization", &mut roots).label = "Visualize".into(); get_cat("accessibility", &mut roots).label = "a11y".into(); get_cat("compilers", &mut roots).label = "Lang".into(); get_cat("os::macos-apis", &mut get_cat("os", &mut roots).sub).label = "Apple".into(); get_cat("rendering::engine", &mut get_cat("rendering", &mut roots).sub).label = "Engine".into(); get_cat("rendering::data-formats", &mut get_cat("rendering", &mut roots).sub).label = "Formats".into(); // group them in a more sensible way let parsers = vec![take_cat("parsing", &mut roots), take_cat("parser-implementations", &mut roots)]; roots.push(new_cat(parsers)); let hw = vec![take_cat("embedded", &mut roots), take_cat("hardware-support", &mut roots), take_cat("no-std", &mut roots)]; roots.push(new_cat(hw)); let db = vec![take_cat("database", &mut roots), take_cat("database-implementations", &mut roots)]; roots.push(new_cat(db)); let gg = vec![take_cat("game-development", &mut roots), take_cat("games", &mut roots)]; roots.push(new_cat(gg)); let int = take_cat("command-line-interface", &mut roots); let cli = vec![int, take_cat("command-line-utilities", &mut roots)]; roots.push(new_cat(cli)); let mut editors = take_cat("text-editors", &mut roots); editors.label = "Editors".into(); let txt = vec![ take_cat("text-processing", &mut roots), editors, take_cat("template-engine", &mut roots), take_cat("value-formatting", &mut roots), ]; roots.push(new_cat(txt)); let wasm = take_cat("wasm", &mut roots); get_cat("web-programming", &mut roots).sub.push(wasm); let mut asyn = take_cat("asynchronous", &mut roots); asyn.label = "Async".into(); get_cat("network-programming", &mut roots).sub.push(asyn); let mut proc = take_cat("development-tools::procedural-macro-helpers", &mut get_cat("development-tools", &mut roots).sub); proc.label = "Proc macros".into(); get_cat("rust-patterns", &mut roots).sub.push(proc); let concurrency = take_cat("concurrency", &mut roots); get_cat("rust-patterns", &mut roots).sub.push(concurrency); let mut cr = get_cat("cryptography", &mut roots).sub.remove(0); 
cr.label = "Crypto Magic Beans".into(); roots.push(cr); // first layout of top-level boxes (won't be used for anything other than second layout) for top in roots.iter_mut() { let (count, weight) = if top.label.is_empty() { (0, 0.) } else { kitchen_sink.category_crate_count(&top.cat.slug).await? }; top.count = count; top.weight = weight; let mut top_copy = top.clone(); top_copy.sub = Vec::new(); for i in top.sub.iter_mut() { let (count, weight) = kitchen_sink.category_crate_count(&i.cat.slug).await?; i.count = count; i.weight = weight; top.count += i.count; top.weight += i.weight; assert!(i.sub.is_empty()); } if top_copy.count > 0 { top.sub.insert(0, top_copy); } } let mut items_flattened = Vec::new(); let layout = TreemapLayout::new(); layout.layout_items(&mut roots, Rect::from_points(0.0, 0.0, 1000., 600.)); for parent in roots.iter_mut() { let layout = TreemapLayout::new(); layout.layout_items(&mut parent.sub, parent.bounds); items_flattened.append(&mut parent.sub); } postprocess_treebox_items(&mut items_flattened); Ok(items_flattened) } fn postprocess_treebox_items(items: &mut Vec<TreeBox>) { let colors = [ [0xff, 0xf1, 0xe6], [0xe2, 0xec, 0xe9], [0xDC, 0xED, 0xC1], [0xcd, 0xda, 0xfd], [0xbe, 0xe1, 0xe6], [0xfd, 0xe2, 0xe4], [0xdf, 0xe7, 0xfd], [0xFF, 0xD3, 0xB6], [0xea, 0xe4, 0xe9], [0xd0, 0xd1, 0xff], [0xf4, 0xda, 0xe2], [0xde, 0xc3, 0xe1], [0xd4, 0xe0, 0xf9], [0xFF, 0xD3, 0xB6], [0xDF, 0xCB, 0xD2], ]; let len = items.len() as f32; for (i, item) in &mut items.iter_mut().enumerate() { let x = 0.8 + (i as f32 / len) * 0.2; let c = colors[i % colors.len()]; let c = [ (c[0] as f32 * x + (1. - x) * 200.) as u8, (c[1] as f32 * x + (1. - x) * 100.) as u8, (c[2] as f32 * x + (1. - x) * 200.) as u8 ]; let mut l = lab::Lab::from_rgb(&c); l.l = (l.l + 90.) * 0.5; // fix my bad palette let c = l.to_rgb(); item.color = format!("#{:02x}{:02x}{:02x}", c[0], c[1], c[2]); let ideal_max_width = (item.bounds.w * 1.2 / (item.font_size / 1.7)) as usize; let maybe_label = textwrap::wrap(&item.label, textwrap::Options::new(ideal_max_width).break_words(false)); let chars = maybe_label.iter().map(|w| w.len()).max().unwrap_or(1); let lines = maybe_label.len(); let try_font_size = item.font_size .min(item.bounds.h / (lines as f64 * 1.05) - 4.) .min(item.bounds.w * 1.6 / chars as f64) .max(4.); let max_width = (item.bounds.w / (try_font_size / 1.7)) as usize; let must_break = ideal_max_width < chars * 2 / 3 && item.bounds.h > item.font_size * 2.; let label = textwrap::wrap(&item.label, textwrap::Options::new(max_width).break_words(must_break)); let chars = label.iter().map(|w| w.len()).max().unwrap_or(1); let lines = label.len(); item.label = label.join("\n"); item.font_size = item.font_size .min(item.bounds.h / (lines as f64 * 1.05) - 4.) .min(item.bounds.w * 1.6 / chars as f64) .max(4.); } } async fn owner_stats(kitchen_sink: &KitchenSink, start: Date<Utc>) -> Result<(Vec<u32>, Histogram), anyhow::Error> { let all_owners = kitchen_sink.crate_all_owners().await?; eprintln!("got {} owners", all_owners.len()); assert!(all_owners.len() > 1000); let mut owner_crates_with_ids = HashMap::new(); let mut total_owners_at_month = vec![0u32; (Utc::today().signed_duration_since(start).
to_string()});
conditional_block
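The download series in downloads_over_time above is folded into one (weekday_sum, weekend_sum) pair per week by stepping backwards one day at a time and bucketing on the UTC weekday. The same loop in isolation, with the chrono calls used above and a dummy per-day count:

use chrono::{Datelike, Duration, TimeZone, Utc, Weekday};

fn main() {
    let mut day = Utc.ymd(2023, 1, 8); // a Sunday
    let (mut weekday_sum, mut weekend_sum) = (0u64, 0u64);
    for _ in 0..7 {
        let n = 1_000u64; // stand-in for `current[day.ordinal0() as usize]`
        match day.weekday() {
            // Sat/Sun in UTC, so the split is approximate outside UTC,
            // as the comment in the original code notes.
            Weekday::Sat | Weekday::Sun => weekend_sum += n,
            _ => weekday_sum += n,
        }
        day = day - Duration::days(1);
    }
    assert_eq!((weekday_sum, weekend_sum), (5_000, 2_000)); // 5 weekdays, 2 weekend days
}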
mod.rs
mod field_names_encoder; use self::field_names_encoder::FieldNamesEncoder; use csv::{self, Result}; use rustc_serialize::Encodable; use std::fs::File; use std::io::{BufWriter, Write}; use std::marker::PhantomData; use std::path::Path; /// A CSV writer that automatically writes the headers. /// /// This writer provides a convenient interface for encoding CSV data. While /// creating CSV data is much easier than parsing it, having a writer can be /// convenient because it can handle quoting for you automatically. Moreover, /// this particular writer supports [`rustc_serialize::Encodable`][Encodable] /// types, which makes it easy to write your custom types as CSV records and /// automatically generate headers. /// /// All CSV data produced by this writer, with default options, conforms with /// [RFC 4180](http://tools.ietf.org/html/rfc4180). /// /// One slight deviation is that records with a single empty field are always /// encoded as `""`. This ensures that the record is not skipped since some /// CSV parsers will ignore consecutive record terminators (like the one in /// this crate). /// /// If you don't want the writer to automatically write the header row, /// use the [`csv`](https://github.com/BurntSushi/rust-csv) crate instead. /// /// # Example /// /// Here's an example that encodes a zoo of animals: /// /// ```rust /// extern crate rustc_serialize; /// # extern crate typed_csv; /// # fn main() { /// /// #[derive(RustcEncodable)] /// struct Record { /// count: usize, /// animal: &'static str, /// description: &'static str, /// } /// /// let records = vec![ /// Record { count: 7, animal: "penguin", description: "happy" }, /// Record { count: 10, animal: "cheetah", description: "fast" }, /// Record { count: 4, animal: "armadillo", description: "armored" }, /// Record { count: 9, animal: "platypus", description: "unique" }, /// ]; /// /// let mut wtr = typed_csv::Writer::from_memory(); /// for record in records.into_iter() { /// wtr.encode(record).unwrap(); /// } /// /// assert_eq!(wtr.as_string(), "\ /// count,animal,description /// 7,penguin,happy /// 10,cheetah,fast /// 4,armadillo,armored /// 9,platypus,unique /// "); /// # } /// ``` /// /// [Encodable]: https://doc.rust-lang.org/rustc-serialize/rustc_serialize/trait.Encodable.html pub struct Writer<W: Write, E: Encodable> { csv: csv::Writer<W>, first_row: bool, record_type: PhantomData<E>, } impl<E: Encodable> Writer<File, E> { /// Creates a new typed CSV writer that writes to the file path given. /// /// The file is created if it does not already exist and is truncated /// otherwise. pub fn from_file<P: AsRef<Path>>(path: P) -> Result<Writer<File, E>> { Ok(Self::from_csv_writer(csv::Writer::from_file(path)?)) } } impl<W: Write, E: Encodable> Writer<W, E> { /// Creates a new typed CSV writer that writes to the `io::Write` given. /// /// Note that the writer is buffered for you automatically. pub fn from_writer(w: W) -> Writer<W, E> { Self::from_csv_writer(csv::Writer::from_writer(w)) } /// Creates a new typed CSV writer that writes to the CSV writer given. /// /// This lets you specify options to the underlying CSV writer (e.g. to use /// a different delimiter). pub fn from_csv_writer(w: csv::Writer<W>) -> Writer<W, E> { Writer { csv: w, first_row: true, record_type: PhantomData, } } /// Creates a new typed CSV writer that writes to the buffer given. /// /// This lets you specify your own buffered writer (e.g., use a different /// capacity). All other constructors wrap the writer given in a buffer /// with default capacity.
pub fn from_buffer(buf: BufWriter<W>) -> Writer<W, E> { Self::from_csv_writer(csv::Writer::from_buffer(buf)) } } impl<E: Encodable> Writer<Vec<u8>, E> { /// Creates a new CSV writer that writes to an in-memory buffer. At any /// time, `as_string` or `as_bytes` can be called to retrieve the /// cumulative CSV data. pub fn from_memory() -> Writer<Vec<u8>, E> { Self::from_csv_writer(csv::Writer::from_memory()) } /// Returns the written CSV data as a string. pub fn as_string(&mut self) -> &str { self.csv.as_string() } /// Returns the encoded CSV data as raw bytes. pub fn as_bytes(&mut self) -> &[u8] { self.csv.as_bytes() } /// Convert the Writer into a string of written CSV data. pub fn into_string(self) -> String { self.csv.into_string() } /// Convert the Writer into a vector of encoded CSV bytes. pub fn into_bytes(self) -> Vec<u8> { self.csv.into_bytes() } } impl<W: Write, E: Encodable> Writer<W, E> { /// Writes a record by encoding any `Encodable` value. /// /// When the first record is encoded, the headers (the field names in the /// struct) are written prior to encoding the record. /// /// The type that is being encoded should correspond to *one full CSV /// record*. This can be a single struct, or arbitrarily nested tuples, /// arrays, Vecs, and structs, as long as all scalar types (integers, /// floats, characters, strings, collections containing one scalar, and /// enums with 0 or 1 scalar arguments) are fields in structs. Enums with /// zero arguments encode to their name, while enums of one argument encode /// to their constituent value. `Option` types are also supported. (`None` /// encodes to an empty field.) /// /// Note that single-element tuple structs (the newtype pattern) are /// supported. Unfortunately, to provide this functionality, a heuristic is /// necessary to differentiate field names in normal structs from those in /// tuple structs. As a result, field names in normal structs should not be /// of the form `_field{}` where `{}` is its position in the struct. /// /// # Example /// /// This example encodes a zoo of animals which may not have a description. /// /// ```rust /// extern crate rustc_serialize; /// # extern crate typed_csv; /// # fn main() { /// /// #[derive(RustcEncodable)] /// struct Count(usize); /// /// #[derive(RustcEncodable)] /// enum Group { /// Bird, /// Mammal, /// } /// /// #[derive(RustcEncodable)] /// struct Part1 { /// count: Count, /// animal: &'static str, /// } /// /// #[derive(RustcEncodable)] /// struct Part2 { /// group: Group, /// description: Option<&'static str>, /// } /// /// let records = vec![ /// ( /// Part1 { count: Count(7), animal: "penguin" }, /// Part2 { group: Group::Bird, description: Some("happy") }, /// ), /// ( /// Part1 { count: Count(10), animal: "cheetah" }, /// Part2 { group: Group::Mammal, description: Some("fast") }, /// ), /// ( /// Part1 { count: Count(4), animal: "armadillo" }, /// Part2 { group: Group::Mammal, description: Some("armored") }, /// ), /// ( /// Part1 { count: Count(9), animal: "platypus" }, /// Part2 { group: Group::Mammal, description: None }, /// ), /// ]; /// /// let mut wtr = typed_csv::Writer::from_memory(); /// for record in records.into_iter() { /// wtr.encode(record).unwrap(); /// } /// /// assert_eq!(wtr.as_string(), "\ /// count,animal,group,description /// 7,penguin,Bird,happy /// 10,cheetah,Mammal,fast /// 4,armadillo,Mammal,armored /// 9,platypus,Mammal, /// "); /// # } /// ``` pub fn encode(&mut self, row: E) -> csv::Result<()> { // Write headers if this is the first row.
if self.first_row
// Write row. let mut erecord = csv::Encoded::new(); row.encode(&mut erecord)?; self.csv.write(erecord.unwrap().into_iter()) } /// Flushes the underlying buffer. pub fn flush(&mut self) -> Result<()> { self.csv.flush() } } #[cfg(test)] mod tests { use super::Writer; #[derive(RustcEncodable)] struct SimpleStruct { a: usize, b: usize, } #[test] fn test_struct() { let mut w = Writer::from_memory(); let s1 = SimpleStruct { a: 0, b: 1 }; w.encode(s1).unwrap(); let s2 = SimpleStruct { a: 3, b: 4 }; w.encode(s2).unwrap(); assert_eq!(w.as_string(), "a,b\n0,1\n3,4\n"); } #[test] fn test_tuple_of_structs() { let mut w = Writer::from_memory(); let s1 = SimpleStruct { a: 0, b: 1 }; let s2 = SimpleStruct { a: 2, b: 3 }; w.encode((s1, s2)).unwrap(); let s3 = SimpleStruct { a: 4, b: 5 }; let s4 = SimpleStruct { a: 6, b: 7 }; w.encode((s3, s4)).unwrap(); assert_eq!(w.as_string(), "a,b,a,b\n0,1,2,3\n4,5,6,7\n"); } #[test] fn test_array_of_structs() { let mut w = Writer::from_memory(); let s1 = SimpleStruct { a: 0, b: 1 }; let s2 = SimpleStruct { a: 2, b: 3 }; w.encode([s1, s2]).unwrap(); let s3 = SimpleStruct { a: 4, b: 5 }; let s4 = SimpleStruct { a: 6, b: 7 }; w.encode([s3, s4]).unwrap(); assert_eq!(w.as_string(), "a,b,a,b\n0,1,2,3\n4,5,6,7\n"); } #[test] fn test_vec_of_structs() { let mut w = Writer::from_memory(); let s1 = SimpleStruct { a: 0, b: 1 }; let s2 = SimpleStruct { a: 2, b: 3 }; w.encode(vec![s1, s2]).unwrap(); let s3 = SimpleStruct { a: 4, b: 5 }; let s4 = SimpleStruct { a: 6, b: 7 }; w.encode(vec![s3, s4]).unwrap(); assert_eq!(w.as_string(), "a,b,a,b\n0,1,2,3\n4,5,6,7\n"); } #[test] fn test_nested_tuples_of_structs() { let mut w = Writer::from_memory(); w.encode((SimpleStruct { a: 0, b: 1 }, (SimpleStruct { a: 2, b: 3 }), (SimpleStruct { a: 4, b: 5 }, (SimpleStruct { a: 6, b: 7 })))) .unwrap(); w.encode((SimpleStruct { a: 8, b: 9 }, (SimpleStruct { a: 10, b: 11 }), (SimpleStruct { a: 12, b: 13 }, (SimpleStruct { a: 14, b: 15 })))) .unwrap(); assert_eq!(w.as_string(), "a,b,a,b,a,b,a,b\n0,1,2,3,4,5,6,7\n8,9,10,11,12,13,14,15\n"); } #[derive(RustcEncodable)] struct StructWithLengthOneSeqs { a: [usize; 1], b: Vec<usize>, c: (usize), } #[test] fn test_struct_with_len_one_seqs() { let mut w = Writer::from_memory(); let s1 = StructWithLengthOneSeqs { a: [0], b: vec![1], c: (2), }; w.encode(s1).unwrap(); let s2 = StructWithLengthOneSeqs { a: [3], b: vec![4], c: (5), }; w.encode(s2).unwrap(); assert_eq!(w.as_string(), "a,b,c\n0,1,2\n3,4,5\n"); } #[derive(RustcEncodable)] struct StructOfStruct { p: SimpleStruct, q: (usize, usize), } #[should_panic] #[test] fn test_struct_of_struct() { let mut w = Writer::from_memory(); let s = StructOfStruct { p: SimpleStruct { a: 0, b: 1 }, q: (2, 3), }; w.encode(s).unwrap(); } #[derive(RustcEncodable)] struct StructWithLongerSeq { a: [usize; 2], } #[should_panic] #[test] fn test_struct_with_longer_seq() { let mut w = Writer::from_memory(); let s = StructWithLongerSeq { a: [0, 1] }; w.encode(s).unwrap(); } #[should_panic] #[test] fn test_vec() { let mut w = Writer::from_memory(); let array = vec![0, 1]; w.encode(array).unwrap(); } }
{ let mut field_names_encoder = FieldNamesEncoder::new(); row.encode(&mut field_names_encoder)?; self.csv.write(field_names_encoder.into_field_names().into_iter())?; self.first_row = false; }
conditional_block
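The `conditional_block` completion above is the header-writing branch of `encode`: headers are emitted lazily, exactly once, right before the first record, and the `first_row` flag is cleared only after the header write succeeds. A minimal sketch of the same pattern over plain strings (the `ToyWriter` type and its `encode` signature are illustrative, not part of the crate):

use std::io::{self, Write};

// Toy writer mirroring the `first_row` pattern from `encode`:
// the header row goes out exactly once, just before the first record.
struct ToyWriter<W: Write> {
    out: W,
    first_row: bool,
}

impl<W: Write> ToyWriter<W> {
    fn new(out: W) -> Self {
        ToyWriter { out, first_row: true }
    }

    fn encode(&mut self, names: &[&str], values: &[&str]) -> io::Result<()> {
        if self.first_row {
            writeln!(self.out, "{}", names.join(","))?;
            // Clear the flag only after the header write succeeded, so a
            // failed first call leaves the writer in a retryable state.
            self.first_row = false;
        }
        writeln!(self.out, "{}", values.join(","))
    }
}

fn main() -> io::Result<()> {
    let mut w = ToyWriter::new(Vec::new());
    w.encode(&["a", "b"], &["0", "1"])?;
    w.encode(&["a", "b"], &["3", "4"])?;
    assert_eq!(String::from_utf8(w.out).unwrap(), "a,b\n0,1\n3,4\n");
    Ok(())
}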
mod.rs
mod field_names_encoder; use self::field_names_encoder::FieldNamesEncoder; use csv::{self, Result}; use rustc_serialize::Encodable; use std::fs::File; use std::io::{BufWriter, Write}; use std::marker::PhantomData; use std::path::Path; /// A CSV writer that automatically writes the headers. /// /// This writer provides a convenient interface for encoding CSV data. While /// creating CSV data is much easier than parsing it, having a writer can be /// convenient because it can handle quoting for you automatically. Moreover, /// this particular writer supports [`rustc_serialize::Encodable`][Encodable] /// types, which makes it easy to write your custom types as CSV records and /// automatically generate headers. /// /// All CSV data produced by this writer, with default options, conforms with /// [RFC 4180](http://tools.ietf.org/html/rfc4180). /// /// One slight deviation is that records with a single empty field are always /// encoded as `""`. This ensures that the record is not skipped since some /// CSV parsers will ignore consecutive record terminators (like the one in /// this crate). /// /// If you don't want the writer to automatically write the header row, /// use the [`csv`](https://github.com/BurntSushi/rust-csv) crate instead. /// /// # Example /// /// Here's an example that encodes a zoo of animals: /// /// ```rust /// extern crate rustc_serialize; /// # extern crate typed_csv; /// # fn main() { /// /// #[derive(RustcEncodable)] /// struct Record { /// count: usize, /// animal: &'static str, /// description: &'static str, /// } /// /// let records = vec![ /// Record { count: 7, animal: "penguin", description: "happy" }, /// Record { count: 10, animal: "cheetah", description: "fast" }, /// Record { count: 4, animal: "armadillo", description: "armored" }, /// Record { count: 9, animal: "platypus", description: "unique" }, /// ]; /// /// let mut wtr = typed_csv::Writer::from_memory(); /// for record in records.into_iter() { /// wtr.encode(record).unwrap(); /// } /// /// assert_eq!(wtr.as_string(), "\ /// count,animal,description /// 7,penguin,happy /// 10,cheetah,fast /// 4,armadillo,armored /// 9,platypus,unique /// "); /// # } /// ``` /// /// [Encodable]: https://doc.rust-lang.org/rustc-serialize/rustc_serialize/trait.Encodable.html pub struct Writer<W: Write, E: Encodable> { csv: csv::Writer<W>, first_row: bool, record_type: PhantomData<E>, } impl<E: Encodable> Writer<File, E> { /// Creates a new typed CSV writer that writes to the file path given. /// /// The file is created if it does not already exist and is truncated /// otherwise. pub fn from_file<P: AsRef<Path>>(path: P) -> Result<Writer<File, E>> { Ok(Self::from_csv_writer(csv::Writer::from_file(path)?)) } } impl<W: Write, E: Encodable> Writer<W, E> { /// Creates a new typed CSV writer that writes to the `io::Write` given. /// /// Note that the writer is buffered for you automatically. pub fn from_writer(w: W) -> Writer<W, E> { Self::from_csv_writer(csv::Writer::from_writer(w)) } /// Creates a new typed CSV writer that writes to the CSV writer given. /// /// This lets you specify options to the underlying CSV writer (e.g. to use /// a different delimiter). pub fn from_csv_writer(w: csv::Writer<W>) -> Writer<W, E> { Writer { csv: w, first_row: true, record_type: PhantomData, } } /// Creates a new typed CSV writer that writes to the buffer given. /// /// This lets you specify your own buffered writer (e.g., use a different /// capacity). All other constructors wrap the writer given in a buffer /// with default capacity.
pub fn from_buffer(buf: BufWriter<W>) -> Writer<W, E> { Self::from_csv_writer(csv::Writer::from_buffer(buf)) } } impl<E: Encodable> Writer<Vec<u8>, E> { /// Creates a new CSV writer that writes to an in-memory buffer. At any /// time, `as_string` or `as_bytes` can be called to retrieve the /// cumulative CSV data. pub fn from_memory() -> Writer<Vec<u8>, E> { Self::from_csv_writer(csv::Writer::from_memory()) } /// Returns the written CSV data as a string. pub fn as_string(&mut self) -> &str { self.csv.as_string() } /// Returns the encoded CSV data as raw bytes. pub fn as_bytes(&mut self) -> &[u8] { self.csv.as_bytes() } /// Convert the Writer into a string of written CSV data pub fn into_string(self) -> String { self.csv.into_string() } /// Convert the Writer into a vector of encoded CSV bytes. pub fn into_bytes(self) -> Vec<u8> { self.csv.into_bytes() } } impl<W: Write, E: Encodable> Writer<W, E> { /// Writes a record by encoding any `Encodable` value. /// /// When the first record is encoded, the headers (the field names in the /// struct) are written prior to encoding the record. /// /// The type that is being encoded should correspond to *one full CSV /// record*. This can be a single struct, or arbitrarily nested tuples, /// arrays, Vecs, and structs, as long as all scalar types (integers, /// floats, characters, strings, collections containing one scalar, and /// enums with 0 or 1 scalar arguments) are fields in structs. Enums with /// zero arguments encode to their name, while enums of one argument encode /// to their constituent value. `Option` types are also supported. (`None` /// encodes to an empty field.) /// /// Note that single-element tuple structs (the newtype pattern) are /// supported. Unfortunately, to provide this functionality, a heuristic is /// necessary to differentiate field names in normal structs from those in /// tuple structs. As a result, field names in normal structs should not be /// of the form `_field{}` where `{}` is its position in the struct. /// /// # Example /// /// This example encodes zoo animals that may not have a description. /// /// ```rust /// extern crate rustc_serialize; /// # extern crate typed_csv; /// # fn main() { /// /// #[derive(RustcEncodable)] /// struct Count(usize); /// /// #[derive(RustcEncodable)] /// enum Group { /// Bird, /// Mammal, /// } /// /// #[derive(RustcEncodable)] /// struct Part1 { /// count: Count, /// animal: &'static str, /// } /// /// #[derive(RustcEncodable)] /// struct Part2 { /// group: Group, /// description: Option<&'static str>, /// } /// /// let records = vec![ /// ( /// Part1 { count: Count(7), animal: "penguin" }, /// Part2 { group: Group::Bird, description: Some("happy") }, /// ), /// ( /// Part1 { count: Count(10), animal: "cheetah" }, /// Part2 { group: Group::Mammal, description: Some("fast") }, /// ), /// ( /// Part1 { count: Count(4), animal: "armadillo" }, /// Part2 { group: Group::Mammal, description: Some("armored") }, /// ), /// ( /// Part1 { count: Count(9), animal: "platypus" }, /// Part2 { group: Group::Mammal, description: None }, /// ), /// ]; /// /// let mut wtr = typed_csv::Writer::from_memory(); /// for record in records.into_iter() { /// wtr.encode(record).unwrap(); /// } /// /// assert_eq!(wtr.as_string(), "\ /// count,animal,group,description /// 7,penguin,Bird,happy /// 10,cheetah,Mammal,fast /// 4,armadillo,Mammal,armored /// 9,platypus,Mammal, /// "); /// # } /// ``` pub fn encode(&mut self, row: E) -> csv::Result<()> { // Write headers if this is the first row.
if self.first_row { let mut field_names_encoder = FieldNamesEncoder::new(); row.encode(&mut field_names_encoder)?; self.csv.write(field_names_encoder.into_field_names().into_iter())?; self.first_row = false; } // Write row. let mut erecord = csv::Encoded::new(); row.encode(&mut erecord)?; self.csv.write(erecord.unwrap().into_iter()) } /// Flushes the underlying buffer. pub fn flush(&mut self) -> Result<()> { self.csv.flush() } } #[cfg(test)] mod tests { use super::Writer; #[derive(RustcEncodable)] struct SimpleStruct { a: usize, b: usize, } #[test] fn
() { let mut w = Writer::from_memory(); let s1 = SimpleStruct { a: 0, b: 1 }; w.encode(s1).unwrap(); let s2 = SimpleStruct { a: 3, b: 4 }; w.encode(s2).unwrap(); assert_eq!(w.as_string(), "a,b\n0,1\n3,4\n"); } #[test] fn test_tuple_of_structs() { let mut w = Writer::from_memory(); let s1 = SimpleStruct { a: 0, b: 1 }; let s2 = SimpleStruct { a: 2, b: 3 }; w.encode((s1, s2)).unwrap(); let s3 = SimpleStruct { a: 4, b: 5 }; let s4 = SimpleStruct { a: 6, b: 7 }; w.encode((s3, s4)).unwrap(); assert_eq!(w.as_string(), "a,b,a,b\n0,1,2,3\n4,5,6,7\n"); } #[test] fn test_array_of_structs() { let mut w = Writer::from_memory(); let s1 = SimpleStruct { a: 0, b: 1 }; let s2 = SimpleStruct { a: 2, b: 3 }; w.encode([s1, s2]).unwrap(); let s3 = SimpleStruct { a: 4, b: 5 }; let s4 = SimpleStruct { a: 6, b: 7 }; w.encode([s3, s4]).unwrap(); assert_eq!(w.as_string(), "a,b,a,b\n0,1,2,3\n4,5,6,7\n"); } #[test] fn test_vec_of_structs() { let mut w = Writer::from_memory(); let s1 = SimpleStruct { a: 0, b: 1 }; let s2 = SimpleStruct { a: 2, b: 3 }; w.encode(vec![s1, s2]).unwrap(); let s3 = SimpleStruct { a: 4, b: 5 }; let s4 = SimpleStruct { a: 6, b: 7 }; w.encode(vec![s3, s4]).unwrap(); assert_eq!(w.as_string(), "a,b,a,b\n0,1,2,3\n4,5,6,7\n"); } #[test] fn test_nested_tuples_of_structs() { let mut w = Writer::from_memory(); w.encode((SimpleStruct { a: 0, b: 1 }, (SimpleStruct { a: 2, b: 3 }), (SimpleStruct { a: 4, b: 5 }, (SimpleStruct { a: 6, b: 7 })))) .unwrap(); w.encode((SimpleStruct { a: 8, b: 9 }, (SimpleStruct { a: 10, b: 11 }), (SimpleStruct { a: 12, b: 13 }, (SimpleStruct { a: 14, b: 15 })))) .unwrap(); assert_eq!(w.as_string(), "a,b,a,b,a,b,a,b\n0,1,2,3,4,5,6,7\n8,9,10,11,12,13,14,15\n"); } #[derive(RustcEncodable)] struct StructWithLengthOneSeqs { a: [usize; 1], b: Vec<usize>, c: (usize), } #[test] fn test_struct_with_len_one_seqs() { let mut w = Writer::from_memory(); let s1 = StructWithLengthOneSeqs { a: [0], b: vec![1], c: (2), }; w.encode(s1).unwrap(); let s2 = StructWithLengthOneSeqs { a: [3], b: vec![4], c: (5), }; w.encode(s2).unwrap(); assert_eq!(w.as_string(), "a,b,c\n0,1,2\n3,4,5\n"); } #[derive(RustcEncodable)] struct StructOfStruct { p: SimpleStruct, q: (usize, usize), } #[should_panic] #[test] fn test_struct_of_struct() { let mut w = Writer::from_memory(); let s = StructOfStruct { p: SimpleStruct { a: 0, b: 1 }, q: (2, 3), }; w.encode(s).unwrap(); } #[derive(RustcEncodable)] struct StructWithLongerSeq { a: [usize; 2], } #[should_panic] #[test] fn test_struct_with_longer_seq() { let mut w = Writer::from_memory(); let s = StructWithLongerSeq { a: [0, 1] }; w.encode(s).unwrap(); } #[should_panic] #[test] fn test_vec() { let mut w = Writer::from_memory(); let array = vec![0, 1]; w.encode(array).unwrap(); } }
test_struct
identifier_name
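The `identifier_name` completion above is just the test name `test_struct`, but the machinery it exercises is the double traversal inside `encode`: the same record is encoded twice, once through `FieldNamesEncoder` to harvest the header names and once through `csv::Encoded` to harvest the values. A sketch of that idea with a hand-written trait standing in for `rustc_serialize::Encodable` (the `Record` trait and `Simple` struct are illustrative only, not crate APIs):

// Illustrative stand-in for the two encoding passes: one pass yields
// field names (the header row), the other yields field values (the record).
trait Record {
    fn field_names(&self) -> Vec<String>;
    fn field_values(&self) -> Vec<String>;
}

struct Simple {
    a: usize,
    b: usize,
}

impl Record for Simple {
    fn field_names(&self) -> Vec<String> {
        vec!["a".into(), "b".into()]
    }
    fn field_values(&self) -> Vec<String> {
        vec![self.a.to_string(), self.b.to_string()]
    }
}

fn main() {
    let row = Simple { a: 0, b: 1 };
    // First pass produces "a,b"; second pass produces "0,1" -- matching the
    // "a,b\n0,1\n..." output asserted in test_struct.
    assert_eq!(row.field_names().join(","), "a,b");
    assert_eq!(row.field_values().join(","), "0,1");
}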
mod.rs
mod field_names_encoder; use self::field_names_encoder::FieldNamesEncoder; use csv::{self, Result}; use rustc_serialize::Encodable; use std::fs::File; use std::io::{BufWriter, Write}; use std::marker::PhantomData; use std::path::Path; /// A CSV writer that automatically writes the headers. /// /// This writer provides a convenient interface for encoding CSV data. While /// creating CSV data is much easier than parsing it, having a writer can be /// convenient because it can handle quoting for you automatically. Moreover, /// this particular writer supports [`rustc_serialize::Encodable`][Encodable] /// types, which makes it easy to write your custom types as CSV records and /// automatically generate headers. /// /// All CSV data produced by this writer, with default options, conforms with /// [RFC 4180](http://tools.ietf.org/html/rfc4180). /// /// One slight deviation is that records with a single empty field are always /// encoded as `""`. This ensures that the record is not skipped since some /// CSV parsers will ignore consecutive record terminators (like the one in /// this crate). /// /// If you don't want the writer to automatically write the header row, /// use the [`csv`](https://github.com/BurntSushi/rust-csv) crate instead. /// /// # Example /// /// Here's an example that encodes a zoo of animals: /// /// ```rust /// extern crate rustc_serialize; /// # extern crate typed_csv; /// # fn main() { /// /// #[derive(RustcEncodable)] /// struct Record { /// count: usize, /// animal: &'static str, /// description: &'static str, /// } /// /// let records = vec![ /// Record { count: 7, animal: "penguin", description: "happy" }, /// Record { count: 10, animal: "cheetah", description: "fast" }, /// Record { count: 4, animal: "armadillo", description: "armored" }, /// Record { count: 9, animal: "platypus", description: "unique" }, /// ]; /// /// let mut wtr = typed_csv::Writer::from_memory(); /// for record in records.into_iter() { /// wtr.encode(record).unwrap(); /// } /// /// assert_eq!(wtr.as_string(), "\ /// count,animal,description /// 7,penguin,happy /// 10,cheetah,fast /// 4,armadillo,armored /// 9,platypus,unique /// "); /// # } /// ``` /// /// [Encodable]: https://doc.rust-lang.org/rustc-serialize/rustc_serialize/trait.Encodable.html pub struct Writer<W: Write, E: Encodable> { csv: csv::Writer<W>, first_row: bool, record_type: PhantomData<E>, } impl<E: Encodable> Writer<File, E> { /// Creates a new typed CSV writer that writes to the file path given. /// /// The file is created if it does not already exist and is truncated /// otherwise. pub fn from_file<P: AsRef<Path>>(path: P) -> Result<Writer<File, E>> { Ok(Self::from_csv_writer(csv::Writer::from_file(path)?)) } } impl<W: Write, E: Encodable> Writer<W, E> { /// Creates a new typed CSV writer that writes to the `io::Write` given. /// /// Note that the writer is buffered for you automatically. pub fn from_writer(w: W) -> Writer<W, E> { Self::from_csv_writer(csv::Writer::from_writer(w)) } /// Creates a new typed CSV writer that writes to the CSV writer given. /// /// This lets you specify options to the underlying CSV writer (e.g. to use /// a different delimiter). pub fn from_csv_writer(w: csv::Writer<W>) -> Writer<W, E> { Writer { csv: w, first_row: true, record_type: PhantomData, } } /// Creates a new typed CSV writer that writes to the buffer given. /// /// This lets you specify your own buffered writer (e.g., use a different /// capacity). All other constructors wrap the writer given in a buffer /// with default capacity.
pub fn from_buffer(buf: BufWriter<W>) -> Writer<W, E> { Self::from_csv_writer(csv::Writer::from_buffer(buf)) } } impl<E: Encodable> Writer<Vec<u8>, E> { /// Creates a new CSV writer that writes to an in-memory buffer. At any /// time, `as_string` or `as_bytes` can be called to retrieve the /// cumulative CSV data. pub fn from_memory() -> Writer<Vec<u8>, E> { Self::from_csv_writer(csv::Writer::from_memory()) } /// Returns the written CSV data as a string. pub fn as_string(&mut self) -> &str { self.csv.as_string() } /// Returns the encoded CSV data as raw bytes. pub fn as_bytes(&mut self) -> &[u8] { self.csv.as_bytes() } /// Convert the Writer into a string of written CSV data pub fn into_string(self) -> String { self.csv.into_string() } /// Convert the Writer into a vector of encoded CSV bytes. pub fn into_bytes(self) -> Vec<u8> { self.csv.into_bytes() } } impl<W: Write, E: Encodable> Writer<W, E> { /// Writes a record by encoding any `Encodable` value. /// /// When the first record is encoded, the headers (the field names in the /// struct) are written prior to encoding the record. /// /// The type that is being encoded should correspond to *one full CSV /// record*. This can be a single struct, or arbitrarily nested tuples, /// arrays, Vecs, and structs, as long as all scalar types (integers, /// floats, characters, strings, collections containing one scalar, and /// enums with 0 or 1 scalar arguments) are fields in structs. Enums with /// zero arguments encode to their name, while enums of one argument encode /// to their constituent value. `Option` types are also supported. (`None` /// encodes to an empty field.) /// /// Note that single-element tuple structs (the newtype pattern) are /// supported. Unfortunately, to provide this functionality, a heuristic is /// necessary to differentiate field names in normal structs from those in /// tuple structs. As a result, field names in normal structs should not be /// of the form `_field{}` where `{}` is its position in the struct. /// /// # Example /// /// This example encodes zoo animals that may not have a description. /// /// ```rust /// extern crate rustc_serialize; /// # extern crate typed_csv; /// # fn main() { /// /// #[derive(RustcEncodable)] /// struct Count(usize); /// /// #[derive(RustcEncodable)] /// enum Group { /// Bird, /// Mammal, /// } /// /// #[derive(RustcEncodable)] /// struct Part1 { /// count: Count, /// animal: &'static str, /// } /// /// #[derive(RustcEncodable)] /// struct Part2 { /// group: Group, /// description: Option<&'static str>, /// } /// /// let records = vec![ /// ( /// Part1 { count: Count(7), animal: "penguin" }, /// Part2 { group: Group::Bird, description: Some("happy") }, /// ), /// ( /// Part1 { count: Count(10), animal: "cheetah" }, /// Part2 { group: Group::Mammal, description: Some("fast") }, /// ), /// ( /// Part1 { count: Count(4), animal: "armadillo" }, /// Part2 { group: Group::Mammal, description: Some("armored") },
/// ), /// ( /// Part1 { count: Count(9), animal: "platypus" }, /// Part2 { group: Group::Mammal, description: None }, /// ), /// ]; /// /// let mut wtr = typed_csv::Writer::from_memory(); /// for record in records.into_iter() { /// wtr.encode(record).unwrap(); /// } /// /// assert_eq!(wtr.as_string(), "\ /// count,animal,group,description /// 7,penguin,Bird,happy /// 10,cheetah,Mammal,fast /// 4,armadillo,Mammal,armored /// 9,platypus,Mammal, /// "); /// # } /// ``` pub fn encode(&mut self, row: E) -> csv::Result<()> { // Write headers if this is the first row. if self.first_row { let mut field_names_encoder = FieldNamesEncoder::new(); row.encode(&mut field_names_encoder)?; self.csv.write(field_names_encoder.into_field_names().into_iter())?; self.first_row = false; } // Write row. let mut erecord = csv::Encoded::new(); row.encode(&mut erecord)?; self.csv.write(erecord.unwrap().into_iter()) } /// Flushes the underlying buffer. pub fn flush(&mut self) -> Result<()> { self.csv.flush() } } #[cfg(test)] mod tests { use super::Writer; #[derive(RustcEncodable)] struct SimpleStruct { a: usize, b: usize, } #[test] fn test_struct() { let mut w = Writer::from_memory(); let s1 = SimpleStruct { a: 0, b: 1 }; w.encode(s1).unwrap(); let s2 = SimpleStruct { a: 3, b: 4 }; w.encode(s2).unwrap(); assert_eq!(w.as_string(), "a,b\n0,1\n3,4\n"); } #[test] fn test_tuple_of_structs() { let mut w = Writer::from_memory(); let s1 = SimpleStruct { a: 0, b: 1 }; let s2 = SimpleStruct { a: 2, b: 3 }; w.encode((s1, s2)).unwrap(); let s3 = SimpleStruct { a: 4, b: 5 }; let s4 = SimpleStruct { a: 6, b: 7 }; w.encode((s3, s4)).unwrap(); assert_eq!(w.as_string(), "a,b,a,b\n0,1,2,3\n4,5,6,7\n"); } #[test] fn test_array_of_structs() { let mut w = Writer::from_memory(); let s1 = SimpleStruct { a: 0, b: 1 }; let s2 = SimpleStruct { a: 2, b: 3 }; w.encode([s1, s2]).unwrap(); let s3 = SimpleStruct { a: 4, b: 5 }; let s4 = SimpleStruct { a: 6, b: 7 }; w.encode([s3, s4]).unwrap(); assert_eq!(w.as_string(), "a,b,a,b\n0,1,2,3\n4,5,6,7\n"); } #[test] fn test_vec_of_structs() { let mut w = Writer::from_memory(); let s1 = SimpleStruct { a: 0, b: 1 }; let s2 = SimpleStruct { a: 2, b: 3 }; w.encode(vec![s1, s2]).unwrap(); let s3 = SimpleStruct { a: 4, b: 5 }; let s4 = SimpleStruct { a: 6, b: 7 }; w.encode(vec![s3, s4]).unwrap(); assert_eq!(w.as_string(), "a,b,a,b\n0,1,2,3\n4,5,6,7\n"); } #[test] fn test_nested_tuples_of_structs() { let mut w = Writer::from_memory(); w.encode((SimpleStruct { a: 0, b: 1 }, (SimpleStruct { a: 2, b: 3 }), (SimpleStruct { a: 4, b: 5 }, (SimpleStruct { a: 6, b: 7 })))) .unwrap(); w.encode((SimpleStruct { a: 8, b: 9 }, (SimpleStruct { a: 10, b: 11 }), (SimpleStruct { a: 12, b: 13 }, (SimpleStruct { a: 14, b: 15 })))) .unwrap(); assert_eq!(w.as_string(), "a,b,a,b,a,b,a,b\n0,1,2,3,4,5,6,7\n8,9,10,11,12,13,14,15\n"); } #[derive(RustcEncodable)] struct StructWithLengthOneSeqs { a: [usize; 1], b: Vec<usize>, c: (usize), } #[test] fn test_struct_with_len_one_seqs() { let mut w = Writer::from_memory(); let s1 = StructWithLengthOneSeqs { a: [0], b: vec![1], c: (2), }; w.encode(s1).unwrap(); let s2 = StructWithLengthOneSeqs { a: [3], b: vec![4], c: (5), }; w.encode(s2).unwrap(); assert_eq!(w.as_string(), "a,b,c\n0,1,2\n3,4,5\n"); } #[derive(RustcEncodable)] struct StructOfStruct { p: SimpleStruct, q: (usize, usize), } #[should_panic] #[test] fn test_struct_of_struct() { let mut w = Writer::from_memory(); let s = StructOfStruct { p: SimpleStruct { a: 0, b: 1 }, q: (2, 3), }; w.encode(s).unwrap(); } 
#[derive(RustcEncodable)] struct StructWithLongerSeq { a: [usize; 2], } #[should_panic] #[test] fn test_struct_with_longer_seq() { let mut w = Writer::from_memory(); let s = StructWithLongerSeq { a: [0, 1] }; w.encode(s).unwrap(); } #[should_panic] #[test] fn test_vec() { let mut w = Writer::from_memory(); let array = vec![0, 1]; w.encode(array).unwrap(); } }
random_line_split
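The doc comment in the row above warns that normal struct fields must not be named `_field{}` (with `{}` the field's position), because that shape is the heuristic's signal for tuple-struct fields. A plausible form of the check, written as a hypothetical helper (the crate's real implementation lives in `field_names_encoder` and is not shown here):

// Hypothetical sketch of the newtype heuristic described in the docs:
// a field named exactly `_field<index>` is assumed to come from a tuple
// struct, so it would not be emitted as a CSV header.
fn looks_like_tuple_field(name: &str, index: usize) -> bool {
    name == format!("_field{}", index)
}

fn main() {
    assert!(looks_like_tuple_field("_field0", 0));
    assert!(!looks_like_tuple_field("count", 0));
    // A normal struct field named `_field1` at position 1 would be
    // misclassified and dropped from the header row -- hence the warning.
    assert!(looks_like_tuple_field("_field1", 1));
}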
mod.rs
mod field_names_encoder; use self::field_names_encoder::FieldNamesEncoder; use csv::{self, Result}; use rustc_serialize::Encodable; use std::fs::File; use std::io::{BufWriter, Write}; use std::marker::PhantomData; use std::path::Path; /// A CSV writer that automatically writes the headers. /// /// This writer provides a convenient interface for encoding CSV data. While /// creating CSV data is much easier than parsing it, having a writer can be /// convenient because it can handle quoting for you automatically. Moreover, /// this particular writer supports [`rustc_serialize::Encodable`][Encodable] /// types, which makes it easy to write your custom types as CSV records and /// automatically generate headers. /// /// All CSV data produced by this writer, with default options, conforms with /// [RFC 4180](http://tools.ietf.org/html/rfc4180). /// /// One slight deviation is that records with a single empty field are always /// encoded as `""`. This ensures that the record is not skipped since some /// CSV parsers will ignore consecutive record terminators (like the one in /// this crate). /// /// If you don't want the writer to automatically write the header row, /// use the [`csv`](https://github.com/BurntSushi/rust-csv) crate instead. /// /// # Example /// /// Here's an example that encodes a zoo of animals: /// /// ```rust /// extern crate rustc_serialize; /// # extern crate typed_csv; /// # fn main() { /// /// #[derive(RustcEncodable)] /// struct Record { /// count: usize, /// animal: &'static str, /// description: &'static str, /// } /// /// let records = vec![ /// Record { count: 7, animal: "penguin", description: "happy" }, /// Record { count: 10, animal: "cheetah", description: "fast" }, /// Record { count: 4, animal: "armadillo", description: "armored" }, /// Record { count: 9, animal: "platypus", description: "unique" }, /// ]; /// /// let mut wtr = typed_csv::Writer::from_memory(); /// for record in records.into_iter() { /// wtr.encode(record).unwrap(); /// } /// /// assert_eq!(wtr.as_string(), "\ /// count,animal,description /// 7,penguin,happy /// 10,cheetah,fast /// 4,armadillo,armored /// 9,platypus,unique /// "); /// # } /// ``` /// /// [Encodable]: https://doc.rust-lang.org/rustc-serialize/rustc_serialize/trait.Encodable.html pub struct Writer<W: Write, E: Encodable> { csv: csv::Writer<W>, first_row: bool, record_type: PhantomData<E>, } impl<E: Encodable> Writer<File, E> { /// Creates a new typed CSV writer that writes to the file path given. /// /// The file is created if it does not already exist and is truncated /// otherwise. pub fn from_file<P: AsRef<Path>>(path: P) -> Result<Writer<File, E>> { Ok(Self::from_csv_writer(csv::Writer::from_file(path)?)) } } impl<W: Write, E: Encodable> Writer<W, E> { /// Creates a new typed CSV writer that writes to the `io::Write` given. /// /// Note that the writer is buffered for you automatically. pub fn from_writer(w: W) -> Writer<W, E> { Self::from_csv_writer(csv::Writer::from_writer(w)) } /// Creates a new typed CSV writer that writes to the CSV writer given. /// /// This lets you specify options to the underlying CSV writer (e.g. to use /// a different delimiter). pub fn from_csv_writer(w: csv::Writer<W>) -> Writer<W, E> { Writer { csv: w, first_row: true, record_type: PhantomData, } } /// Creates a new typed CSV writer that writes to the buffer given. /// /// This lets you specify your own buffered writer (e.g., use a different /// capacity). All other constructors wrap the writer given in a buffer /// with default capacity.
pub fn from_buffer(buf: BufWriter<W>) -> Writer<W, E> { Self::from_csv_writer(csv::Writer::from_buffer(buf)) } } impl<E: Encodable> Writer<Vec<u8>, E> { /// Creates a new CSV writer that writes to an in memory buffer. At any /// time, `as_string` or `as_bytes` can be called to retrieve the /// cumulative CSV data. pub fn from_memory() -> Writer<Vec<u8>, E> { Self::from_csv_writer(csv::Writer::from_memory()) } /// Returns the written CSV data as a string. pub fn as_string(&mut self) -> &str { self.csv.as_string() } /// Returns the encoded CSV data as raw bytes. pub fn as_bytes(&mut self) -> &[u8] { self.csv.as_bytes() } /// Convert the Writer into a string of written CSV data pub fn into_string(self) -> String { self.csv.into_string() } /// Convert the Writer into a vector of encoded CSV bytes. pub fn into_bytes(self) -> Vec<u8>
} impl<W: Write, E: Encodable> Writer<W, E> { /// Writes a record by encoding any `Encodable` value. /// /// When the first record is encoded, the headers (the field names in the /// struct) are written prior to encoding the record. /// /// The type that is being encoded should correspond to *one full CSV /// record*. This can be a single struct, or arbitrarily nested tuples, /// arrays, Vecs, and structs, as long as all scalar types (integers, /// floats, characters, strings, collections containing one scalar, and /// enums with 0 or 1 scalar arguments) are fields in structs. Enums with /// zero arguments encode to their name, while enums of one argument encode /// to their constituent value. `Option` types are also supported. (`None` /// encodes to an empty field.) /// /// Note that single-element tuple structs (the newtype pattern) are /// supported. Unfortunately, to provide this functionality, a heuristic is /// necessary to differentiate field names in normal structs from those in /// tuple structs. As a result, field names in normal structs should not be /// of the form `_field{}` where `{}` is its position in the struct. /// /// # Example /// /// This example encodes zoo animals that may not have a description. /// /// ```rust /// extern crate rustc_serialize; /// # extern crate typed_csv; /// # fn main() { /// /// #[derive(RustcEncodable)] /// struct Count(usize); /// /// #[derive(RustcEncodable)] /// enum Group { /// Bird, /// Mammal, /// } /// /// #[derive(RustcEncodable)] /// struct Part1 { /// count: Count, /// animal: &'static str, /// } /// /// #[derive(RustcEncodable)] /// struct Part2 { /// group: Group, /// description: Option<&'static str>, /// } /// /// let records = vec![ /// ( /// Part1 { count: Count(7), animal: "penguin" }, /// Part2 { group: Group::Bird, description: Some("happy") }, /// ), /// ( /// Part1 { count: Count(10), animal: "cheetah" }, /// Part2 { group: Group::Mammal, description: Some("fast") }, /// ), /// ( /// Part1 { count: Count(4), animal: "armadillo" }, /// Part2 { group: Group::Mammal, description: Some("armored") }, /// ), /// ( /// Part1 { count: Count(9), animal: "platypus" }, /// Part2 { group: Group::Mammal, description: None }, /// ), /// ]; /// /// let mut wtr = typed_csv::Writer::from_memory(); /// for record in records.into_iter() { /// wtr.encode(record).unwrap(); /// } /// /// assert_eq!(wtr.as_string(), "\ /// count,animal,group,description /// 7,penguin,Bird,happy /// 10,cheetah,Mammal,fast /// 4,armadillo,Mammal,armored /// 9,platypus,Mammal, /// "); /// # } /// ``` pub fn encode(&mut self, row: E) -> csv::Result<()> { // Write headers if this is the first row. if self.first_row { let mut field_names_encoder = FieldNamesEncoder::new(); row.encode(&mut field_names_encoder)?; self.csv.write(field_names_encoder.into_field_names().into_iter())?; self.first_row = false; } // Write row. let mut erecord = csv::Encoded::new(); row.encode(&mut erecord)?; self.csv.write(erecord.unwrap().into_iter()) } /// Flushes the underlying buffer.
pub fn flush(&mut self) -> Result<()> { self.csv.flush() } } #[cfg(test)] mod tests { use super::Writer; #[derive(RustcEncodable)] struct SimpleStruct { a: usize, b: usize, } #[test] fn test_struct() { let mut w = Writer::from_memory(); let s1 = SimpleStruct { a: 0, b: 1 }; w.encode(s1).unwrap(); let s2 = SimpleStruct { a: 3, b: 4 }; w.encode(s2).unwrap(); assert_eq!(w.as_string(), "a,b\n0,1\n3,4\n"); } #[test] fn test_tuple_of_structs() { let mut w = Writer::from_memory(); let s1 = SimpleStruct { a: 0, b: 1 }; let s2 = SimpleStruct { a: 2, b: 3 }; w.encode((s1, s2)).unwrap(); let s3 = SimpleStruct { a: 4, b: 5 }; let s4 = SimpleStruct { a: 6, b: 7 }; w.encode((s3, s4)).unwrap(); assert_eq!(w.as_string(), "a,b,a,b\n0,1,2,3\n4,5,6,7\n"); } #[test] fn test_array_of_structs() { let mut w = Writer::from_memory(); let s1 = SimpleStruct { a: 0, b: 1 }; let s2 = SimpleStruct { a: 2, b: 3 }; w.encode([s1, s2]).unwrap(); let s3 = SimpleStruct { a: 4, b: 5 }; let s4 = SimpleStruct { a: 6, b: 7 }; w.encode([s3, s4]).unwrap(); assert_eq!(w.as_string(), "a,b,a,b\n0,1,2,3\n4,5,6,7\n"); } #[test] fn test_vec_of_structs() { let mut w = Writer::from_memory(); let s1 = SimpleStruct { a: 0, b: 1 }; let s2 = SimpleStruct { a: 2, b: 3 }; w.encode(vec![s1, s2]).unwrap(); let s3 = SimpleStruct { a: 4, b: 5 }; let s4 = SimpleStruct { a: 6, b: 7 }; w.encode(vec![s3, s4]).unwrap(); assert_eq!(w.as_string(), "a,b,a,b\n0,1,2,3\n4,5,6,7\n"); } #[test] fn test_nested_tuples_of_structs() { let mut w = Writer::from_memory(); w.encode((SimpleStruct { a: 0, b: 1 }, (SimpleStruct { a: 2, b: 3 }), (SimpleStruct { a: 4, b: 5 }, (SimpleStruct { a: 6, b: 7 })))) .unwrap(); w.encode((SimpleStruct { a: 8, b: 9 }, (SimpleStruct { a: 10, b: 11 }), (SimpleStruct { a: 12, b: 13 }, (SimpleStruct { a: 14, b: 15 })))) .unwrap(); assert_eq!(w.as_string(), "a,b,a,b,a,b,a,b\n0,1,2,3,4,5,6,7\n8,9,10,11,12,13,14,15\n"); } #[derive(RustcEncodable)] struct StructWithLengthOneSeqs { a: [usize; 1], b: Vec<usize>, c: (usize), } #[test] fn test_struct_with_len_one_seqs() { let mut w = Writer::from_memory(); let s1 = StructWithLengthOneSeqs { a: [0], b: vec![1], c: (2), }; w.encode(s1).unwrap(); let s2 = StructWithLengthOneSeqs { a: [3], b: vec![4], c: (5), }; w.encode(s2).unwrap(); assert_eq!(w.as_string(), "a,b,c\n0,1,2\n3,4,5\n"); } #[derive(RustcEncodable)] struct StructOfStruct { p: SimpleStruct, q: (usize, usize), } #[should_panic] #[test] fn test_struct_of_struct() { let mut w = Writer::from_memory(); let s = StructOfStruct { p: SimpleStruct { a: 0, b: 1 }, q: (2, 3), }; w.encode(s).unwrap(); } #[derive(RustcEncodable)] struct StructWithLongerSeq { a: [usize; 2], } #[should_panic] #[test] fn test_struct_with_longer_seq() { let mut w = Writer::from_memory(); let s = StructWithLongerSeq { a: [0, 1] }; w.encode(s).unwrap(); } #[should_panic] #[test] fn test_vec() { let mut w = Writer::from_memory(); let array = vec![0, 1]; w.encode(array).unwrap(); } }
{ self.csv.into_bytes() }
identifier_body
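The `identifier_body` completion above fills in `into_bytes`, the consuming twin of `as_bytes`. The split follows the standard Rust accessor convention: borrowing methods return views and leave the writer usable, while `self`-taking methods surrender the buffer without copying. A minimal sketch over a bare `Vec<u8>`, independent of the csv crate (note the real `as_bytes` takes `&mut self` because the underlying writer may need to flush first):

// Minimal stand-in for the writer's memory-backed accessors.
struct MemWriter {
    buf: Vec<u8>,
}

impl MemWriter {
    // Borrowing view, like `as_bytes`: the writer remains usable afterwards.
    fn as_bytes(&self) -> &[u8] {
        &self.buf
    }

    // Consuming accessor, like `into_bytes`: takes ownership of the buffer
    // without copying it.
    fn into_bytes(self) -> Vec<u8> {
        self.buf
    }
}

fn main() {
    let w = MemWriter { buf: b"a,b\n0,1\n".to_vec() };
    assert_eq!(w.as_bytes(), b"a,b\n0,1\n"); // `w` is still owned here
    let bytes = w.into_bytes(); // `w` is moved; no further use is possible
    assert_eq!(bytes, b"a,b\n0,1\n".to_vec());
}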
http_ece.rs
use base64::{self, URL_SAFE_NO_PAD}; use crate::error::WebPushError; use crate::message::WebPushPayload; use ring::rand::SecureRandom; use ring::{aead::{self, BoundKey}, agreement, hkdf, rand}; use crate::vapid::VapidSignature; pub enum ContentEncoding { AesGcm, Aes128Gcm, } pub struct HttpEce<'a> { peer_public_key: &'a [u8], peer_secret: &'a [u8], encoding: ContentEncoding, rng: rand::SystemRandom, vapid_signature: Option<VapidSignature>, } #[derive(Debug, PartialEq)] struct EceKey<T: core::fmt::Debug + PartialEq>(T); impl hkdf::KeyType for EceKey<usize> { fn len(&self) -> usize { self.0 } } impl From<hkdf::Okm<'_, EceKey<usize>>> for EceKey<Vec<u8>> { fn from(okm: hkdf::Okm<EceKey<usize>>) -> Self { let mut r = vec![0u8; okm.len().0]; okm.fill(&mut r).unwrap(); EceKey(r) } } #[derive(Debug, PartialEq, Default)] struct EceNonce { used: bool, nonce: Vec<u8>, } impl EceNonce { fn fill(&mut self, nonce: Vec<u8>) { self.nonce = nonce; self.used = false; } } impl aead::NonceSequence for EceNonce { fn advance(&mut self) -> Result<aead::Nonce, ring::error::Unspecified> { if self.used { return Err(ring::error::Unspecified); } let mut nonce = [0u8; 12]; for (i, n) in self.nonce.iter().enumerate() { if i >= 12 { return Err(ring::error::Unspecified); } nonce[i] = *n; } self.used = true; Ok(aead::Nonce::assume_unique_for_key(nonce)) } } impl<'a> HttpEce<'a> { /// Create a new encryptor. The content encoding has preliminary support for /// Aes128Gcm, which is the 8th draft of the Encrypted Content-Encoding, but /// currently using it will return an error when trying to encrypt. There is /// no real support yet for the encoding in web browsers. /// /// `peer_public_key` is the `p256dh` and `peer_secret` the `auth` from /// browser subscription info. pub fn new( encoding: ContentEncoding, peer_public_key: &'a [u8], peer_secret: &'a [u8], vapid_signature: Option<VapidSignature>, ) -> HttpEce<'a> { HttpEce { rng: rand::SystemRandom::new(), peer_public_key: peer_public_key, peer_secret: peer_secret, encoding: encoding, vapid_signature: vapid_signature, } } /// Encrypts a payload. The maximum length for the payload is 3052 /// bytes, which after padding and the AEAD tag is the largest that works /// with Google's and Mozilla's push servers. pub fn encrypt(&self, content: &'a [u8]) -> Result<WebPushPayload, WebPushError> { if content.len() > 3052 { return Err(WebPushError::PayloadTooLarge); } let private_key = agreement::EphemeralPrivateKey::generate(&agreement::ECDH_P256, &self.rng)?; let public_key = private_key.compute_public_key()?; let mut salt_bytes = [0u8; 16]; self.rng.fill(&mut salt_bytes)?; let peer_public_key = agreement::UnparsedPublicKey::new(&agreement::ECDH_P256, self.peer_public_key); agreement::agree_ephemeral( private_key, &peer_public_key, WebPushError::Unspecified, |shared_secret| match self.encoding { ContentEncoding::AesGcm => { let mut payload = vec![0; 3054]; front_pad(content, &mut payload); self.aes_gcm( shared_secret, public_key.as_ref(), &salt_bytes, &mut payload, )?; Ok(WebPushPayload { content: payload.to_vec(), crypto_headers: self.generate_headers(public_key.as_ref(), &salt_bytes), content_encoding: "aesgcm", }) } ContentEncoding::Aes128Gcm => Err(WebPushError::NotImplemented), }, )
} pub fn generate_headers( &self, public_key: &'a [u8], salt: &'a [u8], ) -> Vec<(&'static str, String)> { let mut crypto_headers = Vec::new(); let mut crypto_key = format!("dh={}", base64::encode_config(public_key, URL_SAFE_NO_PAD)); if let Some(ref signature) = self.vapid_signature { crypto_key = format!("{}; p256ecdsa={}", crypto_key, signature.auth_k); let sig_s: String = signature.into(); crypto_headers.push(("Authorization", sig_s)); }; crypto_headers.push(("Crypto-Key", crypto_key)); crypto_headers.push(( "Encryption", format!("salt={}", base64::encode_config(&salt, URL_SAFE_NO_PAD)), )); crypto_headers } /// The aesgcm encrypted content-encoding, draft 3. pub fn aes_gcm( &self, shared_secret: &'a [u8], as_public_key: &'a [u8], salt_bytes: &'a [u8], payload: &'a mut Vec<u8>, ) -> Result<(), WebPushError> { let mut context = Vec::with_capacity(140); context.extend_from_slice("P-256\0".as_bytes()); context.push((self.peer_public_key.len() >> 8) as u8); context.push((self.peer_public_key.len() & 0xff) as u8); context.extend_from_slice(self.peer_public_key); context.push((as_public_key.len() >> 8) as u8); context.push((as_public_key.len() & 0xff) as u8); context.extend_from_slice(as_public_key); let client_auth_secret = hkdf::Salt::new(hkdf::HKDF_SHA256, &self.peer_secret); let salt = hkdf::Salt::new(hkdf::HKDF_SHA256, salt_bytes); let EceKey(prk) = client_auth_secret .extract(shared_secret) .expand(&[&"Content-Encoding: auth\0".as_bytes()], EceKey(32)) .unwrap() .into(); let mut cek_info = Vec::with_capacity(165); cek_info.extend_from_slice("Content-Encoding: aesgcm\0".as_bytes()); cek_info.extend_from_slice(&context); let EceKey(content_encryption_key) = salt .extract(&prk) .expand(&[&cek_info], EceKey(16)) .unwrap() .into(); let mut nonce_info = Vec::with_capacity(164); nonce_info.extend_from_slice("Content-Encoding: nonce\0".as_bytes()); nonce_info.extend_from_slice(&context); let EceKey(nonce_bytes) = salt .extract(&prk) .expand(&[&nonce_info], EceKey(12)) .unwrap() .into(); let mut nonce = EceNonce::default(); nonce.fill(nonce_bytes); let unbound_key = aead::UnboundKey::new(&aead::AES_128_GCM, &content_encryption_key)?; let mut sealing_key = aead::SealingKey::new(unbound_key, nonce); sealing_key.seal_in_place_append_tag(aead::Aad::empty(), payload)?; Ok(()) } } fn front_pad(payload: &[u8], output: &mut [u8]) { let payload_len = payload.len(); let max_payload = output.len() - 2; let padding_size = max_payload - payload.len(); output[0] = (padding_size >> 8) as u8; output[1] = (padding_size & 0xff) as u8; for i in 0..payload_len { output[padding_size + i + 2] = payload[i]; } } #[cfg(test)] mod tests { use base64::{self, URL_SAFE, URL_SAFE_NO_PAD}; use crate::error::WebPushError; use crate::http_ece::{front_pad, ContentEncoding, HttpEce}; use crate::vapid::VapidSignature; #[test] fn test_payload_too_big() { let p256dh = base64::decode_config("BLMaF9ffKBiWQLCKvTHb6LO8Nb6dcUh6TItC455vu2kElga6PQvUmaFyCdykxY2nOSSL3yKgfbmFLRTUaGv4yV8", URL_SAFE).unwrap(); let auth = base64::decode_config("xS03Fj5ErfTNH_l9WHE9Ig", URL_SAFE).unwrap(); let http_ece = HttpEce::new(ContentEncoding::AesGcm, &p256dh, &auth, None); let content = [0u8; 3801]; assert_eq!( Err(WebPushError::PayloadTooLarge), http_ece.encrypt(&content) ); } #[test] fn test_aes128gcm() { let p256dh = base64::decode_config("BLMbF9ffKBiWQLCKvTHb6LO8Nb6dcUh6TItC455vu2kElga6PQvUmaFyCdykxY2nOSSL3yKgfbmFLRTUaGv4yV8", URL_SAFE).unwrap(); let auth = base64::decode_config("xS03Fi5ErfTNH_l9WHE9Ig", URL_SAFE).unwrap(); let http_ece = 
HttpEce::new(ContentEncoding::Aes128Gcm, &p256dh, &auth, None); let content = [0u8; 10]; assert_eq!( Err(WebPushError::NotImplemented), http_ece.encrypt(&content) ); } #[test] fn test_aesgcm() { let p256dh = base64::decode_config("BLMbF9ffKBiWQLCKvTHb6LO8Nb6dcUh6TItC455vu2kElga6PQvUmaFyCdykxY2nOSSL3yKgfbmFLRTUaGv4yV8", URL_SAFE).unwrap(); let auth = base64::decode_config("xS03Fi5ErfTNH_l9WHE9Ig", URL_SAFE).unwrap(); let http_ece = HttpEce::new(ContentEncoding::AesGcm, &p256dh, &auth, None); let shared_secret = base64::decode_config("9vcttSQ8tq-Wi_lLQ_xA37tkYssMtJsdY6xENG5f1sE=", URL_SAFE) .unwrap(); let as_pubkey = base64::decode_config("BBXpqeMbtt1iwSoYzs7uRL-QVSKTAuAPrunJoNyW2wMKeVBUyNFCqbkmpVTZOVbqWpwpr_-6TpJvk1qT8T-iOYs=", URL_SAFE).unwrap(); let salt_bytes = base64::decode_config("YMcMuxqRkchXwy7vMwNl1Q==", URL_SAFE).unwrap(); let mut payload = "This is test data. XXX".as_bytes().to_vec(); http_ece .aes_gcm(&shared_secret, &as_pubkey, &salt_bytes, &mut payload) .unwrap(); assert_eq!( "tmE7-emq6iasohjXNMue0i0vn5o7EIOyP-bKyDoM1teHLcLtg44", base64::encode_config(&payload.to_vec(), URL_SAFE_NO_PAD) ); } #[test] fn test_headers_with_vapid() { let as_pubkey = base64::decode_config( "BBXpqeMbtt1iwSoYzs7uRL-QVSKTAuAPrunJoNyW2wMKeVBUyNFCqbkmpVTZOVbqWpwpr_-6TpJvk1qT8T-iOYs=", URL_SAFE ).unwrap(); let salt_bytes = base64::decode_config("YMcMuxqRkchXwy7vMwNl1Q==", URL_SAFE).unwrap(); let p256dh = base64::decode_config( "BLMbF9ffKBiWQLCKvTHb6LO8Nb6dcUh6TItC455vu2kElga6PQvUmaFyCdykxY2nOSSL3yKgfbmFLRTUaGv4yV8", URL_SAFE ).unwrap(); let auth = base64::decode_config("xS03Fi5ErfTNH_l9WHE9Ig", URL_SAFE).unwrap(); let vapid_signature = VapidSignature { auth_t: String::from("foo"), auth_k: String::from("bar"), }; let http_ece = HttpEce::new( ContentEncoding::AesGcm, &p256dh, &auth, Some(vapid_signature), ); assert_eq!( vec![ ("Authorization", "WebPush foo".to_string()), ("Crypto-Key", "dh=BBXpqeMbtt1iwSoYzs7uRL-QVSKTAuAPrunJoNyW2wMKeVBUyNFCqbkmpVTZOVbqWpwpr_-6TpJvk1qT8T-iOYs; p256ecdsa=bar".to_string()), ("Encryption", "salt=YMcMuxqRkchXwy7vMwNl1Q".to_string())], http_ece.generate_headers(&as_pubkey, &salt_bytes)) } #[test] fn test_headers_without_vapid() { let as_pubkey = base64::decode_config( "BBXpqeMbtt1iwSoYzs7uRL-QVSKTAuAPrunJoNyW2wMKeVBUyNFCqbkmpVTZOVbqWpwpr_-6TpJvk1qT8T-iOYs=", URL_SAFE ).unwrap(); let salt_bytes = base64::decode_config("YMcMuxqRkchXwy7vMwNl1Q==", URL_SAFE).unwrap(); let p256dh = base64::decode_config( "BLMbF9ffKBiWQLCKvTHb6LO8Nb6dcUh6TItC455vu2kElga6PQvUmaFyCdykxY2nOSSL3yKgfbmFLRTUaGv4yV8", URL_SAFE ).unwrap(); let auth = base64::decode_config("xS03Fi5ErfTNH_l9WHE9Ig", URL_SAFE).unwrap(); let http_ece = HttpEce::new(ContentEncoding::AesGcm, &p256dh, &auth, None); assert_eq!( vec![ ("Crypto-Key", "dh=BBXpqeMbtt1iwSoYzs7uRL-QVSKTAuAPrunJoNyW2wMKeVBUyNFCqbkmpVTZOVbqWpwpr_-6TpJvk1qT8T-iOYs".to_string()), ("Encryption", "salt=YMcMuxqRkchXwy7vMwNl1Q".to_string())], http_ece.generate_headers(&as_pubkey, &salt_bytes)) } #[test] fn test_front_pad() { // writes the padding count in the beginning, zeroes, content and again space for the encryption tag let content = "naukio"; let mut output = [0u8; 30]; front_pad(content.as_bytes(), &mut output); assert_eq!( vec![0, 22, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 110, 97, 117, 107, 105, 111], output ); } }
random_line_split
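The `front_pad` helper in the row above reserves the first two output bytes for a big-endian padding count and right-aligns the content behind the zero padding. Rerunning its arithmetic on the vector from `test_front_pad`: a 30-byte buffer leaves 28 usable bytes, "naukio" occupies 6, so the padding count is 22 and the output begins with the bytes [0, 22]. The helper as defined in the module, with that check inlined:

// `front_pad` from the module above: a two-byte big-endian padding count,
// then zero padding, then the payload right-aligned in the buffer.
fn front_pad(payload: &[u8], output: &mut [u8]) {
    let payload_len = payload.len();
    let max_payload = output.len() - 2;
    let padding_size = max_payload - payload_len;
    output[0] = (padding_size >> 8) as u8;
    output[1] = (padding_size & 0xff) as u8;
    for i in 0..payload_len {
        output[padding_size + i + 2] = payload[i];
    }
}

fn main() {
    let mut output = [0u8; 30];
    front_pad(b"naukio", &mut output);
    // 30 - 2 = 28 usable bytes, minus 6 bytes of content = 22 bytes of padding.
    assert_eq!(output[..2], [0, 22]);
    assert_eq!(output[24..], *b"naukio");
}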
http_ece.rs
use base64::{self, URL_SAFE_NO_PAD}; use crate::error::WebPushError; use crate::message::WebPushPayload; use ring::rand::SecureRandom; use ring::{aead::{self, BoundKey}, agreement, hkdf, rand}; use crate::vapid::VapidSignature; pub enum ContentEncoding { AesGcm, Aes128Gcm, } pub struct HttpEce<'a> { peer_public_key: &'a [u8], peer_secret: &'a [u8], encoding: ContentEncoding, rng: rand::SystemRandom, vapid_signature: Option<VapidSignature>, } #[derive(Debug, PartialEq)] struct EceKey<T: core::fmt::Debug + PartialEq>(T); impl hkdf::KeyType for EceKey<usize> { fn len(&self) -> usize { self.0 } } impl From<hkdf::Okm<'_, EceKey<usize>>> for EceKey<Vec<u8>> { fn from(okm: hkdf::Okm<EceKey<usize>>) -> Self { let mut r = vec![0u8; okm.len().0]; okm.fill(&mut r).unwrap(); EceKey(r) } } #[derive(Debug, PartialEq, Default)] struct EceNonce { used: bool, nonce: Vec<u8>, } impl EceNonce { fn fill(&mut self, nonce: Vec<u8>) { self.nonce = nonce; self.used = false; } } impl aead::NonceSequence for EceNonce { fn advance(&mut self) -> Result<aead::Nonce, ring::error::Unspecified> { if self.used { return Err(ring::error::Unspecified); } let mut nonce = [0u8; 12]; for (i, n) in self.nonce.iter().enumerate() { if i >= 12 { return Err(ring::error::Unspecified); } nonce[i] = *n; } self.used = true; Ok(aead::Nonce::assume_unique_for_key(nonce)) } } impl<'a> HttpEce<'a> { /// Create a new encryptor. The content encoding has preliminary support for /// Aes128Gcm, which is the 8th draft of the Encrypted Content-Encoding, but /// currently using it will return an error when trying to encrypt. There is /// no real support yet for the encoding in web browsers. /// /// `peer_public_key` is the `p256dh` and `peer_secret` the `auth` from /// browser subscription info. pub fn new( encoding: ContentEncoding, peer_public_key: &'a [u8], peer_secret: &'a [u8], vapid_signature: Option<VapidSignature>, ) -> HttpEce<'a> { HttpEce { rng: rand::SystemRandom::new(), peer_public_key: peer_public_key, peer_secret: peer_secret, encoding: encoding, vapid_signature: vapid_signature, } } /// Encrypts a payload. The maximum length for the payload is 3052 /// bytes, which after padding and the AEAD tag is the largest that works /// with Google's and Mozilla's push servers.
pub fn encrypt(&self, content: &'a [u8]) -> Result<WebPushPayload, WebPushError> { if content.len() > 3052 { return Err(WebPushError::PayloadTooLarge); } let private_key = agreement::EphemeralPrivateKey::generate(&agreement::ECDH_P256, &self.rng)?; let public_key = private_key.compute_public_key()?; let mut salt_bytes = [0u8; 16]; self.rng.fill(&mut salt_bytes)?; let peer_public_key = agreement::UnparsedPublicKey::new(&agreement::ECDH_P256, self.peer_public_key); agreement::agree_ephemeral( private_key, &peer_public_key, WebPushError::Unspecified, |shared_secret| match self.encoding { ContentEncoding::AesGcm => { let mut payload = vec![0; 3054]; front_pad(content, &mut payload); self.aes_gcm( shared_secret, public_key.as_ref(), &salt_bytes, &mut payload, )?; Ok(WebPushPayload { content: payload.to_vec(), crypto_headers: self.generate_headers(public_key.as_ref(), &salt_bytes), content_encoding: "aesgcm", }) } ContentEncoding::Aes128Gcm => Err(WebPushError::NotImplemented), }, ) } pub fn generate_headers( &self, public_key: &'a [u8], salt: &'a [u8], ) -> Vec<(&'static str, String)> { let mut crypto_headers = Vec::new(); let mut crypto_key = format!("dh={}", base64::encode_config(public_key, URL_SAFE_NO_PAD)); if let Some(ref signature) = self.vapid_signature { crypto_key = format!("{}; p256ecdsa={}", crypto_key, signature.auth_k); let sig_s: String = signature.into(); crypto_headers.push(("Authorization", sig_s)); }; crypto_headers.push(("Crypto-Key", crypto_key)); crypto_headers.push(( "Encryption", format!("salt={}", base64::encode_config(&salt, URL_SAFE_NO_PAD)), )); crypto_headers } /// The aesgcm encrypted content-encoding, draft 3. pub fn aes_gcm( &self, shared_secret: &'a [u8], as_public_key: &'a [u8], salt_bytes: &'a [u8], payload: &'a mut Vec<u8>, ) -> Result<(), WebPushError> { let mut context = Vec::with_capacity(140); context.extend_from_slice("P-256\0".as_bytes()); context.push((self.peer_public_key.len() >> 8) as u8); context.push((self.peer_public_key.len() & 0xff) as u8); context.extend_from_slice(self.peer_public_key); context.push((as_public_key.len() >> 8) as u8); context.push((as_public_key.len() & 0xff) as u8); context.extend_from_slice(as_public_key); let client_auth_secret = hkdf::Salt::new(hkdf::HKDF_SHA256, &self.peer_secret); let salt = hkdf::Salt::new(hkdf::HKDF_SHA256, salt_bytes); let EceKey(prk) = client_auth_secret .extract(shared_secret) .expand(&[&"Content-Encoding: auth\0".as_bytes()], EceKey(32)) .unwrap() .into(); let mut cek_info = Vec::with_capacity(165); cek_info.extend_from_slice("Content-Encoding: aesgcm\0".as_bytes()); cek_info.extend_from_slice(&context); let EceKey(content_encryption_key) = salt .extract(&prk) .expand(&[&cek_info], EceKey(16)) .unwrap() .into(); let mut nonce_info = Vec::with_capacity(164); nonce_info.extend_from_slice("Content-Encoding: nonce\0".as_bytes()); nonce_info.extend_from_slice(&context); let EceKey(nonce_bytes) = salt .extract(&prk) .expand(&[&nonce_info], EceKey(12)) .unwrap() .into(); let mut nonce = EceNonce::default(); nonce.fill(nonce_bytes); let unbound_key = aead::UnboundKey::new(&aead::AES_128_GCM, &content_encryption_key)?; let mut sealing_key = aead::SealingKey::new(unbound_key, nonce); sealing_key.seal_in_place_append_tag(aead::Aad::empty(), payload)?; Ok(()) } } fn front_pad(payload: &[u8], output: &mut [u8]) { let payload_len = payload.len(); let max_payload = output.len() - 2; let padding_size = max_payload - payload.len(); output[0] = (padding_size >> 8) as u8; output[1] = (padding_size & 0xff) as u8; for 
i in 0..payload_len { output[padding_size + i + 2] = payload[i]; } } #[cfg(test)] mod tests { use base64::{self, URL_SAFE, URL_SAFE_NO_PAD}; use crate::error::WebPushError; use crate::http_ece::{front_pad, ContentEncoding, HttpEce}; use crate::vapid::VapidSignature; #[test] fn test_payload_too_big() { let p256dh = base64::decode_config("BLMaF9ffKBiWQLCKvTHb6LO8Nb6dcUh6TItC455vu2kElga6PQvUmaFyCdykxY2nOSSL3yKgfbmFLRTUaGv4yV8", URL_SAFE).unwrap(); let auth = base64::decode_config("xS03Fj5ErfTNH_l9WHE9Ig", URL_SAFE).unwrap(); let http_ece = HttpEce::new(ContentEncoding::AesGcm, &p256dh, &auth, None); let content = [0u8; 3801]; assert_eq!( Err(WebPushError::PayloadTooLarge), http_ece.encrypt(&content) ); } #[test] fn test_aes128gcm() { let p256dh = base64::decode_config("BLMbF9ffKBiWQLCKvTHb6LO8Nb6dcUh6TItC455vu2kElga6PQvUmaFyCdykxY2nOSSL3yKgfbmFLRTUaGv4yV8", URL_SAFE).unwrap(); let auth = base64::decode_config("xS03Fi5ErfTNH_l9WHE9Ig", URL_SAFE).unwrap(); let http_ece = HttpEce::new(ContentEncoding::Aes128Gcm, &p256dh, &auth, None); let content = [0u8; 10]; assert_eq!( Err(WebPushError::NotImplemented), http_ece.encrypt(&content) ); } #[test] fn test_aesgcm() { let p256dh = base64::decode_config("BLMbF9ffKBiWQLCKvTHb6LO8Nb6dcUh6TItC455vu2kElga6PQvUmaFyCdykxY2nOSSL3yKgfbmFLRTUaGv4yV8", URL_SAFE).unwrap(); let auth = base64::decode_config("xS03Fi5ErfTNH_l9WHE9Ig", URL_SAFE).unwrap(); let http_ece = HttpEce::new(ContentEncoding::AesGcm, &p256dh, &auth, None); let shared_secret = base64::decode_config("9vcttSQ8tq-Wi_lLQ_xA37tkYssMtJsdY6xENG5f1sE=", URL_SAFE) .unwrap(); let as_pubkey = base64::decode_config("BBXpqeMbtt1iwSoYzs7uRL-QVSKTAuAPrunJoNyW2wMKeVBUyNFCqbkmpVTZOVbqWpwpr_-6TpJvk1qT8T-iOYs=", URL_SAFE).unwrap(); let salt_bytes = base64::decode_config("YMcMuxqRkchXwy7vMwNl1Q==", URL_SAFE).unwrap(); let mut payload = "This is test data. 
XXX".as_bytes().to_vec(); http_ece .aes_gcm(&shared_secret, &as_pubkey, &salt_bytes, &mut payload) .unwrap(); assert_eq!( "tmE7-emq6iasohjXNMue0i0vn5o7EIOyP-bKyDoM1teHLcLtg44", base64::encode_config(&payload.to_vec(), URL_SAFE_NO_PAD) ); } #[test] fn test_headers_with_vapid() { let as_pubkey = base64::decode_config( "BBXpqeMbtt1iwSoYzs7uRL-QVSKTAuAPrunJoNyW2wMKeVBUyNFCqbkmpVTZOVbqWpwpr_-6TpJvk1qT8T-iOYs=", URL_SAFE ).unwrap(); let salt_bytes = base64::decode_config("YMcMuxqRkchXwy7vMwNl1Q==", URL_SAFE).unwrap(); let p256dh = base64::decode_config( "BLMbF9ffKBiWQLCKvTHb6LO8Nb6dcUh6TItC455vu2kElga6PQvUmaFyCdykxY2nOSSL3yKgfbmFLRTUaGv4yV8", URL_SAFE ).unwrap(); let auth = base64::decode_config("xS03Fi5ErfTNH_l9WHE9Ig", URL_SAFE).unwrap(); let vapid_signature = VapidSignature { auth_t: String::from("foo"), auth_k: String::from("bar"), }; let http_ece = HttpEce::new( ContentEncoding::AesGcm, &p256dh, &auth, Some(vapid_signature), ); assert_eq!( vec![ ("Authorization", "WebPush foo".to_string()), ("Crypto-Key", "dh=BBXpqeMbtt1iwSoYzs7uRL-QVSKTAuAPrunJoNyW2wMKeVBUyNFCqbkmpVTZOVbqWpwpr_-6TpJvk1qT8T-iOYs; p256ecdsa=bar".to_string()), ("Encryption", "salt=YMcMuxqRkchXwy7vMwNl1Q".to_string())], http_ece.generate_headers(&as_pubkey, &salt_bytes)) } #[test] fn test_headers_without_vapid() { let as_pubkey = base64::decode_config( "BBXpqeMbtt1iwSoYzs7uRL-QVSKTAuAPrunJoNyW2wMKeVBUyNFCqbkmpVTZOVbqWpwpr_-6TpJvk1qT8T-iOYs=", URL_SAFE ).unwrap(); let salt_bytes = base64::decode_config("YMcMuxqRkchXwy7vMwNl1Q==", URL_SAFE).unwrap(); let p256dh = base64::decode_config( "BLMbF9ffKBiWQLCKvTHb6LO8Nb6dcUh6TItC455vu2kElga6PQvUmaFyCdykxY2nOSSL3yKgfbmFLRTUaGv4yV8", URL_SAFE ).unwrap(); let auth = base64::decode_config("xS03Fi5ErfTNH_l9WHE9Ig", URL_SAFE).unwrap(); let http_ece = HttpEce::new(ContentEncoding::AesGcm, &p256dh, &auth, None); assert_eq!( vec![ ("Crypto-Key", "dh=BBXpqeMbtt1iwSoYzs7uRL-QVSKTAuAPrunJoNyW2wMKeVBUyNFCqbkmpVTZOVbqWpwpr_-6TpJvk1qT8T-iOYs".to_string()), ("Encryption", "salt=YMcMuxqRkchXwy7vMwNl1Q".to_string())], http_ece.generate_headers(&as_pubkey, &salt_bytes)) } #[test] fn
() { // writes the padding count in the beginning, zeroes, content and again space for the encryption tag let content = "naukio"; let mut output = [0u8; 30]; front_pad(content.as_bytes(), &mut output); assert_eq!( vec![0, 22, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 110, 97, 117, 107, 105, 111], output ); } }
test_front_pad
identifier_name
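The `EceNonce` type that recurs through the `http_ece.rs` rows above is a single-use `aead::NonceSequence`: the first `advance` yields the 12-byte nonce and flips `used`, and every later call returns `Unspecified`, so one sealing key can never reuse its nonce. The same guard sketched without the `ring` types (a bare `Result<[u8; 12], ()>` stands in for `ring`'s nonce and error types):

// Stand-alone sketch of the one-shot guard in `EceNonce::advance`:
// the nonce can be handed out exactly once, then the sequence is spent.
struct OneShotNonce {
    used: bool,
    nonce: [u8; 12],
}

impl OneShotNonce {
    fn advance(&mut self) -> Result<[u8; 12], ()> {
        if self.used {
            // Refusing a second nonce prevents nonce reuse under one key,
            // which would void AES-GCM's confidentiality and integrity.
            return Err(());
        }
        self.used = true;
        Ok(self.nonce)
    }
}

fn main() {
    let mut seq = OneShotNonce { used: false, nonce: [7u8; 12] };
    assert!(seq.advance().is_ok());
    assert!(seq.advance().is_err()); // a second use is rejected
}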
http_ece.rs
use base64::{self, URL_SAFE_NO_PAD}; use crate::error::WebPushError; use crate::message::WebPushPayload; use ring::rand::SecureRandom; use ring::{aead::{self, BoundKey}, agreement, hkdf, rand}; use crate::vapid::VapidSignature; pub enum ContentEncoding { AesGcm, Aes128Gcm, } pub struct HttpEce<'a> { peer_public_key: &'a [u8], peer_secret: &'a [u8], encoding: ContentEncoding, rng: rand::SystemRandom, vapid_signature: Option<VapidSignature>, } #[derive(Debug, PartialEq)] struct EceKey<T: core::fmt::Debug + PartialEq>(T); impl hkdf::KeyType for EceKey<usize> { fn len(&self) -> usize { self.0 } } impl From<hkdf::Okm<'_, EceKey<usize>>> for EceKey<Vec<u8>> { fn from(okm: hkdf::Okm<EceKey<usize>>) -> Self { let mut r = vec![0u8; okm.len().0]; okm.fill(&mut r).unwrap(); EceKey(r) } } #[derive(Debug, PartialEq, Default)] struct EceNonce { used: bool, nonce: Vec<u8>, } impl EceNonce { fn fill(&mut self, nonce: Vec<u8>) { self.nonce = nonce; self.used = false; } } impl aead::NonceSequence for EceNonce { fn advance(&mut self) -> Result<aead::Nonce, ring::error::Unspecified> { if self.used { return Err(ring::error::Unspecified); } let mut nonce = [0u8; 12]; for (i, n) in self.nonce.iter().enumerate() { if i >= 12 { return Err(ring::error::Unspecified); } nonce[i] = *n; } self.used = true; Ok(aead::Nonce::assume_unique_for_key(nonce)) } } impl<'a> HttpEce<'a> { /// Create a new encryptor. The content encoding has preliminary support for /// Aes128Gcm, which is the 8th draft of the Encrypted Content-Encoding, but /// currently using it will return an error when trying to encrypt. There is /// no real support yet for the encoding in web browsers. /// /// `peer_public_key` is the `p256dh` and `peer_secret` the `auth` from /// browser subscription info. pub fn new( encoding: ContentEncoding, peer_public_key: &'a [u8], peer_secret: &'a [u8], vapid_signature: Option<VapidSignature>, ) -> HttpEce<'a> { HttpEce { rng: rand::SystemRandom::new(), peer_public_key: peer_public_key, peer_secret: peer_secret, encoding: encoding, vapid_signature: vapid_signature, } } /// Encrypts a payload. The maximum length for the payload is 3052 /// bytes, which after padding and the AEAD tag is the largest that works /// with Google's and Mozilla's push servers. pub fn encrypt(&self, content: &'a [u8]) -> Result<WebPushPayload, WebPushError> { if content.len() > 3052 { return Err(WebPushError::PayloadTooLarge); } let private_key = agreement::EphemeralPrivateKey::generate(&agreement::ECDH_P256, &self.rng)?; let public_key = private_key.compute_public_key()?; let mut salt_bytes = [0u8; 16]; self.rng.fill(&mut salt_bytes)?; let peer_public_key = agreement::UnparsedPublicKey::new(&agreement::ECDH_P256, self.peer_public_key); agreement::agree_ephemeral( private_key, &peer_public_key, WebPushError::Unspecified, |shared_secret| match self.encoding { ContentEncoding::AesGcm => { let mut payload = vec![0; 3054]; front_pad(content, &mut payload); self.aes_gcm( shared_secret, public_key.as_ref(), &salt_bytes, &mut payload, )?; Ok(WebPushPayload { content: payload.to_vec(), crypto_headers: self.generate_headers(public_key.as_ref(), &salt_bytes), content_encoding: "aesgcm", }) } ContentEncoding::Aes128Gcm => Err(WebPushError::NotImplemented), }, ) } pub fn generate_headers( &self, public_key: &'a [u8], salt: &'a [u8], ) -> Vec<(&'static str, String)>
/// The aesgcm encrypted content-encoding, draft 3. pub fn aes_gcm( &self, shared_secret: &'a [u8], as_public_key: &'a [u8], salt_bytes: &'a [u8], payload: &'a mut Vec<u8>, ) -> Result<(), WebPushError> { let mut context = Vec::with_capacity(140); context.extend_from_slice("P-256\0".as_bytes()); context.push((self.peer_public_key.len() >> 8) as u8); context.push((self.peer_public_key.len() & 0xff) as u8); context.extend_from_slice(self.peer_public_key); context.push((as_public_key.len() >> 8) as u8); context.push((as_public_key.len() & 0xff) as u8); context.extend_from_slice(as_public_key); let client_auth_secret = hkdf::Salt::new(hkdf::HKDF_SHA256, &self.peer_secret); let salt = hkdf::Salt::new(hkdf::HKDF_SHA256, salt_bytes); let EceKey(prk) = client_auth_secret .extract(shared_secret) .expand(&[&"Content-Encoding: auth\0".as_bytes()], EceKey(32)) .unwrap() .into(); let mut cek_info = Vec::with_capacity(165); cek_info.extend_from_slice("Content-Encoding: aesgcm\0".as_bytes()); cek_info.extend_from_slice(&context); let EceKey(content_encryption_key) = salt .extract(&prk) .expand(&[&cek_info], EceKey(16)) .unwrap() .into(); let mut nonce_info = Vec::with_capacity(164); nonce_info.extend_from_slice("Content-Encoding: nonce\0".as_bytes()); nonce_info.extend_from_slice(&context); let EceKey(nonce_bytes) = salt .extract(&prk) .expand(&[&nonce_info], EceKey(12)) .unwrap() .into(); let mut nonce = EceNonce::default(); nonce.fill(nonce_bytes); let unbound_key = aead::UnboundKey::new(&aead::AES_128_GCM, &content_encryption_key)?; let mut sealing_key = aead::SealingKey::new(unbound_key, nonce); sealing_key.seal_in_place_append_tag(aead::Aad::empty(), payload)?; Ok(()) } } fn front_pad(payload: &[u8], output: &mut [u8]) { let payload_len = payload.len(); let max_payload = output.len() - 2; let padding_size = max_payload - payload.len(); output[0] = (padding_size >> 8) as u8; output[1] = (padding_size & 0xff) as u8; for i in 0..payload_len { output[padding_size + i + 2] = payload[i]; } } #[cfg(test)] mod tests { use base64::{self, URL_SAFE, URL_SAFE_NO_PAD}; use crate::error::WebPushError; use crate::http_ece::{front_pad, ContentEncoding, HttpEce}; use crate::vapid::VapidSignature; #[test] fn test_payload_too_big() { let p256dh = base64::decode_config("BLMaF9ffKBiWQLCKvTHb6LO8Nb6dcUh6TItC455vu2kElga6PQvUmaFyCdykxY2nOSSL3yKgfbmFLRTUaGv4yV8", URL_SAFE).unwrap(); let auth = base64::decode_config("xS03Fj5ErfTNH_l9WHE9Ig", URL_SAFE).unwrap(); let http_ece = HttpEce::new(ContentEncoding::AesGcm, &p256dh, &auth, None); let content = [0u8; 3801]; assert_eq!( Err(WebPushError::PayloadTooLarge), http_ece.encrypt(&content) ); } #[test] fn test_aes128gcm() { let p256dh = base64::decode_config("BLMbF9ffKBiWQLCKvTHb6LO8Nb6dcUh6TItC455vu2kElga6PQvUmaFyCdykxY2nOSSL3yKgfbmFLRTUaGv4yV8", URL_SAFE).unwrap(); let auth = base64::decode_config("xS03Fi5ErfTNH_l9WHE9Ig", URL_SAFE).unwrap(); let http_ece = HttpEce::new(ContentEncoding::Aes128Gcm, &p256dh, &auth, None); let content = [0u8; 10]; assert_eq!( Err(WebPushError::NotImplemented), http_ece.encrypt(&content) ); } #[test] fn test_aesgcm() { let p256dh = base64::decode_config("BLMbF9ffKBiWQLCKvTHb6LO8Nb6dcUh6TItC455vu2kElga6PQvUmaFyCdykxY2nOSSL3yKgfbmFLRTUaGv4yV8", URL_SAFE).unwrap(); let auth = base64::decode_config("xS03Fi5ErfTNH_l9WHE9Ig", URL_SAFE).unwrap(); let http_ece = HttpEce::new(ContentEncoding::AesGcm, &p256dh, &auth, None); let shared_secret = base64::decode_config("9vcttSQ8tq-Wi_lLQ_xA37tkYssMtJsdY6xENG5f1sE=", URL_SAFE) .unwrap(); let as_pubkey = 
base64::decode_config("BBXpqeMbtt1iwSoYzs7uRL-QVSKTAuAPrunJoNyW2wMKeVBUyNFCqbkmpVTZOVbqWpwpr_-6TpJvk1qT8T-iOYs=", URL_SAFE).unwrap(); let salt_bytes = base64::decode_config("YMcMuxqRkchXwy7vMwNl1Q==", URL_SAFE).unwrap(); let mut payload = "This is test data. XXX".as_bytes().to_vec(); http_ece .aes_gcm(&shared_secret, &as_pubkey, &salt_bytes, &mut payload) .unwrap(); assert_eq!( "tmE7-emq6iasohjXNMue0i0vn5o7EIOyP-bKyDoM1teHLcLtg44", base64::encode_config(&payload.to_vec(), URL_SAFE_NO_PAD) ); } #[test] fn test_headers_with_vapid() { let as_pubkey = base64::decode_config( "BBXpqeMbtt1iwSoYzs7uRL-QVSKTAuAPrunJoNyW2wMKeVBUyNFCqbkmpVTZOVbqWpwpr_-6TpJvk1qT8T-iOYs=", URL_SAFE ).unwrap(); let salt_bytes = base64::decode_config("YMcMuxqRkchXwy7vMwNl1Q==", URL_SAFE).unwrap(); let p256dh = base64::decode_config( "BLMbF9ffKBiWQLCKvTHb6LO8Nb6dcUh6TItC455vu2kElga6PQvUmaFyCdykxY2nOSSL3yKgfbmFLRTUaGv4yV8", URL_SAFE ).unwrap(); let auth = base64::decode_config("xS03Fi5ErfTNH_l9WHE9Ig", URL_SAFE).unwrap(); let vapid_signature = VapidSignature { auth_t: String::from("foo"), auth_k: String::from("bar"), }; let http_ece = HttpEce::new( ContentEncoding::AesGcm, &p256dh, &auth, Some(vapid_signature), ); assert_eq!( vec![ ("Authorization", "WebPush foo".to_string()), ("Crypto-Key", "dh=BBXpqeMbtt1iwSoYzs7uRL-QVSKTAuAPrunJoNyW2wMKeVBUyNFCqbkmpVTZOVbqWpwpr_-6TpJvk1qT8T-iOYs; p256ecdsa=bar".to_string()), ("Encryption", "salt=YMcMuxqRkchXwy7vMwNl1Q".to_string())], http_ece.generate_headers(&as_pubkey, &salt_bytes)) } #[test] fn test_headers_without_vapid() { let as_pubkey = base64::decode_config( "BBXpqeMbtt1iwSoYzs7uRL-QVSKTAuAPrunJoNyW2wMKeVBUyNFCqbkmpVTZOVbqWpwpr_-6TpJvk1qT8T-iOYs=", URL_SAFE ).unwrap(); let salt_bytes = base64::decode_config("YMcMuxqRkchXwy7vMwNl1Q==", URL_SAFE).unwrap(); let p256dh = base64::decode_config( "BLMbF9ffKBiWQLCKvTHb6LO8Nb6dcUh6TItC455vu2kElga6PQvUmaFyCdykxY2nOSSL3yKgfbmFLRTUaGv4yV8", URL_SAFE ).unwrap(); let auth = base64::decode_config("xS03Fi5ErfTNH_l9WHE9Ig", URL_SAFE).unwrap(); let http_ece = HttpEce::new(ContentEncoding::AesGcm, &p256dh, &auth, None); assert_eq!( vec![ ("Crypto-Key", "dh=BBXpqeMbtt1iwSoYzs7uRL-QVSKTAuAPrunJoNyW2wMKeVBUyNFCqbkmpVTZOVbqWpwpr_-6TpJvk1qT8T-iOYs".to_string()), ("Encryption", "salt=YMcMuxqRkchXwy7vMwNl1Q".to_string())], http_ece.generate_headers(&as_pubkey, &salt_bytes)) } #[test] fn test_front_pad() { // writes the padding count in the beginning, zeroes, content and again space for the encryption tag let content = "naukio"; let mut output = [0u8; 30]; front_pad(content.as_bytes(), &mut output); assert_eq!( vec![0, 22, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 110, 97, 117, 107, 105, 111], output ); } }
{
        let mut crypto_headers = Vec::new();
        let mut crypto_key = format!("dh={}", base64::encode_config(public_key, URL_SAFE_NO_PAD));
        if let Some(ref signature) = self.vapid_signature {
            crypto_key = format!("{}; p256ecdsa={}", crypto_key, signature.auth_k);
            let sig_s: String = signature.into();
            crypto_headers.push(("Authorization", sig_s));
        }
        crypto_headers.push(("Crypto-Key", crypto_key));
        crypto_headers.push((
            "Encryption",
            format!("salt={}", base64::encode_config(&salt, URL_SAFE_NO_PAD)),
        ));
        crypto_headers
    }
identifier_body
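The padding scheme used by `front_pad` in this file is easiest to see from its inverse. Below is a minimal sketch of a receiver-side helper (a hypothetical `front_unpad`, not part of the crate) that reads the big-endian two-byte padding length and skips the zero padding:

/// Hypothetical inverse of `front_pad`: returns the unpadded payload,
/// or `None` if the record is too short for its own padding header.
fn front_unpad(record: &[u8]) -> Option<&[u8]> {
    if record.len() < 2 {
        return None;
    }
    // The first two bytes are the padding length, big-endian.
    let padding_size = ((record[0] as usize) << 8) | record[1] as usize;
    // Skip the length header plus `padding_size` zero bytes of padding.
    record.get(2 + padding_size..)
}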
http_ece.rs
use base64::{self, URL_SAFE_NO_PAD};
use crate::error::WebPushError;
use crate::message::WebPushPayload;
use crate::vapid::VapidSignature;
use ring::rand::SecureRandom;
use ring::{aead::{self, BoundKey}, agreement, hkdf, rand};

pub enum ContentEncoding {
    AesGcm,
    Aes128Gcm,
}

pub struct HttpEce<'a> {
    peer_public_key: &'a [u8],
    peer_secret: &'a [u8],
    encoding: ContentEncoding,
    rng: rand::SystemRandom,
    vapid_signature: Option<VapidSignature>,
}

#[derive(Debug, PartialEq)]
struct EceKey<T: core::fmt::Debug + PartialEq>(T);

impl hkdf::KeyType for EceKey<usize> {
    fn len(&self) -> usize {
        self.0
    }
}

impl From<hkdf::Okm<'_, EceKey<usize>>> for EceKey<Vec<u8>> {
    fn from(okm: hkdf::Okm<EceKey<usize>>) -> Self {
        let mut r = vec![0u8; okm.len().0];
        okm.fill(&mut r).unwrap();
        EceKey(r)
    }
}

#[derive(Debug, PartialEq, Default)]
struct EceNonce {
    used: bool,
    nonce: Vec<u8>,
}

impl EceNonce {
    fn fill(&mut self, nonce: Vec<u8>) {
        self.nonce = nonce;
        self.used = false;
    }
}

impl aead::NonceSequence for EceNonce {
    fn advance(&mut self) -> Result<aead::Nonce, ring::error::Unspecified> {
        if self.used {
            return Err(ring::error::Unspecified);
        }
        let mut nonce = [0u8; 12];
        for (i, n) in self.nonce.iter().enumerate() {
            if i >= 12 {
                return Err(ring::error::Unspecified);
            }
            nonce[i] = *n;
        }
        self.used = true;
        Ok(aead::Nonce::assume_unique_for_key(nonce))
    }
}

impl<'a> HttpEce<'a> {
    /// Create a new encryptor. The content encoding has preliminary support for
    /// Aes128Gcm, which is the 8th draft of the Encrypted Content-Encoding, but
    /// currently using it will return an error when trying to encrypt. There is
    /// no real support yet for the encoding in web browsers.
    ///
    /// `peer_public_key` is the `p256dh` and `peer_secret` the `auth` from
    /// browser subscription info.
    pub fn new(
        encoding: ContentEncoding,
        peer_public_key: &'a [u8],
        peer_secret: &'a [u8],
        vapid_signature: Option<VapidSignature>,
    ) -> HttpEce<'a> {
        HttpEce {
            rng: rand::SystemRandom::new(),
            peer_public_key,
            peer_secret,
            encoding,
            vapid_signature,
        }
    }

    /// Encrypts a payload. The maximum length for the payload is 3052 bytes:
    /// the padded aesgcm record built here is 3054 bytes, of which two bytes
    /// hold the padding length, and that is what works with Google's and
    /// Mozilla's push servers.
    pub fn encrypt(&self, content: &'a [u8]) -> Result<WebPushPayload, WebPushError> {
        if content.len() > 3052
let private_key = agreement::EphemeralPrivateKey::generate(&agreement::ECDH_P256, &self.rng)?; let public_key = private_key.compute_public_key()?; let mut salt_bytes = [0u8; 16]; self.rng.fill(&mut salt_bytes)?; let peer_public_key = agreement::UnparsedPublicKey::new(&agreement::ECDH_P256, self.peer_public_key); agreement::agree_ephemeral( private_key, &peer_public_key, WebPushError::Unspecified, |shared_secret| match self.encoding { ContentEncoding::AesGcm => { let mut payload = vec![0; 3054]; front_pad(content, &mut payload); self.aes_gcm( shared_secret, public_key.as_ref(), &salt_bytes, &mut payload, )?; Ok(WebPushPayload { content: payload.to_vec(), crypto_headers: self.generate_headers(public_key.as_ref(), &salt_bytes), content_encoding: "aesgcm", }) } ContentEncoding::Aes128Gcm => Err(WebPushError::NotImplemented), }, ) } pub fn generate_headers( &self, public_key: &'a [u8], salt: &'a [u8], ) -> Vec<(&'static str, String)> { let mut crypto_headers = Vec::new(); let mut crypto_key = format!("dh={}", base64::encode_config(public_key, URL_SAFE_NO_PAD)); if let Some(ref signature) = self.vapid_signature { crypto_key = format!("{}; p256ecdsa={}", crypto_key, signature.auth_k); let sig_s: String = signature.into(); crypto_headers.push(("Authorization", sig_s)); }; crypto_headers.push(("Crypto-Key", crypto_key)); crypto_headers.push(( "Encryption", format!("salt={}", base64::encode_config(&salt, URL_SAFE_NO_PAD)), )); crypto_headers } /// The aesgcm encrypted content-encoding, draft 3. pub fn aes_gcm( &self, shared_secret: &'a [u8], as_public_key: &'a [u8], salt_bytes: &'a [u8], payload: &'a mut Vec<u8>, ) -> Result<(), WebPushError> { let mut context = Vec::with_capacity(140); context.extend_from_slice("P-256\0".as_bytes()); context.push((self.peer_public_key.len() >> 8) as u8); context.push((self.peer_public_key.len() & 0xff) as u8); context.extend_from_slice(self.peer_public_key); context.push((as_public_key.len() >> 8) as u8); context.push((as_public_key.len() & 0xff) as u8); context.extend_from_slice(as_public_key); let client_auth_secret = hkdf::Salt::new(hkdf::HKDF_SHA256, &self.peer_secret); let salt = hkdf::Salt::new(hkdf::HKDF_SHA256, salt_bytes); let EceKey(prk) = client_auth_secret .extract(shared_secret) .expand(&[&"Content-Encoding: auth\0".as_bytes()], EceKey(32)) .unwrap() .into(); let mut cek_info = Vec::with_capacity(165); cek_info.extend_from_slice("Content-Encoding: aesgcm\0".as_bytes()); cek_info.extend_from_slice(&context); let EceKey(content_encryption_key) = salt .extract(&prk) .expand(&[&cek_info], EceKey(16)) .unwrap() .into(); let mut nonce_info = Vec::with_capacity(164); nonce_info.extend_from_slice("Content-Encoding: nonce\0".as_bytes()); nonce_info.extend_from_slice(&context); let EceKey(nonce_bytes) = salt .extract(&prk) .expand(&[&nonce_info], EceKey(12)) .unwrap() .into(); let mut nonce = EceNonce::default(); nonce.fill(nonce_bytes); let unbound_key = aead::UnboundKey::new(&aead::AES_128_GCM, &content_encryption_key)?; let mut sealing_key = aead::SealingKey::new(unbound_key, nonce); sealing_key.seal_in_place_append_tag(aead::Aad::empty(), payload)?; Ok(()) } } fn front_pad(payload: &[u8], output: &mut [u8]) { let payload_len = payload.len(); let max_payload = output.len() - 2; let padding_size = max_payload - payload.len(); output[0] = (padding_size >> 8) as u8; output[1] = (padding_size & 0xff) as u8; for i in 0..payload_len { output[padding_size + i + 2] = payload[i]; } } #[cfg(test)] mod tests { use base64::{self, URL_SAFE, URL_SAFE_NO_PAD}; use 
crate::error::WebPushError; use crate::http_ece::{front_pad, ContentEncoding, HttpEce}; use crate::vapid::VapidSignature; #[test] fn test_payload_too_big() { let p256dh = base64::decode_config("BLMaF9ffKBiWQLCKvTHb6LO8Nb6dcUh6TItC455vu2kElga6PQvUmaFyCdykxY2nOSSL3yKgfbmFLRTUaGv4yV8", URL_SAFE).unwrap(); let auth = base64::decode_config("xS03Fj5ErfTNH_l9WHE9Ig", URL_SAFE).unwrap(); let http_ece = HttpEce::new(ContentEncoding::AesGcm, &p256dh, &auth, None); let content = [0u8; 3801]; assert_eq!( Err(WebPushError::PayloadTooLarge), http_ece.encrypt(&content) ); } #[test] fn test_aes128gcm() { let p256dh = base64::decode_config("BLMbF9ffKBiWQLCKvTHb6LO8Nb6dcUh6TItC455vu2kElga6PQvUmaFyCdykxY2nOSSL3yKgfbmFLRTUaGv4yV8", URL_SAFE).unwrap(); let auth = base64::decode_config("xS03Fi5ErfTNH_l9WHE9Ig", URL_SAFE).unwrap(); let http_ece = HttpEce::new(ContentEncoding::Aes128Gcm, &p256dh, &auth, None); let content = [0u8; 10]; assert_eq!( Err(WebPushError::NotImplemented), http_ece.encrypt(&content) ); } #[test] fn test_aesgcm() { let p256dh = base64::decode_config("BLMbF9ffKBiWQLCKvTHb6LO8Nb6dcUh6TItC455vu2kElga6PQvUmaFyCdykxY2nOSSL3yKgfbmFLRTUaGv4yV8", URL_SAFE).unwrap(); let auth = base64::decode_config("xS03Fi5ErfTNH_l9WHE9Ig", URL_SAFE).unwrap(); let http_ece = HttpEce::new(ContentEncoding::AesGcm, &p256dh, &auth, None); let shared_secret = base64::decode_config("9vcttSQ8tq-Wi_lLQ_xA37tkYssMtJsdY6xENG5f1sE=", URL_SAFE) .unwrap(); let as_pubkey = base64::decode_config("BBXpqeMbtt1iwSoYzs7uRL-QVSKTAuAPrunJoNyW2wMKeVBUyNFCqbkmpVTZOVbqWpwpr_-6TpJvk1qT8T-iOYs=", URL_SAFE).unwrap(); let salt_bytes = base64::decode_config("YMcMuxqRkchXwy7vMwNl1Q==", URL_SAFE).unwrap(); let mut payload = "This is test data. XXX".as_bytes().to_vec(); http_ece .aes_gcm(&shared_secret, &as_pubkey, &salt_bytes, &mut payload) .unwrap(); assert_eq!( "tmE7-emq6iasohjXNMue0i0vn5o7EIOyP-bKyDoM1teHLcLtg44", base64::encode_config(&payload.to_vec(), URL_SAFE_NO_PAD) ); } #[test] fn test_headers_with_vapid() { let as_pubkey = base64::decode_config( "BBXpqeMbtt1iwSoYzs7uRL-QVSKTAuAPrunJoNyW2wMKeVBUyNFCqbkmpVTZOVbqWpwpr_-6TpJvk1qT8T-iOYs=", URL_SAFE ).unwrap(); let salt_bytes = base64::decode_config("YMcMuxqRkchXwy7vMwNl1Q==", URL_SAFE).unwrap(); let p256dh = base64::decode_config( "BLMbF9ffKBiWQLCKvTHb6LO8Nb6dcUh6TItC455vu2kElga6PQvUmaFyCdykxY2nOSSL3yKgfbmFLRTUaGv4yV8", URL_SAFE ).unwrap(); let auth = base64::decode_config("xS03Fi5ErfTNH_l9WHE9Ig", URL_SAFE).unwrap(); let vapid_signature = VapidSignature { auth_t: String::from("foo"), auth_k: String::from("bar"), }; let http_ece = HttpEce::new( ContentEncoding::AesGcm, &p256dh, &auth, Some(vapid_signature), ); assert_eq!( vec![ ("Authorization", "WebPush foo".to_string()), ("Crypto-Key", "dh=BBXpqeMbtt1iwSoYzs7uRL-QVSKTAuAPrunJoNyW2wMKeVBUyNFCqbkmpVTZOVbqWpwpr_-6TpJvk1qT8T-iOYs; p256ecdsa=bar".to_string()), ("Encryption", "salt=YMcMuxqRkchXwy7vMwNl1Q".to_string())], http_ece.generate_headers(&as_pubkey, &salt_bytes)) } #[test] fn test_headers_without_vapid() { let as_pubkey = base64::decode_config( "BBXpqeMbtt1iwSoYzs7uRL-QVSKTAuAPrunJoNyW2wMKeVBUyNFCqbkmpVTZOVbqWpwpr_-6TpJvk1qT8T-iOYs=", URL_SAFE ).unwrap(); let salt_bytes = base64::decode_config("YMcMuxqRkchXwy7vMwNl1Q==", URL_SAFE).unwrap(); let p256dh = base64::decode_config( "BLMbF9ffKBiWQLCKvTHb6LO8Nb6dcUh6TItC455vu2kElga6PQvUmaFyCdykxY2nOSSL3yKgfbmFLRTUaGv4yV8", URL_SAFE ).unwrap(); let auth = base64::decode_config("xS03Fi5ErfTNH_l9WHE9Ig", URL_SAFE).unwrap(); let http_ece = HttpEce::new(ContentEncoding::AesGcm, &p256dh, &auth, 
None); assert_eq!( vec![ ("Crypto-Key", "dh=BBXpqeMbtt1iwSoYzs7uRL-QVSKTAuAPrunJoNyW2wMKeVBUyNFCqbkmpVTZOVbqWpwpr_-6TpJvk1qT8T-iOYs".to_string()), ("Encryption", "salt=YMcMuxqRkchXwy7vMwNl1Q".to_string())], http_ece.generate_headers(&as_pubkey, &salt_bytes)) } #[test] fn test_front_pad() { // writes the padding count in the beginning, zeroes, content and again space for the encryption tag let content = "naukio"; let mut output = [0u8; 30]; front_pad(content.as_bytes(), &mut output); assert_eq!( vec![0, 22, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 110, 97, 117, 107, 105, 111], output ); } }
{ return Err(WebPushError::PayloadTooLarge); }
conditional_block
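`EceNonce` above enforces that each sealing key encrypts exactly one record: `advance()` hands out the nonce once and refuses any further use. A small illustrative test (a sketch only, assuming just the types in this file; it would live inside the same module so the private struct is visible):

#[test]
fn ece_nonce_is_single_use() {
    use ring::aead::NonceSequence;

    let mut nonce = EceNonce::default();
    nonce.fill(vec![0u8; 12]);
    // The first advance succeeds and consumes the nonce...
    assert!(nonce.advance().is_ok());
    // ...and a second advance fails, preventing nonce reuse under the key.
    assert!(nonce.advance().is_err());
}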
read_pool.rs
// Copyright 2020 EinsteinDB Project Authors. Licensed under Apache-2.0.

use futures::channel::oneshot;
use futures::future::TryFutureExt;
use prometheus::IntGauge;
use std::future::Future;
use thiserror::Error;
use yatp::pool::Remote;
use yatp::queue::Extras;
use yatp::task::future::TaskCell;

/// A read pool.
/// Either three priority-ordered future pools or a single yatp pool;
/// in both forms it is used to limit the number of concurrent reads.
pub enum ReadPool {
    FuturePools {
        read_pool_high: FuturePool,
        read_pool_normal: FuturePool,
        read_pool_low: FuturePool,
    },
    Yatp {
        pool: yatp::pool::Pool<TaskCell>,
        running_tasks: IntGauge,
        max_tasks: usize,
        pool_size: usize,
    },
}

impl ReadPool {
    pub fn handle(&self) -> ReadPoolHandle {
        match self {
            ReadPool::FuturePools {
                read_pool_high,
                read_pool_normal,
                read_pool_low,
            } => ReadPoolHandle::FuturePools {
                read_pool_high: read_pool_high.clone(),
                read_pool_normal: read_pool_normal.clone(),
                read_pool_low: read_pool_low.clone(),
            },
            ReadPool::Yatp {
                pool,
                running_tasks,
                max_tasks,
                pool_size,
            } => ReadPoolHandle::Yatp {
                remote: pool.remote().clone(),
                running_tasks: running_tasks.clone(),
                max_tasks: *max_tasks,
                pool_size: *pool_size,
            },
        }
    }
}

#[derive(Clone)]
pub enum ReadPoolHandle {
    FuturePools {
        read_pool_high: FuturePool,
        read_pool_normal: FuturePool,
        read_pool_low: FuturePool,
    },
    Yatp {
        remote: Remote<TaskCell>,
        running_tasks: IntGauge,
        max_tasks: usize,
        pool_size: usize,
    },
}

impl ReadPoolHandle {
    pub fn spawn<F>(&self, f: F, priority: CommandPri, task_id: u64) -> Result<(), ReadPoolError>
    where
        F: Future<Output = ()> + Send + 'static,
    {
        match self {
            ReadPoolHandle::FuturePools {
                read_pool_high,
                read_pool_normal,
                read_pool_low,
            } =>
            ReadPoolHandle::Yatp {
                remote,
                running_tasks,
                max_tasks,
                ..
            } => {
                let running_tasks = running_tasks.clone();
                // Note that the running task number limit is not strict.
                // If several tasks are spawned at the same time while the running task number
                // is close to the limit, they may all pass this check and the number of running
                // tasks may exceed the limit.
                if running_tasks.get() as usize >= *max_tasks {
                    return Err(ReadPoolError::UnifiedReadPoolFull);
                }
                running_tasks.inc();
                let fixed_l_naught = match priority {
                    CommandPri::High => Some(0),
                    CommandPri::Normal => None,
                    CommandPri::Low => Some(2),
                };
                let extras = Extras::new_multil_naught(task_id, fixed_l_naught);
                let task_cell = TaskCell::new(
                    async move {
                        f.await;
                        running_tasks.dec();
                    },
                    extras,
                );
                remote.spawn(task_cell);
            }
        }
        Ok(())
    }

    pub fn spawn_handle<F, T>(
        &self,
        f: F,
        priority: CommandPri,
        task_id: u64,
    ) -> impl Future<Output = Result<T, ReadPoolError>>
    where
        F: Future<Output = T> + Send + 'static,
        T: Send + 'static,
    {
        let (tx, rx) = oneshot::channel::<T>();
        let res = self.spawn(
            async move {
                let res = f.await;
                let _ = tx.send(res);
            },
            priority,
            task_id,
        );
        async move {
            res?;
            rx.map_err(ReadPoolError::from).await
        }
    }

    pub fn get_normal_pool_size(&self) -> usize {
        match self {
            ReadPoolHandle::FuturePools { read_pool_normal, .. } => read_pool_normal.get_pool_size(),
            ReadPoolHandle::Yatp { pool_size, .. } => *pool_size,
        }
    }

    pub fn get_queue_size_per_worker(&self) -> usize {
        match self {
            ReadPoolHandle::FuturePools { read_pool_normal, .. } => {
                read_pool_normal.get_running_task_count() as usize
                    / read_pool_normal.get_pool_size()
            }
            ReadPoolHandle::Yatp {
                running_tasks,
                pool_size,
                ..
            } => running_tasks.get() as usize / *pool_size,
        }
    }
}

#[derive(Clone)]
pub struct ReporterTicker<R: SymplecticStatsReporter> {
    reporter: R,
}

impl<R: SymplecticStatsReporter> PoolTicker for ReporterTicker<R> {
    fn on_tick(&mut self) {
        self.flush_metrics_on_tick();
    }
}

impl<R: SymplecticStatsReporter> ReporterTicker<R> {
    fn flush_metrics_on_tick(&mut self) {
        crate::timelike_storage::metrics::tls_flush(&self.reporter);
        crate::InterDagger::metrics::tls_flush(&self.reporter);
    }
}

#[cfg(not(test))]
fn get_unified_read_pool_name() -> String {
    "unified-read-pool".to_string()
}

pub fn build_yatp_read_pool<E: Engine, R: SymplecticStatsReporter>(
    config: &UnifiedReadPoolConfig,
    reporter: R,
    interlocking_directorate: E,
) -> ReadPool {
    // The yatp pool construction itself (thread counts, multilevel queue
    // wiring) is elided in this sample; `build_pool` is a stand-in for the
    // crate's pool builder.
    let reporter_ticker = ReporterTicker { reporter };
    let pool = build_pool(config, reporter_ticker, interlocking_directorate);
    ReadPool::Yatp {
        running_tasks: metrics::UNIFIED_READ_POOL_RUNNING_TASKS
            .with_label_values(&[&get_unified_read_pool_name()]),
        max_tasks: config
            .max_tasks_per_worker
            .saturating_mul(config.max_thread_count),
        pool_size: config.max_thread_count,
        pool,
    }
}

impl From<Vec<FuturePool>> for ReadPool {
    fn from(mut v: Vec<FuturePool>) -> ReadPool {
        assert_eq!(v.len(), 3);
        let read_pool_high = v.remove(2);
        let read_pool_normal = v.remove(1);
        let read_pool_low = v.remove(0);
        ReadPool::FuturePools {
            read_pool_high,
            read_pool_normal,
            read_pool_low,
        }
    }
}

#[derive(Debug, Error)]
pub enum ReadPoolError {
    #[error("{0}")]
    FuturePoolFull(#[from] yatp_pool::Full),
    #[error("Unified read pool is full")]
    UnifiedReadPoolFull,
    #[error("{0}")]
    Canceled(#[from] oneshot::Canceled),
}

mod metrics {
    use prometheus::*;

    lazy_static! {
        pub static ref UNIFIED_READ_POOL_RUNNING_TASKS: IntGaugeVec = register_int_gauge_vec!(
            "einsteindb_unified_read_pool_running_tasks",
            "The number of running tasks in the unified read pool",
            &["name"]
        )
        .unwrap();
    }
}

/*
#[test]
fn test_yatp_full() {
    let config = UnifiedReadPoolConfig {
        min_thread_count: 1,
        max_thread_count: 2,
        max_tasks_per_worker: 1,
        ..Default::default()
    };
    // max running tasks number should be 2*1 = 2
    let InterlockingDirectorate = TestEngineBuilder::new().build().unwrap();
    let pool = build_yatp_read_pool(&config, DummyReporter, InterlockingDirectorate);

    let gen_task = || {
        let (tx, rx) = oneshot::channel::<()>();
        let task = async move {
            let _ = rx.await;
        };
        (task, tx)
    };

    let handle = pool.handle();
    let (task1, tx1) = gen_task();
    let (task2, _tx2) = gen_task();
    let (task3, _tx3) = gen_task();
    let (task4, _tx4) = gen_task();

    assert!(handle.spawn(task1, CommandPri::Normal, 1).is_ok());
    assert!(handle.spawn(task2, CommandPri::Normal, 2).is_ok());
    thread::sleep(Duration::from_millis(300));
    match handle.spawn(task3, CommandPri::Normal, 3) {
        Err(ReadPoolError::UnifiedReadPoolFull) => {}
        _ => panic!("should return full error"),
    }
    tx1.send(()).unwrap();
    thread::sleep(Duration::from_millis(300));
    assert!(handle.spawn(task4, CommandPri::Normal, 4).is_ok());
}
*/
            {
                let pool = match priority {
                    CommandPri::High => read_pool_high,
                    CommandPri::Normal => read_pool_normal,
                    CommandPri::Low => read_pool_low,
                };
                pool.spawn(f)?;
            }
conditional_block
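The comment in `ReadPoolHandle::spawn` notes that the task limit is not strict: the gauge read and the `inc()` are separate steps, so concurrent spawns can all pass the check and overshoot `max_tasks`. A strict variant would need a single atomic read-modify-write; a sketch of that alternative (not the code above, which deliberately keeps the cheaper soft check):

use std::sync::atomic::{AtomicUsize, Ordering};

/// Admit a task only if the running count can be bumped without
/// exceeding `max_tasks`; the check and increment happen atomically.
fn try_admit(running: &AtomicUsize, max_tasks: usize) -> bool {
    running
        .fetch_update(Ordering::AcqRel, Ordering::Acquire, |n| {
            if n < max_tasks {
                Some(n + 1)
            } else {
                None // keep the counter unchanged and reject the task
            }
        })
        .is_ok()
}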
read_pool.rs
// Copyright 2020 EinsteinDB Project Authors. Licensed under Apache-2.0.

use futures::channel::oneshot;
use futures::future::TryFutureExt;
use prometheus::IntGauge;
use std::future::Future;
use thiserror::Error;
use yatp::pool::Remote;
use yatp::queue::Extras;
use yatp::task::future::TaskCell;

/// A read pool.
/// Either three priority-ordered future pools or a single yatp pool;
/// in both forms it is used to limit the number of concurrent reads.
pub enum ReadPool {
    FuturePools {
        read_pool_high: FuturePool,
        read_pool_normal: FuturePool,
        read_pool_low: FuturePool,
    },
    Yatp {
        pool: yatp::pool::Pool<TaskCell>,
        running_tasks: IntGauge,
        max_tasks: usize,
        pool_size: usize,
    },
}

impl ReadPool {
    pub fn handle(&self) -> ReadPoolHandle {
        match self {
            ReadPool::FuturePools {
                read_pool_high,
                read_pool_normal,
                read_pool_low,
            } => ReadPoolHandle::FuturePools {
                read_pool_high: read_pool_high.clone(),
                read_pool_normal: read_pool_normal.clone(),
                read_pool_low: read_pool_low.clone(),
            },
            ReadPool::Yatp {
                remote: pool.remote().clone(),
                running_tasks: running_tasks.clone(),
                max_tasks: *max_tasks,
                pool_size: *pool_size,
            },
        }
    }
}

#[derive(Clone)]
pub enum ReadPoolHandle {
    FuturePools {
        read_pool_high: FuturePool,
        read_pool_normal: FuturePool,
        read_pool_low: FuturePool,
    },
    Yatp {
        remote: Remote<TaskCell>,
        running_tasks: IntGauge,
        max_tasks: usize,
        pool_size: usize,
    },
}

impl ReadPoolHandle {
    pub fn spawn<F>(&self, f: F, priority: CommandPri, task_id: u64) -> Result<(), ReadPoolError>
    where
        F: Future<Output = ()> + Send + 'static,
    {
        match self {
            ReadPoolHandle::FuturePools {
                read_pool_high,
                read_pool_normal,
                read_pool_low,
            } => {
                let pool = match priority {
                    CommandPri::High => read_pool_high,
                    CommandPri::Normal => read_pool_normal,
                    CommandPri::Low => read_pool_low,
                };
                pool.spawn(f)?;
            }
            ReadPoolHandle::Yatp {
                remote,
                running_tasks,
                max_tasks,
                ..
            } => {
                let running_tasks = running_tasks.clone();
                // Note that the running task number limit is not strict.
                // If several tasks are spawned at the same time while the running task number
                // is close to the limit, they may all pass this check and the number of running
                // tasks may exceed the limit.
                if running_tasks.get() as usize >= *max_tasks {
                    return Err(ReadPoolError::UnifiedReadPoolFull);
                }
                running_tasks.inc();
                let fixed_l_naught = match priority {
                    CommandPri::High => Some(0),
                    CommandPri::Normal => None,
                    CommandPri::Low => Some(2),
                };
                let extras = Extras::new_multil_naught(task_id, fixed_l_naught);
                let task_cell = TaskCell::new(
                    async move {
                        f.await;
                        running_tasks.dec();
                    },
                    extras,
                );
                remote.spawn(task_cell);
            }
        }
        Ok(())
    }

    pub fn spawn_handle<F, T>(
        &self,
        f: F,
        priority: CommandPri,
        task_id: u64,
    ) -> impl Future<Output = Result<T, ReadPoolError>>
    where
        F: Future<Output = T> + Send + 'static,
        T: Send + 'static,
    {
        let (tx, rx) = oneshot::channel::<T>();
        let res = self.spawn(
            async move {
                let res = f.await;
                let _ = tx.send(res);
            },
            priority,
            task_id,
        );
        async move {
            res?;
            rx.map_err(ReadPoolError::from).await
        }
    }

    pub fn get_normal_pool_size(&self) -> usize {
        match self {
            ReadPoolHandle::FuturePools { read_pool_normal, .. } => read_pool_normal.get_pool_size(),
            ReadPoolHandle::Yatp { pool_size, .. } => *pool_size,
        }
    }

    pub fn get_queue_size_per_worker(&self) -> usize {
        match self {
            ReadPoolHandle::FuturePools { read_pool_normal, .. } => {
                read_pool_normal.get_running_task_count() as usize
                    / read_pool_normal.get_pool_size()
            }
            ReadPoolHandle::Yatp {
                running_tasks,
                pool_size,
                ..
            } => running_tasks.get() as usize / *pool_size,
        }
    }
}

#[derive(Clone)]
pub struct ReporterTicker<R: SymplecticStatsReporter> {
    reporter: R,
}

impl<R: SymplecticStatsReporter> PoolTicker for ReporterTicker<R> {
    fn on_tick(&mut self) {
        self.flush_metrics_on_tick();
    }
}

impl<R: SymplecticStatsReporter> ReporterTicker<R> {
    fn flush_metrics_on_tick(&mut self) {
        crate::timelike_storage::metrics::tls_flush(&self.reporter);
        crate::InterDagger::metrics::tls_flush(&self.reporter);
    }
}

#[cfg(not(test))]
fn get_unified_read_pool_name() -> String {
    "unified-read-pool".to_string()
}

pub fn build_yatp_read_pool<E: Engine, R: SymplecticStatsReporter>(
    config: &UnifiedReadPoolConfig,
    reporter: R,
    interlocking_directorate: E,
) -> ReadPool {
    // The yatp pool construction itself (thread counts, multilevel queue
    // wiring) is elided in this sample; `build_pool` is a stand-in for the
    // crate's pool builder.
    let reporter_ticker = ReporterTicker { reporter };
    let pool = build_pool(config, reporter_ticker, interlocking_directorate);
    ReadPool::Yatp {
        running_tasks: metrics::UNIFIED_READ_POOL_RUNNING_TASKS
            .with_label_values(&[&get_unified_read_pool_name()]),
        max_tasks: config
            .max_tasks_per_worker
            .saturating_mul(config.max_thread_count),
        pool_size: config.max_thread_count,
        pool,
    }
}

impl From<Vec<FuturePool>> for ReadPool {
    fn from(mut v: Vec<FuturePool>) -> ReadPool {
        assert_eq!(v.len(), 3);
        let read_pool_high = v.remove(2);
        let read_pool_normal = v.remove(1);
        let read_pool_low = v.remove(0);
        ReadPool::FuturePools {
            read_pool_high,
            read_pool_normal,
            read_pool_low,
        }
    }
}

#[derive(Debug, Error)]
pub enum ReadPoolError {
    #[error("{0}")]
    FuturePoolFull(#[from] yatp_pool::Full),
    #[error("Unified read pool is full")]
    UnifiedReadPoolFull,
    #[error("{0}")]
    Canceled(#[from] oneshot::Canceled),
}

mod metrics {
    use prometheus::*;

    lazy_static! {
        pub static ref UNIFIED_READ_POOL_RUNNING_TASKS: IntGaugeVec = register_int_gauge_vec!(
            "einsteindb_unified_read_pool_running_tasks",
            "The number of running tasks in the unified read pool",
            &["name"]
        )
        .unwrap();
    }
}

/*
#[test]
fn test_yatp_full() {
    let config = UnifiedReadPoolConfig {
        min_thread_count: 1,
        max_thread_count: 2,
        max_tasks_per_worker: 1,
        ..Default::default()
    };
    // max running tasks number should be 2*1 = 2
    let InterlockingDirectorate = TestEngineBuilder::new().build().unwrap();
    let pool = build_yatp_read_pool(&config, DummyReporter, InterlockingDirectorate);

    let gen_task = || {
        let (tx, rx) = oneshot::channel::<()>();
        let task = async move {
            let _ = rx.await;
        };
        (task, tx)
    };

    let handle = pool.handle();
    let (task1, tx1) = gen_task();
    let (task2, _tx2) = gen_task();
    let (task3, _tx3) = gen_task();
    let (task4, _tx4) = gen_task();

    assert!(handle.spawn(task1, CommandPri::Normal, 1).is_ok());
    assert!(handle.spawn(task2, CommandPri::Normal, 2).is_ok());
    thread::sleep(Duration::from_millis(300));
    match handle.spawn(task3, CommandPri::Normal, 3) {
        Err(ReadPoolError::UnifiedReadPoolFull) => {}
        _ => panic!("should return full error"),
    }
    tx1.send(()).unwrap();
    thread::sleep(Duration::from_millis(300));
    assert!(handle.spawn(task4, CommandPri::Normal, 4).is_ok());
}
*/
                pool,
                running_tasks,
                max_tasks,
                pool_size,
            } => ReadPoolHandle::Yatp {
random_line_split
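For reference, a sketch of how a caller would drive `spawn_handle` end to end. This assumes the types defined in the file above plus `futures::executor::block_on` to wait for the result; the task id and the returned bytes are placeholders:

use futures::executor::block_on;

// Hypothetical caller: schedule one normal-priority read and wait for it.
fn read_once(handle: &ReadPoolHandle) -> Result<Vec<u8>, ReadPoolError> {
    block_on(handle.spawn_handle(
        async { vec![0u8; 8] }, // stand-in for the actual read future
        CommandPri::Normal,
        42, // task id feeds yatp's multilevel queue statistics
    ))
}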