column      type           stats
file_name   large_string   lengths 4 to 69
prefix      large_string   lengths 0 to 26.7k
suffix      large_string   lengths 0 to 24.8k
middle      large_string   lengths 0 to 2.12k
fim_type    large_string   4 classes
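Each row that follows is one fill-in-the-middle (FIM) sample: a source file is split into a `prefix`, a held-out `middle`, and a `suffix`, and `fim_type` labels what kind of span the middle is. A minimal sketch of how such a row can be reassembled; the `<fim_*>` sentinel tokens are illustrative assumptions, not something this dump specifies:

```rust
/// One row of the dump, mirroring the schema above.
struct FimSample {
    file_name: String,
    prefix: String,
    suffix: String,
    middle: String,
    fim_type: String,
}

impl FimSample {
    /// Reassembling prefix + middle + suffix recovers the original file slice.
    fn original(&self) -> String {
        format!("{}{}{}", self.prefix, self.middle, self.suffix)
    }

    /// Prefix-suffix-middle (PSM) ordering commonly used for FIM training.
    /// The sentinel tokens here are hypothetical placeholders.
    fn psm(&self) -> String {
        format!(
            "<fim_prefix>{}<fim_suffix>{}<fim_middle>{}",
            self.prefix, self.suffix, self.middle
        )
    }
}
```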
com.rs
//! Common utilities //! //! A standard vocabulary used throughout the code. use std::{self, cmp, convert, fmt, hash, iter, marker, num, ops, sync}; use crate::basic::sea::TableIndex; /// A fragment of source code. #[derive(Clone)] pub struct CodeFragment(sync::Arc<Vec<u8>>); impl CodeFragment { /// Creates a new `CodeFragment`. pub fn new(code: Vec<u8>) -> CodeFragment { CodeFragment(sync::Arc::new(code)) } } impl ops::Deref for CodeFragment { type Target = [u8]; fn deref(&self) -> &[u8] { &*self.0 } } /// The core implementation of a u32-based ID. /// /// The ID can be any number in the `[0, u32::MAX - 2]` range: /// - `u32::MAX` is reserved to enable size optimizations (Option). /// - `u32::MAX - 1` is reserved to denote Default constructed IDs. /// /// IDs built on top of `CoreId` may reserve further numbers for their own ends. #[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Hash)] pub struct CoreId(num::NonZeroU32); impl CoreId { /// Creates a new instance. /// /// # Panics /// /// Panics if the integer provided is `u32::MAX`. pub fn new(id: u32) -> CoreId { if id == std::u32::MAX { panic!("Unsuitable ID: {}", id); } unsafe { CoreId(num::NonZeroU32::new_unchecked(id + 1)) } } /// Get the raw ID. pub fn raw(&self) -> u32 { self.0.get() - 1 } } impl fmt::Debug for CoreId { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { write!(f, "{}", self.raw()) } } impl Default for CoreId { fn default() -> CoreId { unsafe { CoreId(num::NonZeroU32::new_unchecked(std::u32::MAX)) } } } impl fmt::Display for CoreId { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { write!(f, "{}", self.raw()) } } impl convert::From<CoreId> for u32 { fn from(core_id: CoreId) -> u32 { core_id.raw() } } /// An Id implementation based on CoreId. /// /// It contains a default empty state, to represent empty streams. // #[manual(Clone, Copy, Debug, Default, PartialEq, PartialOrd, Eq, Ord, Hash)] pub struct Id<T:?Sized>(CoreId, marker::PhantomData<*const T>); impl<T:?Sized> Id<T> { /// Creates a new instance. pub fn new(id: u32) -> Self { Id(CoreId::new(id), marker::PhantomData) } /// Creates an empty instance. pub fn empty() -> Self { Self::new(std::u32::MAX - 2) } /// Returns whether the corresponding list is empty. pub fn is_empty(&self) -> bool { *self == Self::empty() } /// Returns the inner ID. pub fn value(&self) -> u32 { self.0.raw() } } impl<T:?Sized> Clone for Id<T> { fn clone(&self) -> Self { *self } } impl<T:?Sized> Copy for Id<T> {} impl<T:?Sized> fmt::Debug for Id<T> { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { const MODULE_OFFSET: usize = 1usize << 30; const REPOSITORY_OFFSET: usize = 1usize << 31; // More compact representation for `{:#?}`. // // FIXME(matthieum): consider adding `std::intrinsics::type_name<T>()` // once it stabilizes. 
if *self == Default::default() { write!(f, "Id(default)") } else if *self == Self::empty() { write!(f, "Id(empty)") } else { match self.index() { index if index < MODULE_OFFSET => write!(f, "Id({})", index), index if index < REPOSITORY_OFFSET => write!(f, "Id(M-{})", index - MODULE_OFFSET), index => write!(f, "Id(R-{})", index - REPOSITORY_OFFSET), } } } } impl<T:?Sized> Default for Id<T> { fn default() -> Self { Id(Default::default(), marker::PhantomData) } } impl<T:?Sized> cmp::Eq for Id<T> {} impl<T:?Sized> hash::Hash for Id<T> { fn hash<H: hash::Hasher>(&self, state: &mut H) { self.0.hash(state); } } impl<T:?Sized> cmp::Ord for Id<T> { fn cmp(&self, other: &Self) -> cmp::Ordering { self.0.cmp(&other.0) } } impl<T:?Sized> cmp::PartialEq for Id<T> { fn eq(&self, other: &Self) -> bool { self.0.eq(&other.0) } } impl<T:?Sized> cmp::PartialOrd for Id<T> { fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> { self.0.partial_cmp(&other.0) } } impl<T:?Sized> TableIndex for Id<T> { fn from_index(index: usize) -> Self { Id::new(index as u32) } fn index(&self) -> usize { self.value() as usize } } /// IdIterator. /// /// An Iterator over consecutive IDs. // #[manual(Clone, Copy, Debug, Default, PartialEq, PartialOrd, Eq, Ord, Hash)] pub struct IdIterator<T:?Sized> { start: u32, end: u32, _marker: marker::PhantomData<*const T>, } impl<T:?Sized> IdIterator<T> { /// Creates an instance. pub fn new(start: u32, end: u32) -> Self { IdIterator { start, end, _marker: marker::PhantomData } } } impl<T:?Sized> Clone for IdIterator<T> { fn clone(&self) -> Self { *self } } impl<T:?Sized> Copy for IdIterator<T> {} impl<T:?Sized> fmt::Debug for IdIterator<T> { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { // FIXME(matthieum): consider adding `std::intrinsics::type_name<T>()` // once it stabilizes. 
write!(f, "IdIterator({}, {})", self.start, self.end) } } impl<T:?Sized> Default for IdIterator<T> { fn default() -> Self { IdIterator::new(0, 0) } } impl<T:?Sized> cmp::Eq for IdIterator<T> {} impl<T:?Sized> hash::Hash for IdIterator<T> { fn hash<H: hash::Hasher>(&self, state: &mut H) { self.start.hash(state); self.end.hash(state); } } impl<T:?Sized> iter::Iterator for IdIterator<T> { type Item = Id<T>; fn next(&mut self) -> Option<Id<T>> { if self.start < self.end { let result = Id::new(self.start); self.start += 1; Some(result) } else { None } } fn size_hint(&self) -> (usize, Option<usize>) { let difference = self.len(); (difference, Some(difference)) } fn count(self) -> usize { self.len() } fn last(self) -> Option<Id<T>> { if self.start < self.end { Some(Id::new(self.end - 1)) } else { None } } fn nth(&mut self, n: usize) -> Option<Id<T>> { let result = self.start.saturating_add(n as u32); if result < self.end { self.start = result + 1; Some(Id::new(result)) } else { self.start = self.end; None } } fn max(self) -> Option<Id<T>> { self.last() } fn min(mut self) -> Option<Id<T>> { self.next() } } impl<T:?Sized> iter::DoubleEndedIterator for IdIterator<T> { fn next_back(&mut self) -> Option<Id<T>> { if self.start < self.end { self.end -= 1; Some(Id::new(self.end)) } else { None } } } impl<T:?Sized> iter::ExactSizeIterator for IdIterator<T> { fn len(&self) -> usize { self.end.saturating_sub(self.start) as usize } } impl<T:?Sized> cmp::Ord for IdIterator<T> { fn cmp(&self, other: &Self) -> cmp::Ordering { (self.start, self.end).cmp(&(other.start, other.end)) } } impl<T:?Sized> cmp::PartialEq for IdIterator<T> { fn eq(&self, other: &Self) -> bool { (self.start, self.end).eq(&(other.start, other.end)) } } impl<T:?Sized> cmp::PartialOrd for IdIterator<T> { fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> { (self.start, self.end).partial_cmp(&(other.start, other.end)) } } /// A Range represents a start and end position in a buffer. /// /// Note: the `Range` does not know which buffer it indexes in. /// /// Note: a `Range` cannot index past 4GB. #[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Hash)] pub struct Range { offset: u32, length: u32, } impl Range { /// Creates a new `Range` from a start position and length. /// /// In Debug, it is checked that the end position will not exceed 4GB. pub fn new(offset: usize, length: usize) -> Range { debug_assert!(offset <= std::u32::MAX as usize); debug_assert!(length <= std::u32::MAX as usize); debug_assert!(offset <= (std::u32::MAX as usize - length)); Range { offset: offset as u32, length: length as u32 } } /// Creates a new `Range` from a start and end position. /// /// As the name implies, this creates a half-open range, similar to `start..end`. pub fn half_open(start: u32, end: u32) -> Range { debug_assert!(start <= end); Range { offset: start, length: end - start } } /// Returns the start position of the range. pub fn offset(self) -> usize { self.offset as usize } /// Returns the end position of the range (excluded). pub fn end_offset(self) -> usize { self.offset() + self.length() } /// Returns the length of the range. pub fn length(self) -> usize { self.length as usize } /// Shifts range to the left. pub fn shift_left(self, n: usize) -> Range { self.shift_to(self.offset() - n) } /// Shifts range to the right. pub fn shift_right(self, n: usize) -> Range { self.shift_to(self.offset() + n) } /// Shifts range to specified offset. 
pub fn shift_to(self, offset: usize) -> Range { Range { offset: offset as u32,..self } } /// Skips n from the left. pub fn skip_left(self, n: usize) -> Range { Range { offset: self.offset + (n as u32), length: self.length - (n as u32), } } /// Skips n from the right. pub fn skip_right(self, n: usize) -> Range { Range { offset: self.offset, length: self.length - (n as u32), } } /// Extend one range with another, the resulting range spans both ranges, /// and in the case they were discontiguous also spans the interval. pub fn extend(self, other: Range) -> Range { if self.offset > other.offset { other.extend(self) } else if self.end_offset() >= other.end_offset()
else { Range { offset: self.offset, length: (other.end_offset() - self.offset()) as u32 } } } } impl fmt::Debug for Range { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { write!(f, "{}@{}", self.length, self.offset) } } impl Default for Range { fn default() -> Range { Range::new(0, 0) } } impl fmt::Display for Range { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { write!(f, "{}@{}", self.length, self.offset) } } impl ops::Index<Range> for [u8] { type Output = [u8]; fn index(&self, index: Range) -> &[u8] { &self[index.offset()..index.end_offset()] } } /// A Slice of bytes, printed more pleasantly #[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Hash)] pub struct Slice<'a>(pub &'a [u8]); impl<'a> Slice<'a> { /// Returns true if empty, false otherwise. pub fn is_empty(&self) -> bool { self.0.is_empty() } /// Returns the length of the slice. pub fn len(&self) -> usize { self.0.len() } /// Returns the byte at the indicated position, or None if it is invalid. pub fn get(&self, pos: usize) -> Option<&u8> { self.0.get(pos) } } impl<'a> fmt::Debug for Slice<'a> { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { write!(f, "{}", self) } } impl<'a> fmt::Display for Slice<'a> { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { let mut start = 0; while start < self.0.len() { // `position` is relative to `start`; shift it back to an absolute index. let end = self.0[start..].iter().position(|&b| b < 32 || b > 126) .map_or(self.len(), |pos| start + pos); f.write_str( std::str::from_utf8(&self.0[start..end]).expect("Valid UTF-8") )?; start = end; let end = self.0[start..].iter().position(|&b| b >= 32 && b <= 126) .map_or(self.len(), |pos| start + pos); for &byte in &self.0[start..end] { write!(f, "{{0x{:X}}}", byte)?; } start = end; } Ok(()) } } /// Span pub trait Span { /// Returns the Range spanned by the element. fn span(&self) -> Range; } /// A Store trait, to abstract over the actual storage of individual elements. pub trait Store<T, I = Id<T>> { /// Returns the number of items. fn len(&self) -> usize; /// Returns a copy of the item. fn get(&self, id: I) -> T; /// Returns the range of the item. fn get_range(&self, id: I) -> Range; /// Pushes an item. fn push(&mut self, item: T, range: Range) -> I; } /// A MultiStore trait, to abstract over the actual storage of slices. pub trait MultiStore<T, I = Id<[T]>> { /// Returns the slice of items. fn get_slice(&self, id: I) -> &[T]; // TODO(matthieum): A more efficient interface would take IntoIterator<Item = T> /// Pushes a slice of elements. 
fn push_slice(&mut self, items: &[T]) -> I; } // // Tests // #[cfg(test)] mod tests { use super::{CoreId, Range}; #[test] fn core_id_roundtrip() { for i in 0..10 { assert_eq!(i, CoreId::new(i).raw()); } } #[test] fn core_id_default() { let core: CoreId = Default::default(); assert_eq!(std::u32::MAX - 1, core.raw()); } #[test] #[should_panic] fn core_id_reserved_size_optimization() { CoreId::new(std::u32::MAX); } #[test] fn range_extend_contiguous() { let result = Range::new(3, 4).extend(Range::new(7, 2)); assert_eq!(result, Range::new(3, 6)); } #[test] fn range_extend_separated() { let result = Range::new(3, 4).extend(Range::new(11, 3)); assert_eq!(result, Range::new(3, 11)); } #[test] fn range_extend_partially_overlapping() { let result = Range::new(3, 4).extend(Range::new(5, 3)); assert_eq!(result, Range::new(3, 5)); } #[test] fn range_extend_totally_overlapping() { let result = Range::new(3, 4).extend(Range::new(5, 2)); assert_eq!(result, Range::new(3, 4)); } #[test] fn range_extend_reversed() { let result = Range::new(5, 3).extend(Range::new(3, 4)); assert_eq!(result, Range::new(3, 5)); } }
{ self }
conditional_block
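The `CoreId` docs above call out the `Option` size optimization; it falls out of the `NonZeroU32` representation and the `+ 1` shift in `new`. A standalone sketch (re-declaring a minimal `CoreId` so it compiles on its own):

```rust
use std::num::NonZeroU32;

// Minimal stand-in for the CoreId in com.rs: it stores id + 1, so 0 never occurs.
struct CoreId(NonZeroU32);

fn main() {
    // NonZeroU32 can never be 0, so the compiler uses 0 as the niche for
    // `None`: Option<CoreId> stays 4 bytes instead of growing to 8.
    assert_eq!(std::mem::size_of::<CoreId>(), 4);
    assert_eq!(std::mem::size_of::<Option<CoreId>>(), 4);

    // The + 1 shift is why u32::MAX must be rejected: MAX + 1 would wrap to 0.
    let id = CoreId(NonZeroU32::new(41 + 1).unwrap());
    assert_eq!(id.0.get() - 1, 41); // this is exactly what raw() computes
}
```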
physics_hooks.rs
use crate::dynamics::{RigidBodyHandle, RigidBodySet}; use crate::geometry::{ColliderHandle, ColliderSet, ContactManifold, SolverContact, SolverFlags}; use crate::math::{Real, Vector}; use na::ComplexField; /// Context given to custom collision filters to filter out collisions. pub struct PairFilterContext<'a> { /// The set of rigid-bodies. pub bodies: &'a RigidBodySet, /// The set of colliders. pub colliders: &'a ColliderSet, /// The handle of the first collider involved in the potential collision. pub collider1: ColliderHandle, /// The handle of the second collider involved in the potential collision. pub collider2: ColliderHandle, /// The handle of the first body involved in the potential collision. pub rigid_body1: Option<RigidBodyHandle>, /// The handle of the second body involved in the potential collision. pub rigid_body2: Option<RigidBodyHandle>, } /// Context given to custom contact modifiers to modify the contacts seen by the constraints solver. pub struct ContactModificationContext<'a> { /// The set of rigid-bodies. pub bodies: &'a RigidBodySet, /// The set of colliders. pub colliders: &'a ColliderSet, /// The handle of the first collider involved in the potential collision. pub collider1: ColliderHandle, /// The handle of the second collider involved in the potential collision. pub collider2: ColliderHandle, /// The handle of the first body involved in the potential collision. pub rigid_body1: Option<RigidBodyHandle>, /// The handle of the second body involved in the potential collision. pub rigid_body2: Option<RigidBodyHandle>, /// The contact manifold. pub manifold: &'a ContactManifold, /// The solver contacts that can be modified. pub solver_contacts: &'a mut Vec<SolverContact>, /// The contact normal that can be modified. pub normal: &'a mut Vector<Real>, /// User-defined data attached to the manifold. // NOTE: we keep this a &'a mut u32 to emphasize the // fact that this can be modified. pub user_data: &'a mut u32, } impl<'a> ContactModificationContext<'a> { /// Helper function to update `self` to emulate a one-way platform. /// /// The "one-way" behavior will only allow contacts between two colliders /// if the local contact normal of the first collider involved in the contact /// is almost aligned with the provided `allowed_local_n1` direction. /// /// To make this method work properly it must be called as part of the /// `PhysicsHooks::modify_solver_contacts` method at each timestep, for each /// contact manifold involving a one-way platform. The `self.user_data` field /// must not be modified from the outside of this method. pub fn update_as_oneway_platform( &mut self, allowed_local_n1: &Vector<Real>, allowed_angle: Real, ) { const CONTACT_CONFIGURATION_UNKNOWN: u32 = 0; const CONTACT_CURRENTLY_ALLOWED: u32 = 1; const CONTACT_CURRENTLY_FORBIDDEN: u32 = 2; let cang = ComplexField::cos(allowed_angle); // Test the allowed normal with the local-space contact normal that // points towards the exterior of context.collider1. let contact_is_ok = self.manifold.local_n1.dot(&allowed_local_n1) >= cang; match *self.user_data { CONTACT_CONFIGURATION_UNKNOWN => { if contact_is_ok { // The contact is close enough to the allowed normal. *self.user_data = CONTACT_CURRENTLY_ALLOWED; } else { // The contact normal isn't close enough to the allowed // normal, so remove all the contacts and mark further contacts // as forbidden. self.solver_contacts.clear(); // NOTE: in some very rare cases `local_n1` will be // zero if the objects are exactly touching at one point. 
// So in this case we can't really conclude. // If the norm is non-zero, then we can tell we need to forbid // further contacts. Otherwise we have to wait for the next frame. if self.manifold.local_n1.norm_squared() > 0.1 { *self.user_data = CONTACT_CURRENTLY_FORBIDDEN; } } } CONTACT_CURRENTLY_FORBIDDEN => { // Contacts are forbidden so we need to continue forbidding contacts // until all the contacts are non-penetrating again. In that case, if // the contacts are OK wrt. the contact normal, then we can mark them as allowed. if contact_is_ok && self.solver_contacts.iter().all(|c| c.dist > 0.0) { *self.user_data = CONTACT_CURRENTLY_ALLOWED; } else { // Discard all the contacts. self.solver_contacts.clear(); } } CONTACT_CURRENTLY_ALLOWED => { // We allow all the contacts right now. The configuration becomes // uncertain again when the contact manifold no longer contains any contact. if self.solver_contacts.is_empty() { *self.user_data = CONTACT_CONFIGURATION_UNKNOWN; } } _ => unreachable!(), } } } bitflags::bitflags! { #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] /// Flags affecting the behavior of the constraints solver for a given contact manifold. pub struct ActiveHooks: u32 { /// If set, Rapier will call `PhysicsHooks::filter_contact_pair` whenever relevant. const FILTER_CONTACT_PAIRS = 0b0001; /// If set, Rapier will call `PhysicsHooks::filter_intersection_pair` whenever relevant. const FILTER_INTERSECTION_PAIR = 0b0010; /// If set, Rapier will call `PhysicsHooks::modify_solver_contacts` whenever relevant. const MODIFY_SOLVER_CONTACTS = 0b0100; } } impl Default for ActiveHooks { fn default() -> Self { ActiveHooks::empty() } } // TODO: right now, the wasm version doesn't have the Send+Sync bounds. // This is because these bounds are very difficult to fulfill if we want to // call JS closures. Also, parallelism cannot be enabled for wasm targets, so // not having Send+Sync isn't a problem. /// User-defined functions called by the physics engine during one timestep in order to customize its behavior. #[cfg(target_arch = "wasm32")] pub trait PhysicsHooks { /// Applies the contact pair filter. fn filter_contact_pair(&self, _context: &PairFilterContext) -> Option<SolverFlags> { None } /// Applies the intersection pair filter. fn filter_intersection_pair(&self, _context: &PairFilterContext) -> bool { false } /// Modifies the set of contacts seen by the constraints solver. fn modify_solver_contacts(&self, _context: &mut ContactModificationContext) {} } /// User-defined functions called by the physics engine during one timestep in order to customize its behavior. #[cfg(not(target_arch = "wasm32"))] pub trait PhysicsHooks: Send + Sync { /// Applies the contact pair filter. /// /// Note that this method will only be called if at least one of the colliders /// involved in the contact contains the `ActiveHooks::FILTER_CONTACT_PAIRS` flag /// in its physics hooks flags. /// /// User-defined filter for potential contact pairs detected by the broad-phase. /// This can be used to apply custom logic in order to decide whether two colliders /// should have their contact computed by the narrow-phase, and if these contacts /// should be solved by the constraints solver. /// /// Note that using a contact pair filter will replace the default contact filtering /// which consists of preventing contact computation between two non-dynamic bodies. /// /// This filtering method is called after taking into account the colliders' collision groups. 
/// /// If this returns `None`, then the narrow-phase will ignore this contact pair and /// not compute any contact manifolds for it. /// If this returns `Some`, then the narrow-phase will compute contact manifolds for /// this pair of colliders, and configure them with the returned solver flags. For /// example, if this returns `Some(SolverFlags::COMPUTE_IMPULSES)` then the contacts /// will be taken into account by the constraints solver. If this returns /// `Some(SolverFlags::empty())` then the constraints solver will ignore these /// contacts. fn filter_contact_pair(&self, _context: &PairFilterContext) -> Option<SolverFlags> { Some(SolverFlags::COMPUTE_IMPULSES) } /// Applies the intersection pair filter. /// /// Note that this method will only be called if at least one of the colliders /// involved in the contact contains the `ActiveHooks::FILTER_INTERSECTION_PAIR` flags /// in its physics hooks flags. /// /// User-defined filter for potential intersection pairs detected by the broad-phase.
/// This can be used to apply custom logic in order to decide whether two colliders /// should have their intersection computed by the narrow-phase. /// /// Note that using an intersection pair filter will replace the default intersection filtering /// which consists of preventing intersection computation between two non-dynamic bodies. /// /// This filtering method is called after taking into account the colliders' collision groups. /// /// If this returns `false`, then the narrow-phase will ignore this pair and /// not compute any intersection information for it. /// If this returns `true`, then the narrow-phase will compute intersection /// information for this pair. fn filter_intersection_pair(&self, _context: &PairFilterContext) -> bool { true } /// Modifies the set of contacts seen by the constraints solver. /// /// Note that this method will only be called if at least one of the colliders /// involved in the contact contains the `ActiveHooks::MODIFY_SOLVER_CONTACTS` flag /// in its physics hooks flags. /// /// By default, the content of `solver_contacts` is computed from `manifold.points`. /// This method will be called on each contact manifold which has the flag `SolverFlags::modify_solver_contacts` set. /// This method can be used to modify the set of solver contacts seen by the constraints solver: contacts /// can be removed and modified. /// /// Note that if all the contacts have to be ignored by the constraint solver, you may simply /// do `context.solver_contacts.clear()`. /// /// Modifying the solver contacts allows you to achieve various effects, including: /// - Simulating conveyor belts by setting the `surface_velocity` of a solver contact. /// - Simulating shapes with multiple materials by modifying the friction and restitution /// coefficient depending on the features in contact. /// - Simulating one-way platforms depending on the contact normal. /// /// Each contact manifold is given a `u32` user-defined data that is persistent between /// timesteps (as long as the contact manifold exists). This user-defined data is initialized /// as 0 and can be modified in `context.user_data`. /// /// The world-space contact normal can be modified in `context.normal`. fn modify_solver_contacts(&self, _context: &mut ContactModificationContext) {} } impl PhysicsHooks for () { fn filter_contact_pair(&self, _context: &PairFilterContext) -> Option<SolverFlags> { Some(SolverFlags::default()) } fn filter_intersection_pair(&self, _: &PairFilterContext) -> bool { true } fn modify_solver_contacts(&self, _: &mut ContactModificationContext) {} }
///
random_line_split
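The trait above is the extension point: implement `PhysicsHooks` and return `None` from `filter_contact_pair` to drop a pair before the narrow-phase runs. A sketch under stated assumptions: it targets the non-wasm trait shown above via the `rapier3d` prelude, and the tag-in-`user_data` convention is hypothetical:

```rust
use rapier3d::prelude::*;

/// Hypothetical filter: colliders sharing the same non-zero `user_data` tag
/// (e.g. all parts of one ragdoll) never collide with each other.
struct SameTagFilter;

impl PhysicsHooks for SameTagFilter {
    fn filter_contact_pair(&self, context: &PairFilterContext) -> Option<SolverFlags> {
        let tag1 = context.colliders[context.collider1].user_data;
        let tag2 = context.colliders[context.collider2].user_data;
        if tag1 != 0 && tag1 == tag2 {
            None // skip narrow-phase contact computation for this pair
        } else {
            Some(SolverFlags::COMPUTE_IMPULSES) // the default behavior
        }
    }
}
```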
physics_hooks.rs
use crate::dynamics::{RigidBodyHandle, RigidBodySet}; use crate::geometry::{ColliderHandle, ColliderSet, ContactManifold, SolverContact, SolverFlags}; use crate::math::{Real, Vector}; use na::ComplexField; /// Context given to custom collision filters to filter-out collisions. pub struct PairFilterContext<'a> { /// The set of rigid-bodies. pub bodies: &'a RigidBodySet, /// The set of colliders. pub colliders: &'a ColliderSet, /// The handle of the first collider involved in the potential collision. pub collider1: ColliderHandle, /// The handle of the first collider involved in the potential collision. pub collider2: ColliderHandle, /// The handle of the first body involved in the potential collision. pub rigid_body1: Option<RigidBodyHandle>, /// The handle of the first body involved in the potential collision. pub rigid_body2: Option<RigidBodyHandle>, } /// Context given to custom contact modifiers to modify the contacts seen by the constraints solver. pub struct ContactModificationContext<'a> { /// The set of rigid-bodies. pub bodies: &'a RigidBodySet, /// The set of colliders. pub colliders: &'a ColliderSet, /// The handle of the first collider involved in the potential collision. pub collider1: ColliderHandle, /// The handle of the first collider involved in the potential collision. pub collider2: ColliderHandle, /// The handle of the first body involved in the potential collision. pub rigid_body1: Option<RigidBodyHandle>, /// The handle of the first body involved in the potential collision. pub rigid_body2: Option<RigidBodyHandle>, /// The contact manifold. pub manifold: &'a ContactManifold, /// The solver contacts that can be modified. pub solver_contacts: &'a mut Vec<SolverContact>, /// The contact normal that can be modified. pub normal: &'a mut Vector<Real>, /// User-defined data attached to the manifold. // NOTE: we keep this a &'a mut u32 to emphasize the // fact that this can be modified. pub user_data: &'a mut u32, } impl<'a> ContactModificationContext<'a> { /// Helper function to update `self` to emulate a oneway-platform. /// /// The "oneway" behavior will only allow contacts between two colliders /// if the local contact normal of the first collider involved in the contact /// is almost aligned with the provided `allowed_local_n1` direction. /// /// To make this method work properly it must be called as part of the /// `PhysicsHooks::modify_solver_contacts` method at each timestep, for each /// contact manifold involving a one-way platform. The `self.user_data` field /// must not be modified from the outside of this method. pub fn update_as_oneway_platform( &mut self, allowed_local_n1: &Vector<Real>, allowed_angle: Real, )
self.solver_contacts.clear(); // NOTE: in some very rare cases `local_n1` will be // zero if the objects are exactly touching at one point. // So in this case we can't really conclude. // If the norm is non-zero, then we can tell we need to forbid // further contacts. Otherwise we have to wait for the next frame. if self.manifold.local_n1.norm_squared() > 0.1 { *self.user_data = CONTACT_CURRENTLY_FORBIDDEN; } } } CONTACT_CURRENTLY_FORBIDDEN => { // Contacts are forbidden so we need to continue forbidding contacts // until all the contacts are non-penetrating again. In that case, if // the contacts are OK wrt. the contact normal, then we can mark them as allowed. if contact_is_ok && self.solver_contacts.iter().all(|c| c.dist > 0.0) { *self.user_data = CONTACT_CURRENTLY_ALLOWED; } else { // Discard all the contacts. self.solver_contacts.clear(); } } CONTACT_CURRENTLY_ALLOWED => { // We allow all the contacts right now. The configuration becomes // uncertain again when the contact manifold no longer contains any contact. if self.solver_contacts.is_empty() { *self.user_data = CONTACT_CONFIGURATION_UNKNOWN; } } _ => unreachable!(), } } } bitflags::bitflags! { #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] /// Flags affecting the behavior of the constraints solver for a given contact manifold. pub struct ActiveHooks: u32 { /// If set, Rapier will call `PhysicsHooks::filter_contact_pair` whenever relevant. const FILTER_CONTACT_PAIRS = 0b0001; /// If set, Rapier will call `PhysicsHooks::filter_intersection_pair` whenever relevant. const FILTER_INTERSECTION_PAIR = 0b0010; /// If set, Rapier will call `PhysicsHooks::modify_solver_contact` whenever relevant. const MODIFY_SOLVER_CONTACTS = 0b0100; } } impl Default for ActiveHooks { fn default() -> Self { ActiveHooks::empty() } } // TODO: right now, the wasm version don't have the Send+Sync bounds. // This is because these bounds are very difficult to fulfill if we want to // call JS closures. Also, parallelism cannot be enabled for wasm targets, so // not having Send+Sync isn't a problem. /// User-defined functions called by the physics engines during one timestep in order to customize its behavior. #[cfg(target_arch = "wasm32")] pub trait PhysicsHooks { /// Applies the contact pair filter. fn filter_contact_pair(&self, _context: &PairFilterContext) -> Option<SolverFlags> { None } /// Applies the intersection pair filter. fn filter_intersection_pair(&self, _context: &PairFilterContext) -> bool { false } /// Modifies the set of contacts seen by the constraints solver. fn modify_solver_contacts(&self, _context: &mut ContactModificationContext) {} } /// User-defined functions called by the physics engines during one timestep in order to customize its behavior. #[cfg(not(target_arch = "wasm32"))] pub trait PhysicsHooks: Send + Sync { /// Applies the contact pair filter. /// /// Note that this method will only be called if at least one of the colliders /// involved in the contact contains the `ActiveHooks::FILTER_CONTACT_PAIRS` flags /// in its physics hooks flags. /// /// User-defined filter for potential contact pairs detected by the broad-phase. /// This can be used to apply custom logic in order to decide whether two colliders /// should have their contact computed by the narrow-phase, and if these contact /// should be solved by the constraints solver /// /// Note that using a contact pair filter will replace the default contact filtering /// which consists of preventing contact computation between two non-dynamic bodies. 
/// /// This filtering method is called after taking into account the colliders collision groups. /// /// If this returns `None`, then the narrow-phase will ignore this contact pair and /// not compute any contact manifolds for it. /// If this returns `Some`, then the narrow-phase will compute contact manifolds for /// this pair of colliders, and configure them with the returned solver flags. For /// example, if this returns `Some(SolverFlags::COMPUTE_IMPULSES)` then the contacts /// will be taken into account by the constraints solver. If this returns /// `Some(SolverFlags::empty())` then the constraints solver will ignore these /// contacts. fn filter_contact_pair(&self, _context: &PairFilterContext) -> Option<SolverFlags> { Some(SolverFlags::COMPUTE_IMPULSES) } /// Applies the intersection pair filter. /// /// Note that this method will only be called if at least one of the colliders /// involved in the contact contains the `ActiveHooks::FILTER_INTERSECTION_PAIR` flags /// in its physics hooks flags. /// /// User-defined filter for potential intersection pairs detected by the broad-phase. /// /// This can be used to apply custom logic in order to decide whether two colliders /// should have their intersection computed by the narrow-phase. /// /// Note that using an intersection pair filter will replace the default intersection filtering /// which consists of preventing intersection computation between two non-dynamic bodies. /// /// This filtering method is called after taking into account the colliders collision groups. /// /// If this returns `false`, then the narrow-phase will ignore this pair and /// not compute any intersection information for it. /// If this return `true` then the narrow-phase will compute intersection /// information for this pair. fn filter_intersection_pair(&self, _context: &PairFilterContext) -> bool { true } /// Modifies the set of contacts seen by the constraints solver. /// /// Note that this method will only be called if at least one of the colliders /// involved in the contact contains the `ActiveHooks::MODIFY_SOLVER_CONTACTS` flags /// in its physics hooks flags. /// /// By default, the content of `solver_contacts` is computed from `manifold.points`. /// This method will be called on each contact manifold which have the flag `SolverFlags::modify_solver_contacts` set. /// This method can be used to modify the set of solver contacts seen by the constraints solver: contacts /// can be removed and modified. /// /// Note that if all the contacts have to be ignored by the constraint solver, you may simply /// do `context.solver_contacts.clear()`. /// /// Modifying the solver contacts allow you to achieve various effects, including: /// - Simulating conveyor belts by setting the `surface_velocity` of a solver contact. /// - Simulating shapes with multiply materials by modifying the friction and restitution /// coefficient depending of the features in contacts. /// - Simulating one-way platforms depending on the contact normal. /// /// Each contact manifold is given a `u32` user-defined data that is persistent between /// timesteps (as long as the contact manifold exists). This user-defined data is initialized /// as 0 and can be modified in `context.user_data`. /// /// The world-space contact normal can be modified in `context.normal`. 
fn modify_solver_contacts(&self, _context: &mut ContactModificationContext) {} } impl PhysicsHooks for () { fn filter_contact_pair(&self, _context: &PairFilterContext) -> Option<SolverFlags> { Some(SolverFlags::default()) } fn filter_intersection_pair(&self, _: &PairFilterContext) -> bool { true } fn modify_solver_contacts(&self, _: &mut ContactModificationContext) {} }
{ const CONTACT_CONFIGURATION_UNKNOWN: u32 = 0; const CONTACT_CURRENTLY_ALLOWED: u32 = 1; const CONTACT_CURRENTLY_FORBIDDEN: u32 = 2; let cang = ComplexField::cos(allowed_angle); // Test the allowed normal with the local-space contact normal that // points towards the exterior of context.collider1. let contact_is_ok = self.manifold.local_n1.dot(&allowed_local_n1) >= cang; match *self.user_data { CONTACT_CONFIGURATION_UNKNOWN => { if contact_is_ok { // The contact is close enough to the allowed normal. *self.user_data = CONTACT_CURRENTLY_ALLOWED; } else { // The contact normal isn't close enough to the allowed // normal, so remove all the contacts and mark further contacts // as forbidden.
identifier_body
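This row's `middle` is the entire body of `update_as_oneway_platform`. Wiring that helper into a hook might look like the sketch below; the `PLATFORM_TAG` convention is hypothetical, and for brevity it assumes the platform is `collider1` (a fuller version would also handle the flipped case via `local_n2`):

```rust
use rapier3d::prelude::*;

/// Hypothetical tag marking one-way platform colliders via `user_data`.
const PLATFORM_TAG: u128 = 1;

struct OneWayPlatformHook;

impl PhysicsHooks for OneWayPlatformHook {
    fn modify_solver_contacts(&self, context: &mut ContactModificationContext) {
        if context.colliders[context.collider1].user_data == PLATFORM_TAG {
            // Only keep contacts whose local normal on the platform points
            // roughly "up", within ~30 degrees (0.52 rad).
            let up: Vector<Real> = Vector::y();
            context.update_as_oneway_platform(&up, 0.52);
        }
    }
}
```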
physics_hooks.rs
use crate::dynamics::{RigidBodyHandle, RigidBodySet}; use crate::geometry::{ColliderHandle, ColliderSet, ContactManifold, SolverContact, SolverFlags}; use crate::math::{Real, Vector}; use na::ComplexField; /// Context given to custom collision filters to filter-out collisions. pub struct PairFilterContext<'a> { /// The set of rigid-bodies. pub bodies: &'a RigidBodySet, /// The set of colliders. pub colliders: &'a ColliderSet, /// The handle of the first collider involved in the potential collision. pub collider1: ColliderHandle, /// The handle of the first collider involved in the potential collision. pub collider2: ColliderHandle, /// The handle of the first body involved in the potential collision. pub rigid_body1: Option<RigidBodyHandle>, /// The handle of the first body involved in the potential collision. pub rigid_body2: Option<RigidBodyHandle>, } /// Context given to custom contact modifiers to modify the contacts seen by the constraints solver. pub struct ContactModificationContext<'a> { /// The set of rigid-bodies. pub bodies: &'a RigidBodySet, /// The set of colliders. pub colliders: &'a ColliderSet, /// The handle of the first collider involved in the potential collision. pub collider1: ColliderHandle, /// The handle of the first collider involved in the potential collision. pub collider2: ColliderHandle, /// The handle of the first body involved in the potential collision. pub rigid_body1: Option<RigidBodyHandle>, /// The handle of the first body involved in the potential collision. pub rigid_body2: Option<RigidBodyHandle>, /// The contact manifold. pub manifold: &'a ContactManifold, /// The solver contacts that can be modified. pub solver_contacts: &'a mut Vec<SolverContact>, /// The contact normal that can be modified. pub normal: &'a mut Vector<Real>, /// User-defined data attached to the manifold. // NOTE: we keep this a &'a mut u32 to emphasize the // fact that this can be modified. pub user_data: &'a mut u32, } impl<'a> ContactModificationContext<'a> { /// Helper function to update `self` to emulate a oneway-platform. /// /// The "oneway" behavior will only allow contacts between two colliders /// if the local contact normal of the first collider involved in the contact /// is almost aligned with the provided `allowed_local_n1` direction. /// /// To make this method work properly it must be called as part of the /// `PhysicsHooks::modify_solver_contacts` method at each timestep, for each /// contact manifold involving a one-way platform. The `self.user_data` field /// must not be modified from the outside of this method. pub fn update_as_oneway_platform( &mut self, allowed_local_n1: &Vector<Real>, allowed_angle: Real, ) { const CONTACT_CONFIGURATION_UNKNOWN: u32 = 0; const CONTACT_CURRENTLY_ALLOWED: u32 = 1; const CONTACT_CURRENTLY_FORBIDDEN: u32 = 2; let cang = ComplexField::cos(allowed_angle); // Test the allowed normal with the local-space contact normal that // points towards the exterior of context.collider1. let contact_is_ok = self.manifold.local_n1.dot(&allowed_local_n1) >= cang; match *self.user_data { CONTACT_CONFIGURATION_UNKNOWN => { if contact_is_ok { // The contact is close enough to the allowed normal. *self.user_data = CONTACT_CURRENTLY_ALLOWED; } else { // The contact normal isn't close enough to the allowed // normal, so remove all the contacts and mark further contacts // as forbidden. self.solver_contacts.clear(); // NOTE: in some very rare cases `local_n1` will be // zero if the objects are exactly touching at one point. 
// So in this case we can't really conclude. // If the norm is non-zero, then we can tell we need to forbid // further contacts. Otherwise we have to wait for the next frame. if self.manifold.local_n1.norm_squared() > 0.1 { *self.user_data = CONTACT_CURRENTLY_FORBIDDEN; } } } CONTACT_CURRENTLY_FORBIDDEN => { // Contacts are forbidden so we need to continue forbidding contacts // until all the contacts are non-penetrating again. In that case, if // the contacts are OK wrt. the contact normal, then we can mark them as allowed. if contact_is_ok && self.solver_contacts.iter().all(|c| c.dist > 0.0) { *self.user_data = CONTACT_CURRENTLY_ALLOWED; } else
} CONTACT_CURRENTLY_ALLOWED => { // We allow all the contacts right now. The configuration becomes // uncertain again when the contact manifold no longer contains any contact. if self.solver_contacts.is_empty() { *self.user_data = CONTACT_CONFIGURATION_UNKNOWN; } } _ => unreachable!(), } } } bitflags::bitflags! { #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] /// Flags affecting the behavior of the constraints solver for a given contact manifold. pub struct ActiveHooks: u32 { /// If set, Rapier will call `PhysicsHooks::filter_contact_pair` whenever relevant. const FILTER_CONTACT_PAIRS = 0b0001; /// If set, Rapier will call `PhysicsHooks::filter_intersection_pair` whenever relevant. const FILTER_INTERSECTION_PAIR = 0b0010; /// If set, Rapier will call `PhysicsHooks::modify_solver_contact` whenever relevant. const MODIFY_SOLVER_CONTACTS = 0b0100; } } impl Default for ActiveHooks { fn default() -> Self { ActiveHooks::empty() } } // TODO: right now, the wasm version don't have the Send+Sync bounds. // This is because these bounds are very difficult to fulfill if we want to // call JS closures. Also, parallelism cannot be enabled for wasm targets, so // not having Send+Sync isn't a problem. /// User-defined functions called by the physics engines during one timestep in order to customize its behavior. #[cfg(target_arch = "wasm32")] pub trait PhysicsHooks { /// Applies the contact pair filter. fn filter_contact_pair(&self, _context: &PairFilterContext) -> Option<SolverFlags> { None } /// Applies the intersection pair filter. fn filter_intersection_pair(&self, _context: &PairFilterContext) -> bool { false } /// Modifies the set of contacts seen by the constraints solver. fn modify_solver_contacts(&self, _context: &mut ContactModificationContext) {} } /// User-defined functions called by the physics engines during one timestep in order to customize its behavior. #[cfg(not(target_arch = "wasm32"))] pub trait PhysicsHooks: Send + Sync { /// Applies the contact pair filter. /// /// Note that this method will only be called if at least one of the colliders /// involved in the contact contains the `ActiveHooks::FILTER_CONTACT_PAIRS` flags /// in its physics hooks flags. /// /// User-defined filter for potential contact pairs detected by the broad-phase. /// This can be used to apply custom logic in order to decide whether two colliders /// should have their contact computed by the narrow-phase, and if these contact /// should be solved by the constraints solver /// /// Note that using a contact pair filter will replace the default contact filtering /// which consists of preventing contact computation between two non-dynamic bodies. /// /// This filtering method is called after taking into account the colliders collision groups. /// /// If this returns `None`, then the narrow-phase will ignore this contact pair and /// not compute any contact manifolds for it. /// If this returns `Some`, then the narrow-phase will compute contact manifolds for /// this pair of colliders, and configure them with the returned solver flags. For /// example, if this returns `Some(SolverFlags::COMPUTE_IMPULSES)` then the contacts /// will be taken into account by the constraints solver. If this returns /// `Some(SolverFlags::empty())` then the constraints solver will ignore these /// contacts. fn filter_contact_pair(&self, _context: &PairFilterContext) -> Option<SolverFlags> { Some(SolverFlags::COMPUTE_IMPULSES) } /// Applies the intersection pair filter. 
/// /// Note that this method will only be called if at least one of the colliders /// involved in the contact contains the `ActiveHooks::FILTER_INTERSECTION_PAIR` flags /// in its physics hooks flags. /// /// User-defined filter for potential intersection pairs detected by the broad-phase. /// /// This can be used to apply custom logic in order to decide whether two colliders /// should have their intersection computed by the narrow-phase. /// /// Note that using an intersection pair filter will replace the default intersection filtering /// which consists of preventing intersection computation between two non-dynamic bodies. /// /// This filtering method is called after taking into account the colliders collision groups. /// /// If this returns `false`, then the narrow-phase will ignore this pair and /// not compute any intersection information for it. /// If this return `true` then the narrow-phase will compute intersection /// information for this pair. fn filter_intersection_pair(&self, _context: &PairFilterContext) -> bool { true } /// Modifies the set of contacts seen by the constraints solver. /// /// Note that this method will only be called if at least one of the colliders /// involved in the contact contains the `ActiveHooks::MODIFY_SOLVER_CONTACTS` flags /// in its physics hooks flags. /// /// By default, the content of `solver_contacts` is computed from `manifold.points`. /// This method will be called on each contact manifold which have the flag `SolverFlags::modify_solver_contacts` set. /// This method can be used to modify the set of solver contacts seen by the constraints solver: contacts /// can be removed and modified. /// /// Note that if all the contacts have to be ignored by the constraint solver, you may simply /// do `context.solver_contacts.clear()`. /// /// Modifying the solver contacts allow you to achieve various effects, including: /// - Simulating conveyor belts by setting the `surface_velocity` of a solver contact. /// - Simulating shapes with multiply materials by modifying the friction and restitution /// coefficient depending of the features in contacts. /// - Simulating one-way platforms depending on the contact normal. /// /// Each contact manifold is given a `u32` user-defined data that is persistent between /// timesteps (as long as the contact manifold exists). This user-defined data is initialized /// as 0 and can be modified in `context.user_data`. /// /// The world-space contact normal can be modified in `context.normal`. fn modify_solver_contacts(&self, _context: &mut ContactModificationContext) {} } impl PhysicsHooks for () { fn filter_contact_pair(&self, _context: &PairFilterContext) -> Option<SolverFlags> { Some(SolverFlags::default()) } fn filter_intersection_pair(&self, _: &PairFilterContext) -> bool { true } fn modify_solver_contacts(&self, _: &mut ContactModificationContext) {} }
{ // Discard all the contacts. self.solver_contacts.clear(); }
conditional_block
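As the doc comments keep repeating, none of these hooks fire unless a collider opts in through its `ActiveHooks` flags. A sketch of the opt-in, assuming rapier's `ColliderBuilder::active_hooks` and `user_data` builder methods:

```rust
use rapier3d::prelude::*;

fn build_platform_collider() -> Collider {
    // Without these flags, filter_contact_pair / modify_solver_contacts are
    // never called for pairs involving this collider.
    ColliderBuilder::cuboid(2.0, 0.1, 2.0)
        .active_hooks(ActiveHooks::FILTER_CONTACT_PAIRS | ActiveHooks::MODIFY_SOLVER_CONTACTS)
        .user_data(1) // the hypothetical PLATFORM_TAG from the earlier sketch
        .build()
}
```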
physics_hooks.rs
use crate::dynamics::{RigidBodyHandle, RigidBodySet}; use crate::geometry::{ColliderHandle, ColliderSet, ContactManifold, SolverContact, SolverFlags}; use crate::math::{Real, Vector}; use na::ComplexField; /// Context given to custom collision filters to filter-out collisions. pub struct PairFilterContext<'a> { /// The set of rigid-bodies. pub bodies: &'a RigidBodySet, /// The set of colliders. pub colliders: &'a ColliderSet, /// The handle of the first collider involved in the potential collision. pub collider1: ColliderHandle, /// The handle of the first collider involved in the potential collision. pub collider2: ColliderHandle, /// The handle of the first body involved in the potential collision. pub rigid_body1: Option<RigidBodyHandle>, /// The handle of the first body involved in the potential collision. pub rigid_body2: Option<RigidBodyHandle>, } /// Context given to custom contact modifiers to modify the contacts seen by the constraints solver. pub struct ContactModificationContext<'a> { /// The set of rigid-bodies. pub bodies: &'a RigidBodySet, /// The set of colliders. pub colliders: &'a ColliderSet, /// The handle of the first collider involved in the potential collision. pub collider1: ColliderHandle, /// The handle of the first collider involved in the potential collision. pub collider2: ColliderHandle, /// The handle of the first body involved in the potential collision. pub rigid_body1: Option<RigidBodyHandle>, /// The handle of the first body involved in the potential collision. pub rigid_body2: Option<RigidBodyHandle>, /// The contact manifold. pub manifold: &'a ContactManifold, /// The solver contacts that can be modified. pub solver_contacts: &'a mut Vec<SolverContact>, /// The contact normal that can be modified. pub normal: &'a mut Vector<Real>, /// User-defined data attached to the manifold. // NOTE: we keep this a &'a mut u32 to emphasize the // fact that this can be modified. pub user_data: &'a mut u32, } impl<'a> ContactModificationContext<'a> { /// Helper function to update `self` to emulate a oneway-platform. /// /// The "oneway" behavior will only allow contacts between two colliders /// if the local contact normal of the first collider involved in the contact /// is almost aligned with the provided `allowed_local_n1` direction. /// /// To make this method work properly it must be called as part of the /// `PhysicsHooks::modify_solver_contacts` method at each timestep, for each /// contact manifold involving a one-way platform. The `self.user_data` field /// must not be modified from the outside of this method. pub fn update_as_oneway_platform( &mut self, allowed_local_n1: &Vector<Real>, allowed_angle: Real, ) { const CONTACT_CONFIGURATION_UNKNOWN: u32 = 0; const CONTACT_CURRENTLY_ALLOWED: u32 = 1; const CONTACT_CURRENTLY_FORBIDDEN: u32 = 2; let cang = ComplexField::cos(allowed_angle); // Test the allowed normal with the local-space contact normal that // points towards the exterior of context.collider1. let contact_is_ok = self.manifold.local_n1.dot(&allowed_local_n1) >= cang; match *self.user_data { CONTACT_CONFIGURATION_UNKNOWN => { if contact_is_ok { // The contact is close enough to the allowed normal. *self.user_data = CONTACT_CURRENTLY_ALLOWED; } else { // The contact normal isn't close enough to the allowed // normal, so remove all the contacts and mark further contacts // as forbidden. self.solver_contacts.clear(); // NOTE: in some very rare cases `local_n1` will be // zero if the objects are exactly touching at one point. 
// So in this case we can't really conclude. // If the norm is non-zero, then we can tell we need to forbid // further contacts. Otherwise we have to wait for the next frame. if self.manifold.local_n1.norm_squared() > 0.1 { *self.user_data = CONTACT_CURRENTLY_FORBIDDEN; } } } CONTACT_CURRENTLY_FORBIDDEN => { // Contacts are forbidden so we need to continue forbidding contacts // until all the contacts are non-penetrating again. In that case, if // the contacts are OK wrt. the contact normal, then we can mark them as allowed. if contact_is_ok && self.solver_contacts.iter().all(|c| c.dist > 0.0) { *self.user_data = CONTACT_CURRENTLY_ALLOWED; } else { // Discard all the contacts. self.solver_contacts.clear(); } } CONTACT_CURRENTLY_ALLOWED => { // We allow all the contacts right now. The configuration becomes // uncertain again when the contact manifold no longer contains any contact. if self.solver_contacts.is_empty() { *self.user_data = CONTACT_CONFIGURATION_UNKNOWN; } } _ => unreachable!(), } } } bitflags::bitflags! { #[cfg_attr(feature = "serde-serialize", derive(Serialize, Deserialize))] /// Flags affecting the behavior of the constraints solver for a given contact manifold. pub struct ActiveHooks: u32 { /// If set, Rapier will call `PhysicsHooks::filter_contact_pair` whenever relevant. const FILTER_CONTACT_PAIRS = 0b0001; /// If set, Rapier will call `PhysicsHooks::filter_intersection_pair` whenever relevant. const FILTER_INTERSECTION_PAIR = 0b0010; /// If set, Rapier will call `PhysicsHooks::modify_solver_contact` whenever relevant. const MODIFY_SOLVER_CONTACTS = 0b0100; } } impl Default for ActiveHooks { fn default() -> Self { ActiveHooks::empty() } } // TODO: right now, the wasm version don't have the Send+Sync bounds. // This is because these bounds are very difficult to fulfill if we want to // call JS closures. Also, parallelism cannot be enabled for wasm targets, so // not having Send+Sync isn't a problem. /// User-defined functions called by the physics engines during one timestep in order to customize its behavior. #[cfg(target_arch = "wasm32")] pub trait PhysicsHooks { /// Applies the contact pair filter. fn filter_contact_pair(&self, _context: &PairFilterContext) -> Option<SolverFlags> { None } /// Applies the intersection pair filter. fn filter_intersection_pair(&self, _context: &PairFilterContext) -> bool { false } /// Modifies the set of contacts seen by the constraints solver. fn modify_solver_contacts(&self, _context: &mut ContactModificationContext) {} } /// User-defined functions called by the physics engines during one timestep in order to customize its behavior. #[cfg(not(target_arch = "wasm32"))] pub trait PhysicsHooks: Send + Sync { /// Applies the contact pair filter. /// /// Note that this method will only be called if at least one of the colliders /// involved in the contact contains the `ActiveHooks::FILTER_CONTACT_PAIRS` flags /// in its physics hooks flags. /// /// User-defined filter for potential contact pairs detected by the broad-phase. /// This can be used to apply custom logic in order to decide whether two colliders /// should have their contact computed by the narrow-phase, and if these contact /// should be solved by the constraints solver /// /// Note that using a contact pair filter will replace the default contact filtering /// which consists of preventing contact computation between two non-dynamic bodies. /// /// This filtering method is called after taking into account the colliders collision groups. 
/// /// If this returns `None`, then the narrow-phase will ignore this contact pair and /// not compute any contact manifolds for it. /// If this returns `Some`, then the narrow-phase will compute contact manifolds for /// this pair of colliders, and configure them with the returned solver flags. For /// example, if this returns `Some(SolverFlags::COMPUTE_IMPULSES)` then the contacts /// will be taken into account by the constraints solver. If this returns /// `Some(SolverFlags::empty())` then the constraints solver will ignore these /// contacts. fn filter_contact_pair(&self, _context: &PairFilterContext) -> Option<SolverFlags> { Some(SolverFlags::COMPUTE_IMPULSES) } /// Applies the intersection pair filter. /// /// Note that this method will only be called if at least one of the colliders /// involved in the contact contains the `ActiveHooks::FILTER_INTERSECTION_PAIR` flags /// in its physics hooks flags. /// /// User-defined filter for potential intersection pairs detected by the broad-phase. /// /// This can be used to apply custom logic in order to decide whether two colliders /// should have their intersection computed by the narrow-phase. /// /// Note that using an intersection pair filter will replace the default intersection filtering /// which consists of preventing intersection computation between two non-dynamic bodies. /// /// This filtering method is called after taking into account the colliders collision groups. /// /// If this returns `false`, then the narrow-phase will ignore this pair and /// not compute any intersection information for it. /// If this return `true` then the narrow-phase will compute intersection /// information for this pair. fn filter_intersection_pair(&self, _context: &PairFilterContext) -> bool { true } /// Modifies the set of contacts seen by the constraints solver. /// /// Note that this method will only be called if at least one of the colliders /// involved in the contact contains the `ActiveHooks::MODIFY_SOLVER_CONTACTS` flags /// in its physics hooks flags. /// /// By default, the content of `solver_contacts` is computed from `manifold.points`. /// This method will be called on each contact manifold which have the flag `SolverFlags::modify_solver_contacts` set. /// This method can be used to modify the set of solver contacts seen by the constraints solver: contacts /// can be removed and modified. /// /// Note that if all the contacts have to be ignored by the constraint solver, you may simply /// do `context.solver_contacts.clear()`. /// /// Modifying the solver contacts allow you to achieve various effects, including: /// - Simulating conveyor belts by setting the `surface_velocity` of a solver contact. /// - Simulating shapes with multiply materials by modifying the friction and restitution /// coefficient depending of the features in contacts. /// - Simulating one-way platforms depending on the contact normal. /// /// Each contact manifold is given a `u32` user-defined data that is persistent between /// timesteps (as long as the contact manifold exists). This user-defined data is initialized /// as 0 and can be modified in `context.user_data`. /// /// The world-space contact normal can be modified in `context.normal`. fn modify_solver_contacts(&self, _context: &mut ContactModificationContext) {} } impl PhysicsHooks for () { fn filter_contact_pair(&self, _context: &PairFilterContext) -> Option<SolverFlags> { Some(SolverFlags::default()) } fn
(&self, _: &PairFilterContext) -> bool { true } fn modify_solver_contacts(&self, _: &mut ContactModificationContext) {} }
filter_intersection_pair
identifier_name
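With this row, all four `fim_type` classes announced in the schema have appeared. Summarized as a sketch:

```rust
/// The four `fim_type` classes observed across the rows of this dump.
enum FimType {
    /// The middle is the body of one branch, e.g. `{ self }` in Range::extend.
    ConditionalBlock,
    /// The middle is an entire function body, e.g. update_as_oneway_platform's.
    IdentifierBody,
    /// The middle is a single identifier, e.g. `filter_intersection_pair`.
    IdentifierName,
    /// The split point is a random line, so the middle can be any span,
    /// such as the lone `///` doc-comment marker seen above.
    RandomLineSplit,
}
```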
main.rs
, DeviceUpdate, InterfaceName, Key, PeerConfigBuilder}; pub mod api; pub mod db; pub mod error; #[cfg(test)] mod test; pub mod util; mod initialize; use db::{DatabaseCidr, DatabasePeer}; pub use error::ServerError; use initialize::InitializeOpts; use shared::{prompts, wg, CidrTree, Error, Interface, SERVER_CONFIG_DIR, SERVER_DATABASE_DIR}; pub use shared::{Association, AssociationContents}; pub const VERSION: &str = env!("CARGO_PKG_VERSION"); #[derive(Debug, StructOpt)] #[structopt(name = "innernet-server", about, global_settings(&[AppSettings::ColoredHelp, AppSettings::DeriveDisplayOrder, AppSettings::VersionlessSubcommands, AppSettings::UnifiedHelpMessage]))] struct Opt { #[structopt(subcommand)] command: Command, #[structopt(flatten)] network: NetworkOpt, } #[derive(Debug, StructOpt)] enum Command { /// Create a new network. #[structopt(alias = "init")] New { #[structopt(flatten)] opts: InitializeOpts, }, /// Permanently uninstall a created network, rendering it unusable. Use with care. Uninstall { interface: Interface }, /// Serve the coordinating server for an existing network. Serve { interface: Interface, #[structopt(flatten)] network: NetworkOpt, }, /// Add a peer to an existing network. AddPeer { interface: Interface, #[structopt(flatten)] args: AddPeerOpts, }, /// Rename an existing peer. RenamePeer { interface: Interface, #[structopt(flatten)] args: RenamePeerOpts, }, /// Add a new CIDR to an existing network. AddCidr { interface: Interface, #[structopt(flatten)] args: AddCidrOpts, }, /// Delete a CIDR. DeleteCidr { interface: Interface, #[structopt(flatten)] args: DeleteCidrOpts, }, /// Generate shell completion scripts Completions { #[structopt(possible_values = &structopt::clap::Shell::variants(), case_insensitive = true)] shell: structopt::clap::Shell, }, } pub type Db = Arc<Mutex<Connection>>; pub type Endpoints = Arc<RwLock<HashMap<String, SocketAddr>>>; #[derive(Clone)] pub struct Context { pub db: Db, pub endpoints: Arc<RwLock<HashMap<String, SocketAddr>>>, pub interface: InterfaceName, pub backend: Backend, pub public_key: Key, } pub struct Session { pub context: Context, pub peer: DatabasePeer, } impl Session { pub fn admin_capable(&self) -> bool { self.peer.is_admin && self.user_capable() } pub fn user_capable(&self) -> bool { !self.peer.is_disabled && self.peer.is_redeemed } pub fn redeemable(&self) -> bool { !self.peer.is_disabled &&!self.peer.is_redeemed } } #[derive(Deserialize, Serialize, Debug)] #[serde(rename_all = "kebab-case")] pub struct ConfigFile { /// The server's WireGuard key pub private_key: String, /// The listen port of the server pub listen_port: u16, /// The internal WireGuard IP address assigned to the server pub address: IpAddr, /// The CIDR prefix of the WireGuard network pub network_cidr_prefix: u8, } impl ConfigFile { pub fn write_to_path<P: AsRef<Path>>(&self, path: P) -> Result<(), Error> { let mut invitation_file = File::create(&path).with_path(&path)?; shared::chmod(&invitation_file, 0o600)?; invitation_file .write_all(toml::to_string(self).unwrap().as_bytes()) .with_path(path)?; Ok(()) } pub fn from_file<P: AsRef<Path>>(path: P) -> Result<Self, Error> { let path = path.as_ref(); let file = File::open(path).with_path(path)?; if shared::chmod(&file, 0o600)? { println!( "{} updated permissions for {} to 0600.", "[!]".yellow(), path.display() ); } Ok(toml::from_slice(&std::fs::read(&path).with_path(path)?)?) 
} } #[derive(Clone, Debug, Default)] pub struct ServerConfig { wg_manage_dir_override: Option<PathBuf>, wg_dir_override: Option<PathBuf>, } impl ServerConfig { fn database_dir(&self) -> &Path { self.wg_manage_dir_override .as_deref() .unwrap_or(*SERVER_DATABASE_DIR) } fn database_path(&self, interface: &InterfaceName) -> PathBuf { PathBuf::new() .join(self.database_dir()) .join(interface.to_string()) .with_extension("db") } fn config_dir(&self) -> &Path { self.wg_dir_override .as_deref() .unwrap_or(*SERVER_CONFIG_DIR) } fn config_path(&self, interface: &InterfaceName) -> PathBuf { PathBuf::new() .join(self.config_dir()) .join(interface.to_string()) .with_extension("conf") } } #[tokio::main] async fn main() -> Result<(), Box<dyn std::error::Error>> { if env::var_os("RUST_LOG").is_none() { // Set some default log settings. env::set_var("RUST_LOG", "warn,warp=info,wg_manage_server=info"); } pretty_env_logger::init(); let opt = Opt::from_args(); if unsafe { libc::getuid() }!= 0 &&!matches!(opt.command, Command::Completions {.. }) { return Err("innernet-server must run as root.".into()); } let conf = ServerConfig::default(); match opt.command { Command::New { opts } => { if let Err(e) = initialize::init_wizard(&conf, opts) { eprintln!("{}: {}.", "creation failed".red(), e); std::process::exit(1); } }, Command::Uninstall { interface } => uninstall(&interface, &conf, opt.network)?, Command::Serve { interface, network: routing, } => serve(*interface, &conf, routing).await?, Command::AddPeer { interface, args } => add_peer(&interface, &conf, args, opt.network)?, Command::RenamePeer { interface, args } => rename_peer(&interface, &conf, args)?, Command::AddCidr { interface, args } => add_cidr(&interface, &conf, args)?, Command::DeleteCidr { interface, args } => delete_cidr(&interface, &conf, args)?, Command::Completions { shell } => { Opt::clap().gen_completions_to("innernet-server", shell, &mut std::io::stdout()); std::process::exit(0); }, } Ok(()) } fn open_database_connection( interface: &InterfaceName, conf: &ServerConfig, ) -> Result<rusqlite::Connection, Box<dyn std::error::Error>> { let database_path = conf.database_path(&interface); if!Path::new(&database_path).exists() { return Err(format!( "no database file found at {}", database_path.to_string_lossy() ) .into()); } let conn = Connection::open(&database_path)?; // Foreign key constraints aren't on in SQLite by default. Enable. conn.pragma_update(None, "foreign_keys", &1)?; db::auto_migrate(&conn)?; Ok(conn) } fn add_peer( interface: &InterfaceName, conf: &ServerConfig, opts: AddPeerOpts, network: NetworkOpt, ) -> Result<(), Error> { let config = ConfigFile::from_file(conf.config_path(interface))?; let conn = open_database_connection(interface, conf)?; let peers = DatabasePeer::list(&conn)? .into_iter() .map(|dp| dp.inner) .collect::<Vec<_>>(); let cidrs = DatabaseCidr::list(&conn)?; let cidr_tree = CidrTree::new(&cidrs[..]); if let Some((peer_request, keypair)) = shared::prompts::add_peer(&peers, &cidr_tree, &opts)? { let peer = DatabasePeer::create(&conn, peer_request)?; if cfg!(not(test)) && Device::get(interface, network.backend).is_ok() { // Update the current WireGuard interface with the new peers. 
DeviceUpdate::new() .add_peer((&*peer).into()) .apply(interface, network.backend) .map_err(|_| ServerError::WireGuard)?; println!("adding to WireGuard interface: {}", &*peer); } let server_peer = DatabasePeer::get(&conn, 1)?; prompts::save_peer_invitation( interface, &peer, &*server_peer, &cidr_tree, keypair, &SocketAddr::new(config.address, config.listen_port), &opts.save_config, )?; } else { println!("exited without creating peer."); } Ok(()) } fn rename_peer( interface: &InterfaceName, conf: &ServerConfig, opts: RenamePeerOpts, ) -> Result<(), Error> { let conn = open_database_connection(interface, conf)?; let peers = DatabasePeer::list(&conn)? .into_iter() .map(|dp| dp.inner) .collect::<Vec<_>>(); if let Some((peer_request, old_name)) = shared::prompts::rename_peer(&peers, &opts)? { let mut db_peer = DatabasePeer::list(&conn)? .into_iter() .find(|p| p.name == old_name) .ok_or( "Peer not found.")?; let _peer = db_peer.update(&conn, peer_request)?; } else { println!("exited without creating peer."); } Ok(()) } fn add_cidr( interface: &InterfaceName, conf: &ServerConfig, opts: AddCidrOpts, ) -> Result<(), Error> { let conn = open_database_connection(interface, conf)?; let cidrs = DatabaseCidr::list(&conn)?; if let Some(cidr_request) = shared::prompts::add_cidr(&cidrs, &opts)? { let cidr = DatabaseCidr::create(&conn, cidr_request)?; printdoc!( " CIDR \"{cidr_name}\" added. Right now, peers within {cidr_name} can only see peers in the same CIDR, and in the special \"innernet-server\" CIDR that includes the innernet server peer. You'll need to add more associations for peers in diffent CIDRs to communicate. ", cidr_name = cidr.name.bold() ); } else { println!("exited without creating CIDR."); } Ok(()) } fn delete_cidr( interface: &InterfaceName, conf: &ServerConfig, args: DeleteCidrOpts, ) -> Result<(), Error> { println!("Fetching eligible CIDRs"); let conn = open_database_connection(interface, conf)?; let cidrs = DatabaseCidr::list(&conn)?; let peers = DatabasePeer::list(&conn)? .into_iter() .map(|dp| dp.inner) .collect::<Vec<_>>(); let cidr_id = prompts::delete_cidr(&cidrs, &peers, &args)?; println!("Deleting CIDR..."); let _ = DatabaseCidr::delete(&conn, cidr_id)?; println!("CIDR deleted."); Ok(()) } fn uninstall( interface: &InterfaceName, conf: &ServerConfig, network: NetworkOpt, ) -> Result<(), Error> { if Confirm::with_theme(&*prompts::THEME) .with_prompt(&format!( "Permanently delete network \"{}\"?", interface.as_str_lossy().yellow() )) .default(false) .interact()?
Ok(()) } fn spawn_endpoint_refresher(interface: InterfaceName, network: NetworkOpt) -> Endpoints { let endpoints = Arc::new(RwLock::new(HashMap::new())); tokio::task::spawn({ let endpoints = endpoints.clone(); async move { let mut interval = tokio::time::interval(Duration::from_secs(10)); loop { interval.tick().await; if let Ok(info) = Device::get(&interface, network.backend) { for peer in info.peers { if let Some(endpoint) = peer.config.endpoint { endpoints .write() .insert(peer.config.public_key.to_base64(), endpoint); } } } } } }); endpoints } fn spawn_expired_invite_sweeper(db: Db) { tokio::task::spawn(async move { let mut interval = tokio::time::interval(Duration::from_secs(10)); loop { interval.tick().await; match DatabasePeer::delete_expired_invites(&db.lock()) { Ok(deleted) if deleted > 0 => { log::info!("Deleted {} expired peer invitations.", deleted) }, Err(e) => log::error!("Failed to delete expired peer invitations: {}", e), _ => {}, } } }); } async fn serve( interface: InterfaceName, conf: &ServerConfig, network: NetworkOpt, ) -> Result<(), Error> { let config = ConfigFile::from_file(conf.config_path(&interface))?; let conn = open_database_connection(&interface, conf)?; let peers = DatabasePeer::list(&conn)?; let peer_configs = peers .iter() .map(|peer| peer.deref().into()) .collect::<Vec<PeerConfigBuilder>>(); log::info!("bringing up interface."); wg::up( &interface, &config.private_key, IpNetwork::new(config.address, config.network_cidr_prefix)?, Some(config.listen_port), None, network, )?; DeviceUpdate::new() .add_peers(&peer_configs) .apply(&interface, network.backend)?; log::info!("{} peers added to wireguard interface.", peers.len()); let public_key = wgctrl::Key::from_base64(&config.private_key)?.generate_public(); let db = Arc::new(Mutex::new(conn)); let endpoints = spawn_endpoint_refresher(interface, network); spawn_expired_invite_sweeper(db.clone()); let context = Context { db, endpoints, interface, public_key, backend: network.backend, }; log::info!("innernet-server {} starting.", VERSION); let listener = get_listener((config.address, config.listen_port).into(), &interface)?; let make_svc = hyper::service::make_service_fn(move |socket: &AddrStream| { let remote_addr = socket.remote_addr(); let context = context.clone(); async move { Ok::<_, http::Error>(hyper::service::service_fn(move |req: Request<Body>| { log::debug!("{} - {} {}", &remote_addr, req.method(), req.uri()); hyper_service(req, context.clone(), remote_addr) })) } }); let server = hyper::Server::from_tcp(listener)?.serve(make_svc); server.await?; Ok(()) } /// This function differs per OS, because different operating systems have /// opposing characteristics when binding to a specific IP address. /// On Linux, binding to a specific local IP address does *not* bind it to /// that IP's interface, allowing for spoofing attacks. /// /// See https://github.com/tonarino/innernet/issues/26 for more details. #[cfg(target_os = "linux")] fn get_listener(addr: SocketAddr, interface: &InterfaceName) -> Result<TcpListener, Error> { let listener = TcpListener::bind(&addr)?; listener.set_nonblocking(true)?; let sock = socket2::Socket::from(listener); sock.bind_device(Some(interface.as_str_lossy().as_bytes()))?; Ok(sock.into()) } /// BSD-likes do seem to bind to an interface when binding to an IP, /// according to the internet, but we may want to explicitly use /// IP_BOUND_IF in the future regardless. This isn't currently in /// the socket2 crate however, so we aren't currently using it. 
/// /// See https://github.com/tonarino/innernet/issues/26 for more details. #[cfg(not(target_os = "linux"))] fn get_listener(addr: SocketAddr, _interface: &InterfaceName) -> Result<TcpListener, Error> { let listener = TcpListener::bind(&addr)?; listener.set_nonblocking(true)?; Ok(listener) } pub(crate) async fn hyper_service( req: Request<Body>, context: Context, remote_addr: SocketAddr, ) -> Result<Response<Body>, http::Error> { // Break the path into components. let components: VecDeque<_> = req .uri() .path() .trim_start_matches('/') .split('/') .map(String::from) .collect(); routes(req, context, remote_addr, components) .await .or_else(TryInto::try_into) } async fn routes( req: Request<Body>, context: Context, remote_addr: SocketAddr, mut components: VecDeque<String>, ) -> Result<Response<Body>, ServerError> { // Must be "/v1/[something]" if components.pop_front().as_deref()!= Some("v1") { Err(ServerError::NotFound) } else { let session = get_session(&req, context, remote_addr.ip())?; let component = components.pop_front(); match component.as_deref() { Some("user") => api::user::routes(req, components, session).await, Some("admin") => api::admin::routes(req, components, session).await, _ => Err(ServerError::NotFound), } } } fn get_session( req: &Request<Body>, context: Context, addr: IpAddr, ) -> Result<Session, ServerError> { let pubkey = req .headers() .get(INNERNET_PUBKEY_HEADER) .ok_or(ServerError::Unauthorized)?; let pubkey = pubkey.to_str().map_err(|_| ServerError::Unauthorized)?; let pubkey = Key::from_base64(&pubkey).map_err(|_| ServerError::Unauthorized)?; if pubkey.0.ct_eq(&context.public_key.0).into() { let peer = DatabasePeer::get_from_ip(&context.db.lock(), addr).map_err(|e| match e { rusqlite::Error::QueryReturnedNoRows => ServerError::Unauthorized, e => ServerError::Database(e), })?; if!peer.is_disabled { return Ok(Session { context, peer }); } } Err(ServerError::Unauthorized) } #[cfg(test)] mod tests { use super::*; use crate::test; use anyhow::Result; use hyper::StatusCode; use std::path::Path; #[test] fn test_init_wizard() -> Result<(), Error> { // This runs init_wizard(). let server = test::Server::new()?; assert!(Path::new(&server.wg_conf_path()).exists()); Ok(()) } #[tokio::test] async fn test_with_session_disguised_with_headers() -> Result<(), Error> { let server = test::Server::new()?; let req = Request::builder() .uri(format!("http://{}/v1/admin/peers", test::WG_MANAGE_PEER_IP)) .header("Forwarded", format!("for={}", test::ADMIN_PEER_IP)) .header("X-Forwarded-For", test::ADMIN_PEER_IP) .header("X-Real-IP", test::ADMIN_PEER_IP) .body(Body::empty()) .unwrap(); // Request from an unknown IP, trying to disguise as an admin using HTTP headers. let res = server.raw_request("10.80.80.80", req).await; // addr::remote() filter only look at remote_addr from TCP socket. // HTTP headers are not considered. This also means that innernet // server would not function behind an HTTP proxy. assert_eq!(res.status(), StatusCode::UNAUTHORIZED); Ok(()) } #[tokio::test] async fn test_incorrect_public_key() -> Result<(), Error> { let server = test::Server::new()?; let key = Key::generate_private().generate_public(); // Request from an unknown IP, trying to disguise as
{ println!("{} bringing down interface (if up).", "[*]".dimmed()); wg::down(interface, network.backend).ok(); let config = conf.config_path(interface); let data = conf.database_path(interface); std::fs::remove_file(&config) .with_path(&config) .map_err(|e| println!("[!] {}", e.to_string().yellow())) .ok(); std::fs::remove_file(&data) .with_path(&data) .map_err(|e| println!("[!] {}", e.to_string().yellow())) .ok(); println!( "{} network {} is uninstalled.", "[*]".dimmed(), interface.as_str_lossy().yellow() ); }
conditional_block
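The masked `middle` in this example is the body of the confirmed branch of `uninstall`: bring the WireGuard interface down, then best-effort delete the config and database files. The confirmation gate in front of it follows a common prompt pattern; a minimal sketch, assuming the `dialoguer` crate that `prompts::THEME` suggests (older dialoguer versions return `std::io::Result` from `interact`, newer ones use their own `Result` type):

use dialoguer::Confirm;

// Ask before a destructive action, defaulting to "no" so that a bare
// Enter keypress cannot delete anything.
fn confirm_destroy(network: &str) -> std::io::Result<bool> {
    Confirm::new()
        .with_prompt(format!("Permanently delete network \"{}\"?", network))
        .default(false)
        .interact()
}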
main.rs
, DeviceUpdate, InterfaceName, Key, PeerConfigBuilder}; pub mod api; pub mod db; pub mod error; #[cfg(test)] mod test; pub mod util; mod initialize; use db::{DatabaseCidr, DatabasePeer}; pub use error::ServerError; use initialize::InitializeOpts; use shared::{prompts, wg, CidrTree, Error, Interface, SERVER_CONFIG_DIR, SERVER_DATABASE_DIR}; pub use shared::{Association, AssociationContents}; pub const VERSION: &str = env!("CARGO_PKG_VERSION"); #[derive(Debug, StructOpt)] #[structopt(name = "innernet-server", about, global_settings(&[AppSettings::ColoredHelp, AppSettings::DeriveDisplayOrder, AppSettings::VersionlessSubcommands, AppSettings::UnifiedHelpMessage]))] struct Opt { #[structopt(subcommand)] command: Command, #[structopt(flatten)] network: NetworkOpt, } #[derive(Debug, StructOpt)] enum Command { /// Create a new network. #[structopt(alias = "init")] New { #[structopt(flatten)] opts: InitializeOpts, }, /// Permanently uninstall a created network, rendering it unusable. Use with care. Uninstall { interface: Interface }, /// Serve the coordinating server for an existing network. Serve { interface: Interface, #[structopt(flatten)] network: NetworkOpt, }, /// Add a peer to an existing network. AddPeer { interface: Interface, #[structopt(flatten)] args: AddPeerOpts, }, /// Rename an existing peer. RenamePeer { interface: Interface, #[structopt(flatten)] args: RenamePeerOpts, }, /// Add a new CIDR to an existing network. AddCidr { interface: Interface, #[structopt(flatten)] args: AddCidrOpts, }, /// Delete a CIDR. DeleteCidr { interface: Interface, #[structopt(flatten)] args: DeleteCidrOpts, }, /// Generate shell completion scripts Completions { #[structopt(possible_values = &structopt::clap::Shell::variants(), case_insensitive = true)] shell: structopt::clap::Shell, }, } pub type Db = Arc<Mutex<Connection>>; pub type Endpoints = Arc<RwLock<HashMap<String, SocketAddr>>>; #[derive(Clone)] pub struct Context { pub db: Db, pub endpoints: Arc<RwLock<HashMap<String, SocketAddr>>>, pub interface: InterfaceName, pub backend: Backend, pub public_key: Key, } pub struct Session { pub context: Context, pub peer: DatabasePeer, } impl Session { pub fn admin_capable(&self) -> bool { self.peer.is_admin && self.user_capable() } pub fn user_capable(&self) -> bool { !self.peer.is_disabled && self.peer.is_redeemed } pub fn redeemable(&self) -> bool { !self.peer.is_disabled &&!self.peer.is_redeemed } } #[derive(Deserialize, Serialize, Debug)] #[serde(rename_all = "kebab-case")] pub struct ConfigFile { /// The server's WireGuard key pub private_key: String, /// The listen port of the server pub listen_port: u16, /// The internal WireGuard IP address assigned to the server pub address: IpAddr, /// The CIDR prefix of the WireGuard network pub network_cidr_prefix: u8, } impl ConfigFile { pub fn write_to_path<P: AsRef<Path>>(&self, path: P) -> Result<(), Error>
pub fn from_file<P: AsRef<Path>>(path: P) -> Result<Self, Error> { let path = path.as_ref(); let file = File::open(path).with_path(path)?; if shared::chmod(&file, 0o600)? { println!( "{} updated permissions for {} to 0600.", "[!]".yellow(), path.display() ); } Ok(toml::from_slice(&std::fs::read(&path).with_path(path)?)?) } } #[derive(Clone, Debug, Default)] pub struct ServerConfig { wg_manage_dir_override: Option<PathBuf>, wg_dir_override: Option<PathBuf>, } impl ServerConfig { fn database_dir(&self) -> &Path { self.wg_manage_dir_override .as_deref() .unwrap_or(*SERVER_DATABASE_DIR) } fn database_path(&self, interface: &InterfaceName) -> PathBuf { PathBuf::new() .join(self.database_dir()) .join(interface.to_string()) .with_extension("db") } fn config_dir(&self) -> &Path { self.wg_dir_override .as_deref() .unwrap_or(*SERVER_CONFIG_DIR) } fn config_path(&self, interface: &InterfaceName) -> PathBuf { PathBuf::new() .join(self.config_dir()) .join(interface.to_string()) .with_extension("conf") } } #[tokio::main] async fn main() -> Result<(), Box<dyn std::error::Error>> { if env::var_os("RUST_LOG").is_none() { // Set some default log settings. env::set_var("RUST_LOG", "warn,warp=info,wg_manage_server=info"); } pretty_env_logger::init(); let opt = Opt::from_args(); if unsafe { libc::getuid() }!= 0 &&!matches!(opt.command, Command::Completions {.. }) { return Err("innernet-server must run as root.".into()); } let conf = ServerConfig::default(); match opt.command { Command::New { opts } => { if let Err(e) = initialize::init_wizard(&conf, opts) { eprintln!("{}: {}.", "creation failed".red(), e); std::process::exit(1); } }, Command::Uninstall { interface } => uninstall(&interface, &conf, opt.network)?, Command::Serve { interface, network: routing, } => serve(*interface, &conf, routing).await?, Command::AddPeer { interface, args } => add_peer(&interface, &conf, args, opt.network)?, Command::RenamePeer { interface, args } => rename_peer(&interface, &conf, args)?, Command::AddCidr { interface, args } => add_cidr(&interface, &conf, args)?, Command::DeleteCidr { interface, args } => delete_cidr(&interface, &conf, args)?, Command::Completions { shell } => { Opt::clap().gen_completions_to("innernet-server", shell, &mut std::io::stdout()); std::process::exit(0); }, } Ok(()) } fn open_database_connection( interface: &InterfaceName, conf: &ServerConfig, ) -> Result<rusqlite::Connection, Box<dyn std::error::Error>> { let database_path = conf.database_path(&interface); if!Path::new(&database_path).exists() { return Err(format!( "no database file found at {}", database_path.to_string_lossy() ) .into()); } let conn = Connection::open(&database_path)?; // Foreign key constraints aren't on in SQLite by default. Enable. conn.pragma_update(None, "foreign_keys", &1)?; db::auto_migrate(&conn)?; Ok(conn) } fn add_peer( interface: &InterfaceName, conf: &ServerConfig, opts: AddPeerOpts, network: NetworkOpt, ) -> Result<(), Error> { let config = ConfigFile::from_file(conf.config_path(interface))?; let conn = open_database_connection(interface, conf)?; let peers = DatabasePeer::list(&conn)? .into_iter() .map(|dp| dp.inner) .collect::<Vec<_>>(); let cidrs = DatabaseCidr::list(&conn)?; let cidr_tree = CidrTree::new(&cidrs[..]); if let Some((peer_request, keypair)) = shared::prompts::add_peer(&peers, &cidr_tree, &opts)? { let peer = DatabasePeer::create(&conn, peer_request)?; if cfg!(not(test)) && Device::get(interface, network.backend).is_ok() { // Update the current WireGuard interface with the new peers. 
DeviceUpdate::new() .add_peer((&*peer).into()) .apply(interface, network.backend) .map_err(|_| ServerError::WireGuard)?; println!("adding to WireGuard interface: {}", &*peer); } let server_peer = DatabasePeer::get(&conn, 1)?; prompts::save_peer_invitation( interface, &peer, &*server_peer, &cidr_tree, keypair, &SocketAddr::new(config.address, config.listen_port), &opts.save_config, )?; } else { println!("exited without creating peer."); } Ok(()) } fn rename_peer( interface: &InterfaceName, conf: &ServerConfig, opts: RenamePeerOpts, ) -> Result<(), Error> { let conn = open_database_connection(interface, conf)?; let peers = DatabasePeer::list(&conn)? .into_iter() .map(|dp| dp.inner) .collect::<Vec<_>>(); if let Some((peer_request, old_name)) = shared::prompts::rename_peer(&peers, &opts)? { let mut db_peer = DatabasePeer::list(&conn)? .into_iter() .find(|p| p.name == old_name) .ok_or( "Peer not found.")?; let _peer = db_peer.update(&conn, peer_request)?; } else { println!("exited without creating peer."); } Ok(()) } fn add_cidr( interface: &InterfaceName, conf: &ServerConfig, opts: AddCidrOpts, ) -> Result<(), Error> { let conn = open_database_connection(interface, conf)?; let cidrs = DatabaseCidr::list(&conn)?; if let Some(cidr_request) = shared::prompts::add_cidr(&cidrs, &opts)? { let cidr = DatabaseCidr::create(&conn, cidr_request)?; printdoc!( " CIDR \"{cidr_name}\" added. Right now, peers within {cidr_name} can only see peers in the same CIDR, and in the special \"innernet-server\" CIDR that includes the innernet server peer. You'll need to add more associations for peers in diffent CIDRs to communicate. ", cidr_name = cidr.name.bold() ); } else { println!("exited without creating CIDR."); } Ok(()) } fn delete_cidr( interface: &InterfaceName, conf: &ServerConfig, args: DeleteCidrOpts, ) -> Result<(), Error> { println!("Fetching eligible CIDRs"); let conn = open_database_connection(interface, conf)?; let cidrs = DatabaseCidr::list(&conn)?; let peers = DatabasePeer::list(&conn)? .into_iter() .map(|dp| dp.inner) .collect::<Vec<_>>(); let cidr_id = prompts::delete_cidr(&cidrs, &peers, &args)?; println!("Deleting CIDR..."); let _ = DatabaseCidr::delete(&conn, cidr_id)?; println!("CIDR deleted."); Ok(()) } fn uninstall( interface: &InterfaceName, conf: &ServerConfig, network: NetworkOpt, ) -> Result<(), Error> { if Confirm::with_theme(&*prompts::THEME) .with_prompt(&format!( "Permanently delete network \"{}\"?", interface.as_str_lossy().yellow() )) .default(false) .interact()? { println!("{} bringing down interface (if up).", "[*]".dimmed()); wg::down(interface, network.backend).ok(); let config = conf.config_path(interface); let data = conf.database_path(interface); std::fs::remove_file(&config) .with_path(&config) .map_err(|e| println!("[!] {}", e.to_string().yellow())) .ok(); std::fs::remove_file(&data) .with_path(&data) .map_err(|e| println!("[!] 
{}", e.to_string().yellow())) .ok(); println!( "{} network {} is uninstalled.", "[*]".dimmed(), interface.as_str_lossy().yellow() ); } Ok(()) } fn spawn_endpoint_refresher(interface: InterfaceName, network: NetworkOpt) -> Endpoints { let endpoints = Arc::new(RwLock::new(HashMap::new())); tokio::task::spawn({ let endpoints = endpoints.clone(); async move { let mut interval = tokio::time::interval(Duration::from_secs(10)); loop { interval.tick().await; if let Ok(info) = Device::get(&interface, network.backend) { for peer in info.peers { if let Some(endpoint) = peer.config.endpoint { endpoints .write() .insert(peer.config.public_key.to_base64(), endpoint); } } } } } }); endpoints } fn spawn_expired_invite_sweeper(db: Db) { tokio::task::spawn(async move { let mut interval = tokio::time::interval(Duration::from_secs(10)); loop { interval.tick().await; match DatabasePeer::delete_expired_invites(&db.lock()) { Ok(deleted) if deleted > 0 => { log::info!("Deleted {} expired peer invitations.", deleted) }, Err(e) => log::error!("Failed to delete expired peer invitations: {}", e), _ => {}, } } }); } async fn serve( interface: InterfaceName, conf: &ServerConfig, network: NetworkOpt, ) -> Result<(), Error> { let config = ConfigFile::from_file(conf.config_path(&interface))?; let conn = open_database_connection(&interface, conf)?; let peers = DatabasePeer::list(&conn)?; let peer_configs = peers .iter() .map(|peer| peer.deref().into()) .collect::<Vec<PeerConfigBuilder>>(); log::info!("bringing up interface."); wg::up( &interface, &config.private_key, IpNetwork::new(config.address, config.network_cidr_prefix)?, Some(config.listen_port), None, network, )?; DeviceUpdate::new() .add_peers(&peer_configs) .apply(&interface, network.backend)?; log::info!("{} peers added to wireguard interface.", peers.len()); let public_key = wgctrl::Key::from_base64(&config.private_key)?.generate_public(); let db = Arc::new(Mutex::new(conn)); let endpoints = spawn_endpoint_refresher(interface, network); spawn_expired_invite_sweeper(db.clone()); let context = Context { db, endpoints, interface, public_key, backend: network.backend, }; log::info!("innernet-server {} starting.", VERSION); let listener = get_listener((config.address, config.listen_port).into(), &interface)?; let make_svc = hyper::service::make_service_fn(move |socket: &AddrStream| { let remote_addr = socket.remote_addr(); let context = context.clone(); async move { Ok::<_, http::Error>(hyper::service::service_fn(move |req: Request<Body>| { log::debug!("{} - {} {}", &remote_addr, req.method(), req.uri()); hyper_service(req, context.clone(), remote_addr) })) } }); let server = hyper::Server::from_tcp(listener)?.serve(make_svc); server.await?; Ok(()) } /// This function differs per OS, because different operating systems have /// opposing characteristics when binding to a specific IP address. /// On Linux, binding to a specific local IP address does *not* bind it to /// that IP's interface, allowing for spoofing attacks. /// /// See https://github.com/tonarino/innernet/issues/26 for more details. 
#[cfg(target_os = "linux")] fn get_listener(addr: SocketAddr, interface: &InterfaceName) -> Result<TcpListener, Error> { let listener = TcpListener::bind(&addr)?; listener.set_nonblocking(true)?; let sock = socket2::Socket::from(listener); sock.bind_device(Some(interface.as_str_lossy().as_bytes()))?; Ok(sock.into()) } /// BSD-likes do seem to bind to an interface when binding to an IP, /// according to the internet, but we may want to explicitly use /// IP_BOUND_IF in the future regardless. This isn't currently in /// the socket2 crate however, so we aren't currently using it. /// /// See https://github.com/tonarino/innernet/issues/26 for more details. #[cfg(not(target_os = "linux"))] fn get_listener(addr: SocketAddr, _interface: &InterfaceName) -> Result<TcpListener, Error> { let listener = TcpListener::bind(&addr)?; listener.set_nonblocking(true)?; Ok(listener) } pub(crate) async fn hyper_service( req: Request<Body>, context: Context, remote_addr: SocketAddr, ) -> Result<Response<Body>, http::Error> { // Break the path into components. let components: VecDeque<_> = req .uri() .path() .trim_start_matches('/') .split('/') .map(String::from) .collect(); routes(req, context, remote_addr, components) .await .or_else(TryInto::try_into) } async fn routes( req: Request<Body>, context: Context, remote_addr: SocketAddr, mut components: VecDeque<String>, ) -> Result<Response<Body>, ServerError> { // Must be "/v1/[something]" if components.pop_front().as_deref()!= Some("v1") { Err(ServerError::NotFound) } else { let session = get_session(&req, context, remote_addr.ip())?; let component = components.pop_front(); match component.as_deref() { Some("user") => api::user::routes(req, components, session).await, Some("admin") => api::admin::routes(req, components, session).await, _ => Err(ServerError::NotFound), } } } fn get_session( req: &Request<Body>, context: Context, addr: IpAddr, ) -> Result<Session, ServerError> { let pubkey = req .headers() .get(INNERNET_PUBKEY_HEADER) .ok_or(ServerError::Unauthorized)?; let pubkey = pubkey.to_str().map_err(|_| ServerError::Unauthorized)?; let pubkey = Key::from_base64(&pubkey).map_err(|_| ServerError::Unauthorized)?; if pubkey.0.ct_eq(&context.public_key.0).into() { let peer = DatabasePeer::get_from_ip(&context.db.lock(), addr).map_err(|e| match e { rusqlite::Error::QueryReturnedNoRows => ServerError::Unauthorized, e => ServerError::Database(e), })?; if!peer.is_disabled { return Ok(Session { context, peer }); } } Err(ServerError::Unauthorized) } #[cfg(test)] mod tests { use super::*; use crate::test; use anyhow::Result; use hyper::StatusCode; use std::path::Path; #[test] fn test_init_wizard() -> Result<(), Error> { // This runs init_wizard(). let server = test::Server::new()?; assert!(Path::new(&server.wg_conf_path()).exists()); Ok(()) } #[tokio::test] async fn test_with_session_disguised_with_headers() -> Result<(), Error> { let server = test::Server::new()?; let req = Request::builder() .uri(format!("http://{}/v1/admin/peers", test::WG_MANAGE_PEER_IP)) .header("Forwarded", format!("for={}", test::ADMIN_PEER_IP)) .header("X-Forwarded-For", test::ADMIN_PEER_IP) .header("X-Real-IP", test::ADMIN_PEER_IP) .body(Body::empty()) .unwrap(); // Request from an unknown IP, trying to disguise as an admin using HTTP headers. let res = server.raw_request("10.80.80.80", req).await; // addr::remote() filter only look at remote_addr from TCP socket. // HTTP headers are not considered. This also means that innernet // server would not function behind an HTTP proxy. 
assert_eq!(res.status(), StatusCode::UNAUTHORIZED); Ok(()) } #[tokio::test] async fn test_incorrect_public_key() -> Result<(), Error> { let server = test::Server::new()?; let key = Key::generate_private().generate_public(); // Request from an unknown IP, trying to disguise as
{ let mut invitation_file = File::create(&path).with_path(&path)?; shared::chmod(&invitation_file, 0o600)?; invitation_file .write_all(toml::to_string(self).unwrap().as_bytes()) .with_path(path)?; Ok(()) }
identifier_body
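Here the masked `middle` is the body of `ConfigFile::write_to_path`, which creates the file, tightens its permissions to 0600, and only then writes the serialized TOML, so the private key never sits on disk world-readable. The same ordering can be reproduced with only the standard library; this sketch is Unix-only and omits innernet's `with_path` error-context helper:

use std::fs::{File, Permissions};
use std::io::Write;
use std::os::unix::fs::PermissionsExt;
use std::path::Path;

fn write_secret(path: &Path, contents: &str) -> std::io::Result<()> {
    let mut file = File::create(path)?;
    // Restrict permissions before any bytes are written, so the secret
    // is never readable by other users, even transiently.
    file.set_permissions(Permissions::from_mode(0o600))?;
    file.write_all(contents.as_bytes())
}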
main.rs
/// Permanently uninstall a created network, rendering it unusable. Use with care. Uninstall { interface: Interface }, /// Serve the coordinating server for an existing network. Serve { interface: Interface, #[structopt(flatten)] network: NetworkOpt, }, /// Add a peer to an existing network. AddPeer { interface: Interface, #[structopt(flatten)] args: AddPeerOpts, }, /// Rename an existing peer. RenamePeer { interface: Interface, #[structopt(flatten)] args: RenamePeerOpts, }, /// Add a new CIDR to an existing network. AddCidr { interface: Interface, #[structopt(flatten)] args: AddCidrOpts, }, /// Delete a CIDR. DeleteCidr { interface: Interface, #[structopt(flatten)] args: DeleteCidrOpts, }, /// Generate shell completion scripts Completions { #[structopt(possible_values = &structopt::clap::Shell::variants(), case_insensitive = true)] shell: structopt::clap::Shell, }, } pub type Db = Arc<Mutex<Connection>>; pub type Endpoints = Arc<RwLock<HashMap<String, SocketAddr>>>; #[derive(Clone)] pub struct Context { pub db: Db, pub endpoints: Arc<RwLock<HashMap<String, SocketAddr>>>, pub interface: InterfaceName, pub backend: Backend, pub public_key: Key, } pub struct Session { pub context: Context, pub peer: DatabasePeer, } impl Session { pub fn admin_capable(&self) -> bool { self.peer.is_admin && self.user_capable() } pub fn user_capable(&self) -> bool { !self.peer.is_disabled && self.peer.is_redeemed } pub fn redeemable(&self) -> bool { !self.peer.is_disabled &&!self.peer.is_redeemed } } #[derive(Deserialize, Serialize, Debug)] #[serde(rename_all = "kebab-case")] pub struct ConfigFile { /// The server's WireGuard key pub private_key: String, /// The listen port of the server pub listen_port: u16, /// The internal WireGuard IP address assigned to the server pub address: IpAddr, /// The CIDR prefix of the WireGuard network pub network_cidr_prefix: u8, } impl ConfigFile { pub fn write_to_path<P: AsRef<Path>>(&self, path: P) -> Result<(), Error> { let mut invitation_file = File::create(&path).with_path(&path)?; shared::chmod(&invitation_file, 0o600)?; invitation_file .write_all(toml::to_string(self).unwrap().as_bytes()) .with_path(path)?; Ok(()) } pub fn from_file<P: AsRef<Path>>(path: P) -> Result<Self, Error> { let path = path.as_ref(); let file = File::open(path).with_path(path)?; if shared::chmod(&file, 0o600)? { println!( "{} updated permissions for {} to 0600.", "[!]".yellow(), path.display() ); } Ok(toml::from_slice(&std::fs::read(&path).with_path(path)?)?) } } #[derive(Clone, Debug, Default)] pub struct ServerConfig { wg_manage_dir_override: Option<PathBuf>, wg_dir_override: Option<PathBuf>, } impl ServerConfig { fn database_dir(&self) -> &Path { self.wg_manage_dir_override .as_deref() .unwrap_or(*SERVER_DATABASE_DIR) } fn database_path(&self, interface: &InterfaceName) -> PathBuf { PathBuf::new() .join(self.database_dir()) .join(interface.to_string()) .with_extension("db") } fn config_dir(&self) -> &Path { self.wg_dir_override .as_deref() .unwrap_or(*SERVER_CONFIG_DIR) } fn config_path(&self, interface: &InterfaceName) -> PathBuf { PathBuf::new() .join(self.config_dir()) .join(interface.to_string()) .with_extension("conf") } } #[tokio::main] async fn main() -> Result<(), Box<dyn std::error::Error>> { if env::var_os("RUST_LOG").is_none() { // Set some default log settings. env::set_var("RUST_LOG", "warn,warp=info,wg_manage_server=info"); } pretty_env_logger::init(); let opt = Opt::from_args(); if unsafe { libc::getuid() }!= 0 &&!matches!(opt.command, Command::Completions {.. 
}) { return Err("innernet-server must run as root.".into()); } let conf = ServerConfig::default(); match opt.command { Command::New { opts } => { if let Err(e) = initialize::init_wizard(&conf, opts) { eprintln!("{}: {}.", "creation failed".red(), e); std::process::exit(1); } }, Command::Uninstall { interface } => uninstall(&interface, &conf, opt.network)?, Command::Serve { interface, network: routing, } => serve(*interface, &conf, routing).await?, Command::AddPeer { interface, args } => add_peer(&interface, &conf, args, opt.network)?, Command::RenamePeer { interface, args } => rename_peer(&interface, &conf, args)?, Command::AddCidr { interface, args } => add_cidr(&interface, &conf, args)?, Command::DeleteCidr { interface, args } => delete_cidr(&interface, &conf, args)?, Command::Completions { shell } => { Opt::clap().gen_completions_to("innernet-server", shell, &mut std::io::stdout()); std::process::exit(0); }, } Ok(()) } fn open_database_connection( interface: &InterfaceName, conf: &ServerConfig, ) -> Result<rusqlite::Connection, Box<dyn std::error::Error>> { let database_path = conf.database_path(&interface); if!Path::new(&database_path).exists() { return Err(format!( "no database file found at {}", database_path.to_string_lossy() ) .into()); } let conn = Connection::open(&database_path)?; // Foreign key constraints aren't on in SQLite by default. Enable. conn.pragma_update(None, "foreign_keys", &1)?; db::auto_migrate(&conn)?; Ok(conn) } fn add_peer( interface: &InterfaceName, conf: &ServerConfig, opts: AddPeerOpts, network: NetworkOpt, ) -> Result<(), Error> { let config = ConfigFile::from_file(conf.config_path(interface))?; let conn = open_database_connection(interface, conf)?; let peers = DatabasePeer::list(&conn)? .into_iter() .map(|dp| dp.inner) .collect::<Vec<_>>(); let cidrs = DatabaseCidr::list(&conn)?; let cidr_tree = CidrTree::new(&cidrs[..]); if let Some((peer_request, keypair)) = shared::prompts::add_peer(&peers, &cidr_tree, &opts)? { let peer = DatabasePeer::create(&conn, peer_request)?; if cfg!(not(test)) && Device::get(interface, network.backend).is_ok() { // Update the current WireGuard interface with the new peers. DeviceUpdate::new() .add_peer((&*peer).into()) .apply(interface, network.backend) .map_err(|_| ServerError::WireGuard)?; println!("adding to WireGuard interface: {}", &*peer); } let server_peer = DatabasePeer::get(&conn, 1)?; prompts::save_peer_invitation( interface, &peer, &*server_peer, &cidr_tree, keypair, &SocketAddr::new(config.address, config.listen_port), &opts.save_config, )?; } else { println!("exited without creating peer."); } Ok(()) } fn rename_peer( interface: &InterfaceName, conf: &ServerConfig, opts: RenamePeerOpts, ) -> Result<(), Error> { let conn = open_database_connection(interface, conf)?; let peers = DatabasePeer::list(&conn)? .into_iter() .map(|dp| dp.inner) .collect::<Vec<_>>(); if let Some((peer_request, old_name)) = shared::prompts::rename_peer(&peers, &opts)? { let mut db_peer = DatabasePeer::list(&conn)? .into_iter() .find(|p| p.name == old_name) .ok_or( "Peer not found.")?; let _peer = db_peer.update(&conn, peer_request)?; } else { println!("exited without creating peer."); } Ok(()) } fn add_cidr( interface: &InterfaceName, conf: &ServerConfig, opts: AddCidrOpts, ) -> Result<(), Error> { let conn = open_database_connection(interface, conf)?; let cidrs = DatabaseCidr::list(&conn)?; if let Some(cidr_request) = shared::prompts::add_cidr(&cidrs, &opts)? 
{ let cidr = DatabaseCidr::create(&conn, cidr_request)?; printdoc!( " CIDR \"{cidr_name}\" added. Right now, peers within {cidr_name} can only see peers in the same CIDR, and in the special \"innernet-server\" CIDR that includes the innernet server peer. You'll need to add more associations for peers in diffent CIDRs to communicate. ", cidr_name = cidr.name.bold() ); } else { println!("exited without creating CIDR."); } Ok(()) } fn delete_cidr( interface: &InterfaceName, conf: &ServerConfig, args: DeleteCidrOpts, ) -> Result<(), Error> { println!("Fetching eligible CIDRs"); let conn = open_database_connection(interface, conf)?; let cidrs = DatabaseCidr::list(&conn)?; let peers = DatabasePeer::list(&conn)? .into_iter() .map(|dp| dp.inner) .collect::<Vec<_>>(); let cidr_id = prompts::delete_cidr(&cidrs, &peers, &args)?; println!("Deleting CIDR..."); let _ = DatabaseCidr::delete(&conn, cidr_id)?; println!("CIDR deleted."); Ok(()) } fn uninstall( interface: &InterfaceName, conf: &ServerConfig, network: NetworkOpt, ) -> Result<(), Error> { if Confirm::with_theme(&*prompts::THEME) .with_prompt(&format!( "Permanently delete network \"{}\"?", interface.as_str_lossy().yellow() )) .default(false) .interact()? { println!("{} bringing down interface (if up).", "[*]".dimmed()); wg::down(interface, network.backend).ok(); let config = conf.config_path(interface); let data = conf.database_path(interface); std::fs::remove_file(&config) .with_path(&config) .map_err(|e| println!("[!] {}", e.to_string().yellow())) .ok(); std::fs::remove_file(&data) .with_path(&data) .map_err(|e| println!("[!] {}", e.to_string().yellow())) .ok(); println!( "{} network {} is uninstalled.", "[*]".dimmed(), interface.as_str_lossy().yellow() ); } Ok(()) } fn spawn_endpoint_refresher(interface: InterfaceName, network: NetworkOpt) -> Endpoints { let endpoints = Arc::new(RwLock::new(HashMap::new())); tokio::task::spawn({ let endpoints = endpoints.clone(); async move { let mut interval = tokio::time::interval(Duration::from_secs(10)); loop { interval.tick().await; if let Ok(info) = Device::get(&interface, network.backend) { for peer in info.peers { if let Some(endpoint) = peer.config.endpoint { endpoints .write() .insert(peer.config.public_key.to_base64(), endpoint); } } } } } }); endpoints } fn spawn_expired_invite_sweeper(db: Db) { tokio::task::spawn(async move { let mut interval = tokio::time::interval(Duration::from_secs(10)); loop { interval.tick().await; match DatabasePeer::delete_expired_invites(&db.lock()) { Ok(deleted) if deleted > 0 => { log::info!("Deleted {} expired peer invitations.", deleted) }, Err(e) => log::error!("Failed to delete expired peer invitations: {}", e), _ => {}, } } }); } async fn serve( interface: InterfaceName, conf: &ServerConfig, network: NetworkOpt, ) -> Result<(), Error> { let config = ConfigFile::from_file(conf.config_path(&interface))?; let conn = open_database_connection(&interface, conf)?; let peers = DatabasePeer::list(&conn)?; let peer_configs = peers .iter() .map(|peer| peer.deref().into()) .collect::<Vec<PeerConfigBuilder>>(); log::info!("bringing up interface."); wg::up( &interface, &config.private_key, IpNetwork::new(config.address, config.network_cidr_prefix)?, Some(config.listen_port), None, network, )?; DeviceUpdate::new() .add_peers(&peer_configs) .apply(&interface, network.backend)?; log::info!("{} peers added to wireguard interface.", peers.len()); let public_key = wgctrl::Key::from_base64(&config.private_key)?.generate_public(); let db = Arc::new(Mutex::new(conn)); let endpoints = 
spawn_endpoint_refresher(interface, network); spawn_expired_invite_sweeper(db.clone()); let context = Context { db, endpoints, interface, public_key, backend: network.backend, }; log::info!("innernet-server {} starting.", VERSION); let listener = get_listener((config.address, config.listen_port).into(), &interface)?; let make_svc = hyper::service::make_service_fn(move |socket: &AddrStream| { let remote_addr = socket.remote_addr(); let context = context.clone(); async move { Ok::<_, http::Error>(hyper::service::service_fn(move |req: Request<Body>| { log::debug!("{} - {} {}", &remote_addr, req.method(), req.uri()); hyper_service(req, context.clone(), remote_addr) })) } }); let server = hyper::Server::from_tcp(listener)?.serve(make_svc); server.await?; Ok(()) } /// This function differs per OS, because different operating systems have /// opposing characteristics when binding to a specific IP address. /// On Linux, binding to a specific local IP address does *not* bind it to /// that IP's interface, allowing for spoofing attacks. /// /// See https://github.com/tonarino/innernet/issues/26 for more details. #[cfg(target_os = "linux")] fn get_listener(addr: SocketAddr, interface: &InterfaceName) -> Result<TcpListener, Error> { let listener = TcpListener::bind(&addr)?; listener.set_nonblocking(true)?; let sock = socket2::Socket::from(listener); sock.bind_device(Some(interface.as_str_lossy().as_bytes()))?; Ok(sock.into()) } /// BSD-likes do seem to bind to an interface when binding to an IP, /// according to the internet, but we may want to explicitly use /// IP_BOUND_IF in the future regardless. This isn't currently in /// the socket2 crate however, so we aren't currently using it. /// /// See https://github.com/tonarino/innernet/issues/26 for more details. #[cfg(not(target_os = "linux"))] fn get_listener(addr: SocketAddr, _interface: &InterfaceName) -> Result<TcpListener, Error> { let listener = TcpListener::bind(&addr)?; listener.set_nonblocking(true)?; Ok(listener) } pub(crate) async fn hyper_service( req: Request<Body>, context: Context, remote_addr: SocketAddr, ) -> Result<Response<Body>, http::Error> { // Break the path into components. 
let components: VecDeque<_> = req .uri() .path() .trim_start_matches('/') .split('/') .map(String::from) .collect(); routes(req, context, remote_addr, components) .await .or_else(TryInto::try_into) } async fn routes( req: Request<Body>, context: Context, remote_addr: SocketAddr, mut components: VecDeque<String>, ) -> Result<Response<Body>, ServerError> { // Must be "/v1/[something]" if components.pop_front().as_deref()!= Some("v1") { Err(ServerError::NotFound) } else { let session = get_session(&req, context, remote_addr.ip())?; let component = components.pop_front(); match component.as_deref() { Some("user") => api::user::routes(req, components, session).await, Some("admin") => api::admin::routes(req, components, session).await, _ => Err(ServerError::NotFound), } } } fn get_session( req: &Request<Body>, context: Context, addr: IpAddr, ) -> Result<Session, ServerError> { let pubkey = req .headers() .get(INNERNET_PUBKEY_HEADER) .ok_or(ServerError::Unauthorized)?; let pubkey = pubkey.to_str().map_err(|_| ServerError::Unauthorized)?; let pubkey = Key::from_base64(&pubkey).map_err(|_| ServerError::Unauthorized)?; if pubkey.0.ct_eq(&context.public_key.0).into() { let peer = DatabasePeer::get_from_ip(&context.db.lock(), addr).map_err(|e| match e { rusqlite::Error::QueryReturnedNoRows => ServerError::Unauthorized, e => ServerError::Database(e), })?; if!peer.is_disabled { return Ok(Session { context, peer }); } } Err(ServerError::Unauthorized) } #[cfg(test)] mod tests { use super::*; use crate::test; use anyhow::Result; use hyper::StatusCode; use std::path::Path; #[test] fn test_init_wizard() -> Result<(), Error> { // This runs init_wizard(). let server = test::Server::new()?; assert!(Path::new(&server.wg_conf_path()).exists()); Ok(()) } #[tokio::test] async fn test_with_session_disguised_with_headers() -> Result<(), Error> { let server = test::Server::new()?; let req = Request::builder() .uri(format!("http://{}/v1/admin/peers", test::WG_MANAGE_PEER_IP)) .header("Forwarded", format!("for={}", test::ADMIN_PEER_IP)) .header("X-Forwarded-For", test::ADMIN_PEER_IP) .header("X-Real-IP", test::ADMIN_PEER_IP) .body(Body::empty()) .unwrap(); // Request from an unknown IP, trying to disguise as an admin using HTTP headers. let res = server.raw_request("10.80.80.80", req).await; // addr::remote() filter only look at remote_addr from TCP socket. // HTTP headers are not considered. This also means that innernet // server would not function behind an HTTP proxy. assert_eq!(res.status(), StatusCode::UNAUTHORIZED); Ok(()) } #[tokio::test] async fn test_incorrect_public_key() -> Result<(), Error> { let server = test::Server::new()?; let key = Key::generate_private().generate_public(); // Request from an unknown IP, trying to disguise as an admin using HTTP headers. let req = Request::builder() .uri(format!("http://{}/v1/admin/peers", test::WG_MANAGE_PEER_IP)) .header(shared::INNERNET_PUBKEY_HEADER, key.to_base64()) .body(Body::empty()) .unwrap(); let res = server.raw_request("10.80.80.80", req).await; // addr::remote() filter only look at remote_addr from TCP socket. // HTTP headers are not considered. This also means that innernet // server would not function behind an HTTP proxy. assert_eq!(res.status(), StatusCode::UNAUTHORIZED); Ok(()) }
#[tokio::test] async fn test_unparseable_public_key() -> Result<(), Error> { let server = test::Server::new()?;
random_line_split
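This example's split lands inside the test module, whose point is that authorization is keyed off the TCP source address: `Forwarded`, `X-Forwarded-For`, and `X-Real-IP` headers must not grant admin access (which also means the server cannot sit behind an HTTP proxy). A sketch of the spoofed request those tests build, assuming the hyper 0.14-style `Request` builder used above; the URI path and header names mirror the originals, while the function name and parameters are illustrative:

use hyper::{Body, Request};

// Build a request that *claims* to come from an admin peer via proxy
// headers; the server should still see only the socket's remote address.
fn spoofed_admin_request(server_ip: &str, admin_ip: &str) -> Request<Body> {
    Request::builder()
        .uri(format!("http://{}/v1/admin/peers", server_ip))
        .header("Forwarded", format!("for={}", admin_ip))
        .header("X-Forwarded-For", admin_ip)
        .header("X-Real-IP", admin_ip)
        .body(Body::empty())
        .expect("valid request")
}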
main.rs
, DeviceUpdate, InterfaceName, Key, PeerConfigBuilder}; pub mod api; pub mod db; pub mod error; #[cfg(test)] mod test; pub mod util; mod initialize; use db::{DatabaseCidr, DatabasePeer}; pub use error::ServerError; use initialize::InitializeOpts; use shared::{prompts, wg, CidrTree, Error, Interface, SERVER_CONFIG_DIR, SERVER_DATABASE_DIR}; pub use shared::{Association, AssociationContents}; pub const VERSION: &str = env!("CARGO_PKG_VERSION"); #[derive(Debug, StructOpt)] #[structopt(name = "innernet-server", about, global_settings(&[AppSettings::ColoredHelp, AppSettings::DeriveDisplayOrder, AppSettings::VersionlessSubcommands, AppSettings::UnifiedHelpMessage]))] struct Opt { #[structopt(subcommand)] command: Command, #[structopt(flatten)] network: NetworkOpt, } #[derive(Debug, StructOpt)] enum Command { /// Create a new network. #[structopt(alias = "init")] New { #[structopt(flatten)] opts: InitializeOpts, }, /// Permanently uninstall a created network, rendering it unusable. Use with care. Uninstall { interface: Interface }, /// Serve the coordinating server for an existing network. Serve { interface: Interface, #[structopt(flatten)] network: NetworkOpt, }, /// Add a peer to an existing network. AddPeer { interface: Interface, #[structopt(flatten)] args: AddPeerOpts, }, /// Rename an existing peer. RenamePeer { interface: Interface, #[structopt(flatten)] args: RenamePeerOpts, }, /// Add a new CIDR to an existing network. AddCidr { interface: Interface, #[structopt(flatten)] args: AddCidrOpts, }, /// Delete a CIDR. DeleteCidr { interface: Interface, #[structopt(flatten)] args: DeleteCidrOpts, }, /// Generate shell completion scripts Completions { #[structopt(possible_values = &structopt::clap::Shell::variants(), case_insensitive = true)] shell: structopt::clap::Shell, }, } pub type Db = Arc<Mutex<Connection>>; pub type Endpoints = Arc<RwLock<HashMap<String, SocketAddr>>>; #[derive(Clone)] pub struct Context { pub db: Db, pub endpoints: Arc<RwLock<HashMap<String, SocketAddr>>>, pub interface: InterfaceName, pub backend: Backend, pub public_key: Key, } pub struct Session { pub context: Context, pub peer: DatabasePeer, } impl Session { pub fn
(&self) -> bool { self.peer.is_admin && self.user_capable() } pub fn user_capable(&self) -> bool { !self.peer.is_disabled && self.peer.is_redeemed } pub fn redeemable(&self) -> bool { !self.peer.is_disabled &&!self.peer.is_redeemed } } #[derive(Deserialize, Serialize, Debug)] #[serde(rename_all = "kebab-case")] pub struct ConfigFile { /// The server's WireGuard key pub private_key: String, /// The listen port of the server pub listen_port: u16, /// The internal WireGuard IP address assigned to the server pub address: IpAddr, /// The CIDR prefix of the WireGuard network pub network_cidr_prefix: u8, } impl ConfigFile { pub fn write_to_path<P: AsRef<Path>>(&self, path: P) -> Result<(), Error> { let mut invitation_file = File::create(&path).with_path(&path)?; shared::chmod(&invitation_file, 0o600)?; invitation_file .write_all(toml::to_string(self).unwrap().as_bytes()) .with_path(path)?; Ok(()) } pub fn from_file<P: AsRef<Path>>(path: P) -> Result<Self, Error> { let path = path.as_ref(); let file = File::open(path).with_path(path)?; if shared::chmod(&file, 0o600)? { println!( "{} updated permissions for {} to 0600.", "[!]".yellow(), path.display() ); } Ok(toml::from_slice(&std::fs::read(&path).with_path(path)?)?) } } #[derive(Clone, Debug, Default)] pub struct ServerConfig { wg_manage_dir_override: Option<PathBuf>, wg_dir_override: Option<PathBuf>, } impl ServerConfig { fn database_dir(&self) -> &Path { self.wg_manage_dir_override .as_deref() .unwrap_or(*SERVER_DATABASE_DIR) } fn database_path(&self, interface: &InterfaceName) -> PathBuf { PathBuf::new() .join(self.database_dir()) .join(interface.to_string()) .with_extension("db") } fn config_dir(&self) -> &Path { self.wg_dir_override .as_deref() .unwrap_or(*SERVER_CONFIG_DIR) } fn config_path(&self, interface: &InterfaceName) -> PathBuf { PathBuf::new() .join(self.config_dir()) .join(interface.to_string()) .with_extension("conf") } } #[tokio::main] async fn main() -> Result<(), Box<dyn std::error::Error>> { if env::var_os("RUST_LOG").is_none() { // Set some default log settings. env::set_var("RUST_LOG", "warn,warp=info,wg_manage_server=info"); } pretty_env_logger::init(); let opt = Opt::from_args(); if unsafe { libc::getuid() }!= 0 &&!matches!(opt.command, Command::Completions {.. 
}) { return Err("innernet-server must run as root.".into()); } let conf = ServerConfig::default(); match opt.command { Command::New { opts } => { if let Err(e) = initialize::init_wizard(&conf, opts) { eprintln!("{}: {}.", "creation failed".red(), e); std::process::exit(1); } }, Command::Uninstall { interface } => uninstall(&interface, &conf, opt.network)?, Command::Serve { interface, network: routing, } => serve(*interface, &conf, routing).await?, Command::AddPeer { interface, args } => add_peer(&interface, &conf, args, opt.network)?, Command::RenamePeer { interface, args } => rename_peer(&interface, &conf, args)?, Command::AddCidr { interface, args } => add_cidr(&interface, &conf, args)?, Command::DeleteCidr { interface, args } => delete_cidr(&interface, &conf, args)?, Command::Completions { shell } => { Opt::clap().gen_completions_to("innernet-server", shell, &mut std::io::stdout()); std::process::exit(0); }, } Ok(()) } fn open_database_connection( interface: &InterfaceName, conf: &ServerConfig, ) -> Result<rusqlite::Connection, Box<dyn std::error::Error>> { let database_path = conf.database_path(&interface); if!Path::new(&database_path).exists() { return Err(format!( "no database file found at {}", database_path.to_string_lossy() ) .into()); } let conn = Connection::open(&database_path)?; // Foreign key constraints aren't on in SQLite by default. Enable. conn.pragma_update(None, "foreign_keys", &1)?; db::auto_migrate(&conn)?; Ok(conn) } fn add_peer( interface: &InterfaceName, conf: &ServerConfig, opts: AddPeerOpts, network: NetworkOpt, ) -> Result<(), Error> { let config = ConfigFile::from_file(conf.config_path(interface))?; let conn = open_database_connection(interface, conf)?; let peers = DatabasePeer::list(&conn)? .into_iter() .map(|dp| dp.inner) .collect::<Vec<_>>(); let cidrs = DatabaseCidr::list(&conn)?; let cidr_tree = CidrTree::new(&cidrs[..]); if let Some((peer_request, keypair)) = shared::prompts::add_peer(&peers, &cidr_tree, &opts)? { let peer = DatabasePeer::create(&conn, peer_request)?; if cfg!(not(test)) && Device::get(interface, network.backend).is_ok() { // Update the current WireGuard interface with the new peers. DeviceUpdate::new() .add_peer((&*peer).into()) .apply(interface, network.backend) .map_err(|_| ServerError::WireGuard)?; println!("adding to WireGuard interface: {}", &*peer); } let server_peer = DatabasePeer::get(&conn, 1)?; prompts::save_peer_invitation( interface, &peer, &*server_peer, &cidr_tree, keypair, &SocketAddr::new(config.address, config.listen_port), &opts.save_config, )?; } else { println!("exited without creating peer."); } Ok(()) } fn rename_peer( interface: &InterfaceName, conf: &ServerConfig, opts: RenamePeerOpts, ) -> Result<(), Error> { let conn = open_database_connection(interface, conf)?; let peers = DatabasePeer::list(&conn)? .into_iter() .map(|dp| dp.inner) .collect::<Vec<_>>(); if let Some((peer_request, old_name)) = shared::prompts::rename_peer(&peers, &opts)? { let mut db_peer = DatabasePeer::list(&conn)? .into_iter() .find(|p| p.name == old_name) .ok_or( "Peer not found.")?; let _peer = db_peer.update(&conn, peer_request)?; } else { println!("exited without creating peer."); } Ok(()) } fn add_cidr( interface: &InterfaceName, conf: &ServerConfig, opts: AddCidrOpts, ) -> Result<(), Error> { let conn = open_database_connection(interface, conf)?; let cidrs = DatabaseCidr::list(&conn)?; if let Some(cidr_request) = shared::prompts::add_cidr(&cidrs, &opts)? 
{ let cidr = DatabaseCidr::create(&conn, cidr_request)?; printdoc!( " CIDR \"{cidr_name}\" added. Right now, peers within {cidr_name} can only see peers in the same CIDR, and in the special \"innernet-server\" CIDR that includes the innernet server peer. You'll need to add more associations for peers in diffent CIDRs to communicate. ", cidr_name = cidr.name.bold() ); } else { println!("exited without creating CIDR."); } Ok(()) } fn delete_cidr( interface: &InterfaceName, conf: &ServerConfig, args: DeleteCidrOpts, ) -> Result<(), Error> { println!("Fetching eligible CIDRs"); let conn = open_database_connection(interface, conf)?; let cidrs = DatabaseCidr::list(&conn)?; let peers = DatabasePeer::list(&conn)? .into_iter() .map(|dp| dp.inner) .collect::<Vec<_>>(); let cidr_id = prompts::delete_cidr(&cidrs, &peers, &args)?; println!("Deleting CIDR..."); let _ = DatabaseCidr::delete(&conn, cidr_id)?; println!("CIDR deleted."); Ok(()) } fn uninstall( interface: &InterfaceName, conf: &ServerConfig, network: NetworkOpt, ) -> Result<(), Error> { if Confirm::with_theme(&*prompts::THEME) .with_prompt(&format!( "Permanently delete network \"{}\"?", interface.as_str_lossy().yellow() )) .default(false) .interact()? { println!("{} bringing down interface (if up).", "[*]".dimmed()); wg::down(interface, network.backend).ok(); let config = conf.config_path(interface); let data = conf.database_path(interface); std::fs::remove_file(&config) .with_path(&config) .map_err(|e| println!("[!] {}", e.to_string().yellow())) .ok(); std::fs::remove_file(&data) .with_path(&data) .map_err(|e| println!("[!] {}", e.to_string().yellow())) .ok(); println!( "{} network {} is uninstalled.", "[*]".dimmed(), interface.as_str_lossy().yellow() ); } Ok(()) } fn spawn_endpoint_refresher(interface: InterfaceName, network: NetworkOpt) -> Endpoints { let endpoints = Arc::new(RwLock::new(HashMap::new())); tokio::task::spawn({ let endpoints = endpoints.clone(); async move { let mut interval = tokio::time::interval(Duration::from_secs(10)); loop { interval.tick().await; if let Ok(info) = Device::get(&interface, network.backend) { for peer in info.peers { if let Some(endpoint) = peer.config.endpoint { endpoints .write() .insert(peer.config.public_key.to_base64(), endpoint); } } } } } }); endpoints } fn spawn_expired_invite_sweeper(db: Db) { tokio::task::spawn(async move { let mut interval = tokio::time::interval(Duration::from_secs(10)); loop { interval.tick().await; match DatabasePeer::delete_expired_invites(&db.lock()) { Ok(deleted) if deleted > 0 => { log::info!("Deleted {} expired peer invitations.", deleted) }, Err(e) => log::error!("Failed to delete expired peer invitations: {}", e), _ => {}, } } }); } async fn serve( interface: InterfaceName, conf: &ServerConfig, network: NetworkOpt, ) -> Result<(), Error> { let config = ConfigFile::from_file(conf.config_path(&interface))?; let conn = open_database_connection(&interface, conf)?; let peers = DatabasePeer::list(&conn)?; let peer_configs = peers .iter() .map(|peer| peer.deref().into()) .collect::<Vec<PeerConfigBuilder>>(); log::info!("bringing up interface."); wg::up( &interface, &config.private_key, IpNetwork::new(config.address, config.network_cidr_prefix)?, Some(config.listen_port), None, network, )?; DeviceUpdate::new() .add_peers(&peer_configs) .apply(&interface, network.backend)?; log::info!("{} peers added to wireguard interface.", peers.len()); let public_key = wgctrl::Key::from_base64(&config.private_key)?.generate_public(); let db = Arc::new(Mutex::new(conn)); let endpoints = 
spawn_endpoint_refresher(interface, network); spawn_expired_invite_sweeper(db.clone()); let context = Context { db, endpoints, interface, public_key, backend: network.backend, }; log::info!("innernet-server {} starting.", VERSION); let listener = get_listener((config.address, config.listen_port).into(), &interface)?; let make_svc = hyper::service::make_service_fn(move |socket: &AddrStream| { let remote_addr = socket.remote_addr(); let context = context.clone(); async move { Ok::<_, http::Error>(hyper::service::service_fn(move |req: Request<Body>| { log::debug!("{} - {} {}", &remote_addr, req.method(), req.uri()); hyper_service(req, context.clone(), remote_addr) })) } }); let server = hyper::Server::from_tcp(listener)?.serve(make_svc); server.await?; Ok(()) } /// This function differs per OS, because different operating systems have /// opposing characteristics when binding to a specific IP address. /// On Linux, binding to a specific local IP address does *not* bind it to /// that IP's interface, allowing for spoofing attacks. /// /// See https://github.com/tonarino/innernet/issues/26 for more details. #[cfg(target_os = "linux")] fn get_listener(addr: SocketAddr, interface: &InterfaceName) -> Result<TcpListener, Error> { let listener = TcpListener::bind(&addr)?; listener.set_nonblocking(true)?; let sock = socket2::Socket::from(listener); sock.bind_device(Some(interface.as_str_lossy().as_bytes()))?; Ok(sock.into()) } /// BSD-likes do seem to bind to an interface when binding to an IP, /// according to the internet, but we may want to explicitly use /// IP_BOUND_IF in the future regardless. This isn't currently in /// the socket2 crate however, so we aren't currently using it. /// /// See https://github.com/tonarino/innernet/issues/26 for more details. #[cfg(not(target_os = "linux"))] fn get_listener(addr: SocketAddr, _interface: &InterfaceName) -> Result<TcpListener, Error> { let listener = TcpListener::bind(&addr)?; listener.set_nonblocking(true)?; Ok(listener) } pub(crate) async fn hyper_service( req: Request<Body>, context: Context, remote_addr: SocketAddr, ) -> Result<Response<Body>, http::Error> { // Break the path into components. 
let components: VecDeque<_> = req .uri() .path() .trim_start_matches('/') .split('/') .map(String::from) .collect(); routes(req, context, remote_addr, components) .await .or_else(TryInto::try_into) } async fn routes( req: Request<Body>, context: Context, remote_addr: SocketAddr, mut components: VecDeque<String>, ) -> Result<Response<Body>, ServerError> { // Must be "/v1/[something]" if components.pop_front().as_deref() != Some("v1") { Err(ServerError::NotFound) } else { let session = get_session(&req, context, remote_addr.ip())?; let component = components.pop_front(); match component.as_deref() { Some("user") => api::user::routes(req, components, session).await, Some("admin") => api::admin::routes(req, components, session).await, _ => Err(ServerError::NotFound), } } } fn get_session( req: &Request<Body>, context: Context, addr: IpAddr, ) -> Result<Session, ServerError> { let pubkey = req .headers() .get(INNERNET_PUBKEY_HEADER) .ok_or(ServerError::Unauthorized)?; let pubkey = pubkey.to_str().map_err(|_| ServerError::Unauthorized)?; let pubkey = Key::from_base64(&pubkey).map_err(|_| ServerError::Unauthorized)?; if pubkey.0.ct_eq(&context.public_key.0).into() { let peer = DatabasePeer::get_from_ip(&context.db.lock(), addr).map_err(|e| match e { rusqlite::Error::QueryReturnedNoRows => ServerError::Unauthorized, e => ServerError::Database(e), })?; if !peer.is_disabled { return Ok(Session { context, peer }); } } Err(ServerError::Unauthorized) } #[cfg(test)] mod tests { use super::*; use crate::test; use anyhow::Result; use hyper::StatusCode; use std::path::Path; #[test] fn test_init_wizard() -> Result<(), Error> { // This runs init_wizard(). let server = test::Server::new()?; assert!(Path::new(&server.wg_conf_path()).exists()); Ok(()) } #[tokio::test] async fn test_with_session_disguised_with_headers() -> Result<(), Error> { let server = test::Server::new()?; let req = Request::builder() .uri(format!("http://{}/v1/admin/peers", test::WG_MANAGE_PEER_IP)) .header("Forwarded", format!("for={}", test::ADMIN_PEER_IP)) .header("X-Forwarded-For", test::ADMIN_PEER_IP) .header("X-Real-IP", test::ADMIN_PEER_IP) .body(Body::empty()) .unwrap(); // Request from an unknown IP, trying to disguise as an admin using HTTP headers. let res = server.raw_request("10.80.80.80", req).await; // The addr::remote() filter only looks at remote_addr from the TCP socket. // HTTP headers are not considered. This also means that innernet // server would not function behind an HTTP proxy. assert_eq!(res.status(), StatusCode::UNAUTHORIZED); Ok(()) } #[tokio::test] async fn test_incorrect_public_key() -> Result<(), Error> { let server = test::Server::new()?; let key = Key::generate_private().generate_public(); // Request from an unknown IP, trying to disguise as
admin_capable
identifier_name
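In the innernet-server sample above, `open_database_connection` re-enables SQLite's foreign-key enforcement because SQLite leaves it off for every new connection. A minimal sketch of that pattern, assuming only the `rusqlite` crate (the helper name and the in-memory path are illustrative, not innernet's API):

```rust
// Per-connection pragma: SQLite disables foreign-key enforcement by default,
// so it must be turned on every time a connection is opened.
use rusqlite::{Connection, Result};

fn open_with_foreign_keys(path: &str) -> Result<Connection> {
    let conn = Connection::open(path)?;
    conn.pragma_update(None, "foreign_keys", &1)?;
    Ok(conn)
}

fn main() -> Result<()> {
    let conn = open_with_foreign_keys(":memory:")?;
    // Read the pragma back to confirm it took effect on this connection.
    let enabled: i64 = conn.pragma_query_value(None, "foreign_keys", |row| row.get(0))?;
    assert_eq!(enabled, 1);
    Ok(())
}
```

Reading the pragma back is a cheap sanity check; the setting is per-connection, which is exactly why the server routes every open through one helper.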
population.rs
// Copyright (c) 2017 Ashley Jeffs // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. use unit::Unit; use crossbeam::scope; use rand::{SeedableRng, StdRng}; use rand::distributions::{IndependentSample, Range}; use std::mem; use std::sync::{Arc, Mutex, Condvar}; use std::cmp::Ordering; use std::sync::mpsc::sync_channel; /// Wraps a unit within a struct that lazily evaluates its fitness to avoid /// duplicate work. struct LazyUnit<T: Unit> { unit: T, lazy_fitness: Option<f64>, } impl<T: Unit> LazyUnit<T> { fn from(unit: T) -> Self { LazyUnit { unit: unit, lazy_fitness: None, } } fn fitness(&mut self) -> f64 { match self.lazy_fitness { Some(x) => x, None => { let fitness = self.unit.fitness(); self.lazy_fitness = Some(fitness); fitness } } } } /// Population is an abstraction that represents a collection of units. Each /// unit is a combination of variables, which produces an overall fitness. Units /// mate with other units to produce mutated offspring combining traits from /// both units. /// /// The population is responsible for iterating new generations of units by /// mating fit units and killing unfit units. pub struct Population<T: Unit> { units: Vec<T>, seed: usize, breed_factor: f64, survival_factor: f64, max_size: usize, } impl<T: Unit> Population<T> { /// Creates a new population, starts off with an empty population. If you /// wish to start with a preset population of units you can call /// `set_population` before calling epochs. pub fn new(init_pop: Vec<T>) -> Self { Population { units: init_pop, seed: 1, breed_factor: 0.5, survival_factor: 0.5, max_size: 100, } } //-------------------------------------------------------------------------- /// Sets the random seed of the population. pub fn set_rand_seed(&mut self, seed: usize) -> &mut Self { self.seed = seed; self } /// Sets the maximum size of the population. If already populated with more /// than this amount a random section of the population is killed. pub fn set_size(&mut self, size: usize) -> &mut Self { self.units.truncate(size); self.max_size = size; self } /// Sets the breed_factor (0 < b <= 1) of the genetic algorithm, which is /// the percentage of the population that will be able to breed per epoch. /// Units that are more fit are preferred for breeding, and so a high /// breed_factor results in more poorly performing units being able to /// breed, which will slow the algorithm down but allow it to escape local /// peaks. 
pub fn set_breed_factor(&mut self, breed_factor: f64) -> &mut Self { assert!(breed_factor > 0.0 && breed_factor <= 1.0); self.breed_factor = breed_factor; self } /// Sets the survival_factor (0 <= b <= 1) of the genetic algorithm, which /// is the percentage of the breeding population that will survive each /// epoch. Units that are more fit are preferred for survival, and so a high /// survival rate results in more poorly performing units being carried into /// the next epoch. /// /// Note that this value is a percentage of the breeding population. So if /// your breeding factor is 0.5, and your survival factor is 0.9, the /// percentage of units that will survive the next epoch is: /// /// 0.5 * 0.9 * 100 = 45% /// pub fn set_survival_factor(&mut self, survival_factor: f64) -> &mut Self { assert!(survival_factor >= 0.0 && survival_factor <= 1.0); self.survival_factor = survival_factor; self } //-------------------------------------------------------------------------- /// An epoch that allows units to breed and mutate without harsh culling. /// It's important to sometimes allow 'weak' units to produce generations /// that might escape local peaks in certain dimensions. fn epoch(&self, units: &mut Vec<LazyUnit<T>>, mut rng: StdRng) -> StdRng { assert!(units.len() > 0); // breed_factor dictates how large a percentage of the population will be // able to breed. let breed_up_to = (self.breed_factor * (units.len() as f64)) as usize; let mut breeders: Vec<LazyUnit<T>> = Vec::new(); while let Some(unit) = units.pop() { breeders.push(unit); if breeders.len() == breed_up_to { break; } } units.clear(); // The strongest half of our breeders will survive each epoch. Always at // least one. let surviving_parents = (breeders.len() as f64 * self.survival_factor).ceil() as usize; let pcnt_range = Range::new(0, breeders.len()); for i in 0..self.max_size - surviving_parents { let rs = pcnt_range.ind_sample(&mut rng); units.push(LazyUnit::from( breeders[i % breeders.len()].unit.breed_with( &breeders[rs].unit, ), )); } // Move our survivors into the new generation. units.append(&mut breeders.drain(0..surviving_parents).collect()); rng } /// Runs a number of epochs where fitness is calculated across n parallel /// processes. This is useful when the fitness calculation is an expensive /// operation. pub fn epochs_parallel(&mut self, n_epochs: u32, n_processes: u32) -> &mut Self { scope(|scope| { let cvar_pair = Arc::new((Mutex::new(0), Condvar::new())); let (tx, rx) = sync_channel(0);
for _ in 0..n_processes { let cvar_pair_clone = cvar_pair.clone(); let processed_stack_clone = processed_stack.clone(); let process_queue_clone = process_queue.clone(); scope.spawn(move || { let &(ref lock, ref cvar) = &*cvar_pair_clone; loop { let mut l_unit: LazyUnit<T> = match process_queue_clone.lock().ok().unwrap().recv() { Ok(u) => u, Err(_) => return, }; l_unit.fitness(); processed_stack_clone.lock().ok().unwrap().push(l_unit); { let mut processed = lock.lock().unwrap(); *processed += 1; cvar.notify_all(); } } }); } let &(ref lock, ref cvar) = &*cvar_pair; let mut active_stack = Vec::new(); while let Some(unit) = self.units.pop() { active_stack.push(LazyUnit::from(unit)); } let seed: &[_] = &[self.seed]; let mut rng: StdRng = SeedableRng::from_seed(seed); for i in 0..(n_epochs + 1) { let jobs_total = active_stack.len(); while let Some(unit) = active_stack.pop() { tx.send(unit).unwrap(); } let mut jobs_processed = lock.lock().unwrap(); while *jobs_processed != jobs_total { jobs_processed = cvar.wait(jobs_processed).unwrap(); } *jobs_processed = 0; // Swap the full processed_stack with the active stack. mem::swap(&mut active_stack, &mut processed_stack.lock().ok().unwrap()); // We want to sort such that highest fitness units are at the // end. active_stack.sort_by(|a, b| { a.lazy_fitness .unwrap_or(0.0) .partial_cmp(&b.lazy_fitness.unwrap_or(0.0)) .unwrap_or(Ordering::Equal) }); // If we have the perfect solution then break early. if active_stack.last().unwrap().lazy_fitness.unwrap_or(0.0) == 1.0 { break; } if i != n_epochs { rng = self.epoch(&mut active_stack, rng); } } // Reverse the order of units such that the first unit is the // strongest candidate. while let Some(unit) = active_stack.pop() { self.units.push(unit.unit); } }); self } /// Runs a number of epochs on a single process. pub fn epochs(&mut self, n_epochs: u32) -> &mut Self { let mut processed_stack = Vec::new(); let mut active_stack = Vec::new(); while let Some(unit) = self.units.pop() { active_stack.push(LazyUnit::from(unit)); } let seed: &[_] = &[self.seed]; let mut rng: StdRng = SeedableRng::from_seed(seed); for i in 0..(n_epochs + 1) { while let Some(mut unit) = active_stack.pop() { unit.fitness(); processed_stack.push(unit); } // Swap the full processed_stack with the active stack. mem::swap(&mut active_stack, &mut processed_stack); // We want to sort such that highest fitness units are at the // end. active_stack.sort_by(|a, b| { a.lazy_fitness .unwrap_or(0.0) .partial_cmp(&b.lazy_fitness.unwrap_or(0.0)) .unwrap_or(Ordering::Equal) }); // If we have the perfect solution then break early. if active_stack.last().unwrap().lazy_fitness.unwrap_or(0.0) == 1.0 { break; } if i != n_epochs { rng = self.epoch(&mut active_stack, rng); } } // Reverse the order of units such that the first unit is the // strongest candidate. while let Some(unit) = active_stack.pop() { self.units.push(unit.unit); } self } //-------------------------------------------------------------------------- /// Returns the full population of units, ordered such that the first /// element is the strongest candidate. This collection can be used to /// create a new population. pub fn finish(&mut self) -> Vec<T> { let mut empty_units: Vec<T> = Vec::new(); mem::swap(&mut empty_units, &mut self.units); empty_units } }
let process_queue = Arc::new(Mutex::new(rx)); let processed_stack = Arc::new(Mutex::new(Vec::new()));
random_line_split
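The doc comments in the population.rs sample above spell out the survivor arithmetic: with a breeding factor of 0.5 and a survival factor of 0.9, 0.5 * 0.9 * 100 = 45% of a 100-unit population carries over each epoch. A dependency-free sketch of that arithmetic, mirroring the truncating cast for breeders and the `ceil` for survivors used in `epoch()` (the function name is illustrative, not part of the crate's API):

```rust
// Sketch of the generation split described in the doc comments above.
fn generation_split(pop_size: usize, breed_factor: f64, survival_factor: f64) -> (usize, usize) {
    // Breeders use a truncating cast, survivors round up, as in epoch().
    let breeders = (breed_factor * pop_size as f64) as usize;
    let survivors = (breeders as f64 * survival_factor).ceil() as usize;
    let offspring = pop_size - survivors;
    (survivors, offspring)
}

fn main() {
    // 0.5 * 0.9 * 100 = 45 units survive; the other 55 slots are offspring.
    let (survivors, offspring) = generation_split(100, 0.5, 0.9);
    assert_eq!((survivors, offspring), (45, 55));
    println!("{survivors} survivors, {offspring} offspring");
}
```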
population.rs
// Copyright (c) 2017 Ashley Jeffs // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. use unit::Unit; use crossbeam::scope; use rand::{SeedableRng, StdRng}; use rand::distributions::{IndependentSample, Range}; use std::mem; use std::sync::{Arc, Mutex, Condvar}; use std::cmp::Ordering; use std::sync::mpsc::sync_channel; /// Wraps a unit within a struct that lazily evaluates its fitness to avoid /// duplicate work. struct LazyUnit<T: Unit> { unit: T, lazy_fitness: Option<f64>, } impl<T: Unit> LazyUnit<T> { fn from(unit: T) -> Self { LazyUnit { unit: unit, lazy_fitness: None, } } fn fitness(&mut self) -> f64 { match self.lazy_fitness { Some(x) => x, None => { let fitness = self.unit.fitness(); self.lazy_fitness = Some(fitness); fitness } } } } /// Population is an abstraction that represents a collection of units. Each /// unit is a combination of variables, which produces an overall fitness. Units /// mate with other units to produce mutated offspring combining traits from /// both units. /// /// The population is responsible for iterating new generations of units by /// mating fit units and killing unfit units. pub struct Population<T: Unit> { units: Vec<T>, seed: usize, breed_factor: f64, survival_factor: f64, max_size: usize, } impl<T: Unit> Population<T> { /// Creates a new population, starts off with an empty population. If you /// wish to start with a preset population of units you can call /// `set_population` before calling epochs. pub fn
(init_pop: Vec<T>) -> Self { Population { units: init_pop, seed: 1, breed_factor: 0.5, survival_factor: 0.5, max_size: 100, } } //-------------------------------------------------------------------------- /// Sets the random seed of the population. pub fn set_rand_seed(&mut self, seed: usize) -> &mut Self { self.seed = seed; self } /// Sets the maximum size of the population. If already populated with more /// than this amount a random section of the population is killed. pub fn set_size(&mut self, size: usize) -> &mut Self { self.units.truncate(size); self.max_size = size; self } /// Sets the breed_factor (0 < b <= 1) of the genetic algorithm, which is /// the percentage of the population that will be able to breed per epoch. /// Units that are more fit are preferred for breeding, and so a high /// breed_factor results in more poorly performing units being able to /// breed, which will slow the algorithm down but allow it to escape local /// peaks. pub fn set_breed_factor(&mut self, breed_factor: f64) -> &mut Self { assert!(breed_factor > 0.0 && breed_factor <= 1.0); self.breed_factor = breed_factor; self } /// Sets the survival_factor (0 <= b <= 1) of the genetic algorithm, which /// is the percentage of the breeding population that will survive each /// epoch. Units that are more fit are preferred for survival, and so a high /// survival rate results in more poorly performing units being carried into /// the next epoch. /// /// Note that this value is a percentage of the breeding population. So if /// your breeding factor is 0.5, and your survival factor is 0.9, the /// percentage of units that will survive the next epoch is: /// /// 0.5 * 0.9 * 100 = 45% /// pub fn set_survival_factor(&mut self, survival_factor: f64) -> &mut Self { assert!(survival_factor >= 0.0 && survival_factor <= 1.0); self.survival_factor = survival_factor; self } //-------------------------------------------------------------------------- /// An epoch that allows units to breed and mutate without harsh culling. /// It's important to sometimes allow 'weak' units to produce generations /// that might escape local peaks in certain dimensions. fn epoch(&self, units: &mut Vec<LazyUnit<T>>, mut rng: StdRng) -> StdRng { assert!(units.len() > 0); // breed_factor dictates how large a percentage of the population will be // able to breed. let breed_up_to = (self.breed_factor * (units.len() as f64)) as usize; let mut breeders: Vec<LazyUnit<T>> = Vec::new(); while let Some(unit) = units.pop() { breeders.push(unit); if breeders.len() == breed_up_to { break; } } units.clear(); // The strongest half of our breeders will survive each epoch. Always at // least one. let surviving_parents = (breeders.len() as f64 * self.survival_factor).ceil() as usize; let pcnt_range = Range::new(0, breeders.len()); for i in 0..self.max_size - surviving_parents { let rs = pcnt_range.ind_sample(&mut rng); units.push(LazyUnit::from( breeders[i % breeders.len()].unit.breed_with( &breeders[rs].unit, ), )); } // Move our survivors into the new generation. units.append(&mut breeders.drain(0..surviving_parents).collect()); rng } /// Runs a number of epochs where fitness is calculated across n parallel /// processes. This is useful when the fitness calculation is an expensive /// operation.
pub fn epochs_parallel(&mut self, n_epochs: u32, n_processes: u32) -> &mut Self { scope(|scope| { let cvar_pair = Arc::new((Mutex::new(0), Condvar::new())); let (tx, rx) = sync_channel(0); let process_queue = Arc::new(Mutex::new(rx)); let processed_stack = Arc::new(Mutex::new(Vec::new())); for _ in 0..n_processes { let cvar_pair_clone = cvar_pair.clone(); let processed_stack_clone = processed_stack.clone(); let process_queue_clone = process_queue.clone(); scope.spawn(move || { let &(ref lock, ref cvar) = &*cvar_pair_clone; loop { let mut l_unit: LazyUnit<T> = match process_queue_clone.lock().ok().unwrap().recv() { Ok(u) => u, Err(_) => return, }; l_unit.fitness(); processed_stack_clone.lock().ok().unwrap().push(l_unit); { let mut processed = lock.lock().unwrap(); *processed += 1; cvar.notify_all(); } } }); } let &(ref lock, ref cvar) = &*cvar_pair; let mut active_stack = Vec::new(); while let Some(unit) = self.units.pop() { active_stack.push(LazyUnit::from(unit)); } let seed: &[_] = &[self.seed]; let mut rng: StdRng = SeedableRng::from_seed(seed); for i in 0..(n_epochs + 1) { let jobs_total = active_stack.len(); while let Some(unit) = active_stack.pop() { tx.send(unit).unwrap(); } let mut jobs_processed = lock.lock().unwrap(); while *jobs_processed != jobs_total { jobs_processed = cvar.wait(jobs_processed).unwrap(); } *jobs_processed = 0; // Swap the full processed_stack with the active stack. mem::swap(&mut active_stack, &mut processed_stack.lock().ok().unwrap()); // We want to sort such that highest fitness units are at the // end. active_stack.sort_by(|a, b| { a.lazy_fitness .unwrap_or(0.0) .partial_cmp(&b.lazy_fitness.unwrap_or(0.0)) .unwrap_or(Ordering::Equal) }); // If we have the perfect solution then break early. if active_stack.last().unwrap().lazy_fitness.unwrap_or(0.0) == 1.0 { break; } if i != n_epochs { rng = self.epoch(&mut active_stack, rng); } } // Reverse the order of units such that the first unit is the // strongest candidate. while let Some(unit) = active_stack.pop() { self.units.push(unit.unit); } }); self } /// Runs a number of epochs on a single process. pub fn epochs(&mut self, n_epochs: u32) -> &mut Self { let mut processed_stack = Vec::new(); let mut active_stack = Vec::new(); while let Some(unit) = self.units.pop() { active_stack.push(LazyUnit::from(unit)); } let seed: &[_] = &[self.seed]; let mut rng: StdRng = SeedableRng::from_seed(seed); for i in 0..(n_epochs + 1) { while let Some(mut unit) = active_stack.pop() { unit.fitness(); processed_stack.push(unit); } // Swap the full processed_stack with the active stack. mem::swap(&mut active_stack, &mut processed_stack); // We want to sort such that highest fitness units are at the // end. active_stack.sort_by(|a, b| { a.lazy_fitness .unwrap_or(0.0) .partial_cmp(&b.lazy_fitness.unwrap_or(0.0)) .unwrap_or(Ordering::Equal) }); // If we have the perfect solution then break early. if active_stack.last().unwrap().lazy_fitness.unwrap_or(0.0) == 1.0 { break; } if i != n_epochs { rng = self.epoch(&mut active_stack, rng); } } // Reverse the order of units such that the first unit is the // strongest candidate. while let Some(unit) = active_stack.pop() { self.units.push(unit.unit); } self } //-------------------------------------------------------------------------- /// Returns the full population of units, ordered such that the first /// element is the strongest candidate. This collection can be used to /// create a new population.
pub fn finish(&mut self) -> Vec<T> { let mut empty_units: Vec<T> = Vec::new(); mem::swap(&mut empty_units, &mut self.units); empty_units } }
new
identifier_name
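`LazyUnit` in the population.rs samples memoizes `fitness()` in an `Option<f64>` so each unit is evaluated at most once, even though both the sort and the early-exit check read the value. Here is the same pattern in miniature, with a closure standing in for `Unit::fitness`; this is only a sketch, and modern Rust would often reach for `std::cell::OnceCell` instead:

```rust
// Memoized lazy evaluation: compute once, cache in an Option, reuse after.
struct Lazy<F: Fn() -> f64> {
    compute: F,
    cached: Option<f64>,
}

impl<F: Fn() -> f64> Lazy<F> {
    fn new(compute: F) -> Self {
        Lazy { compute, cached: None }
    }

    fn get(&mut self) -> f64 {
        match self.cached {
            Some(v) => v,
            None => {
                let v = (self.compute)();
                self.cached = Some(v);
                v
            }
        }
    }
}

fn main() {
    let mut fitness = Lazy::new(|| {
        println!("expensive evaluation runs exactly once");
        0.75
    });
    assert_eq!(fitness.get(), 0.75);
    assert_eq!(fitness.get(), 0.75); // served from the cache, no recompute
}
```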
server.rs
// use std::{ // hash::Hash, // str, // io::Write, // net::{SocketAddr, IpAddr, Ipv4Addr}, // sync::Mutex, // time::{Instant} // }; // use actix_http::{ // body::Body, // http::{ // header::{CONTENT_TYPE, SERVER}, // HeaderValue,
// NewService, // Service, // }; // use actix_server::{ServerConfig}; // use actix_web::dev::Server use actix::prelude::*; // use bytes::{BytesMut, Bytes}; // use futures::{ // future::{ // ok, // join_all, // Future, // }, // Async, Poll, // }; // use serde_json::to_writer; // use actix_web::{ // App, // web, // middleware, // Error as AWError, // HttpResponse, // HttpRequest, // HttpServer, // }; // use actix_web_actors::ws::{Message as WsMessage, CloseCode, CloseReason }; // use askama::Template; //use actix_redis::{Command, RedisActor, Error as ARError}; use actix_redis::{RedisActor}; // use redis_async::{ // client::{PairedConnection, paired_connect, PubsubConnection, pubsub_connect}, // resp::{RespValue}, // }; use crate::ws::{Close as WsClose, WsSession}; // use super::db::{RedisConnection}; // pub struct App { // // db: PgConnection, // db: RedisConnection, // // db: Arc<PairedConnection>, // hdr_srv: HeaderValue, // hdr_ctjson: HeaderValue, // hdr_cthtml: HeaderValue, // } // impl Service for App { // type Request = Request; // type Response = Response; // type Error = Error; // type Future = Box<dyn Future<Item = Response, Error = Error>>; // #[inline] // fn poll_ready(&mut self) -> Poll<(), Self::Error> { // Ok(Async::Ready(())) // } // fn call(&mut self, req: Request) -> Self::Future { // let path = req.path(); // match path { // "/db" => { // let h_srv = self.hdr_srv.clone(); // let h_ct = self.hdr_ctjson.clone(); // Box::new(self.db.get("mydomain:one") // .map(|v:String| { // let mut body = BytesMut::new(); // serde_json::to_writer(Writer(&mut body), &Message{ // message: &*v // }).unwrap(); // let mut res = Response::with_body(StatusCode::OK, Body::Bytes(body.freeze())); // let hdrs = res.headers_mut(); // hdrs.insert(SERVER, h_srv); // hdrs.insert(CONTENT_TYPE, h_ct); // res // }) // ) // } // "/fortune" => { // let h_srv = self.hdr_srv.clone(); // let h_ct = self.hdr_cthtml.clone(); // // Box::new(self.db.tell_fortune().from_err().map(move |fortunes| { // Box::new(ok({ // let mut body = BytesMut::with_capacity(2048); // let mut writer = Writer(&mut body); // let _ = write!(writer, "{}", HelloTemplate { name : "tester" });//FortunesTemplate { fortunes }); // let mut res = Response::with_body(StatusCode::OK, Body::Bytes(body.freeze())); // let hdrs = res.headers_mut(); // hdrs.insert(SERVER, h_srv); // hdrs.insert(CONTENT_TYPE, h_ct); // res // })) // } // "/json" => { // Box::new(ok(json())) // } // "/plaintext" => { // Box::new(ok(plaintext())) // } // // "/queries" => { // // let q = utils::get_query_param(req.uri().query().unwrap_or("")) as usize; // // let h_srv = self.hdr_srv.clone(); // // let h_ct = self.hdr_ctjson.clone(); // // Box::new(self.db.get_worlds(q).from_err().map(move |worlds| { // // let mut body = BytesMut::with_capacity(35 * worlds.len()); // // to_writer(Writer(&mut body), &worlds).unwrap(); // // let mut res = // // Response::with_body(StatusCode::OK, Body::Bytes(body.freeze())); // // let hdrs = res.headers_mut(); // // hdrs.insert(SERVER, h_srv); // // hdrs.insert(CONTENT_TYPE, h_ct); // // res // // })) // // } // // "/updates" => { // // let q = utils::get_query_param(req.uri().query().unwrap_or("")) as usize; // // let h_srv = self.hdr_srv.clone(); // // let h_ct = self.hdr_ctjson.clone(); // // Box::new(self.db.update(q).from_err().map(move |worlds| { // // let mut body = BytesMut::with_capacity(35 * worlds.len()); // // to_writer(Writer(&mut body), &worlds).unwrap(); // // let mut res = // // Response::with_body(StatusCode::OK, 
Body::Bytes(body.freeze())); // // let hdrs = res.headers_mut(); // // hdrs.insert(SERVER, h_srv); // // hdrs.insert(CONTENT_TYPE, h_ct); // // res // // })) // // } // _ => Box::new(ok(Response::new(StatusCode::NOT_FOUND))), // } // } // } // #[derive(Clone)] // pub struct AppFactory; // impl NewService for AppFactory { // type Config = ServerConfig; // type Request = Request; // type Response = Response; // type Error = Error; // type Service = App; // type InitError = (); // type Future = Box<dyn Future<Item = Self::Service, Error = Self::InitError>>; // fn new_service(&self, _: &ServerConfig) -> Self::Future { // // const DB_URL: &str = // // "postgres://benchmarkdbuser:benchmarkdbpass@tfb-database/hello_world"; // // Box::new(PgConnection::connect(DB_URL).map(|db| App { // // db, // // hdr_srv: HeaderValue::from_static("Actix"), // // hdr_ctjson: HeaderValue::from_static("application/json"), // // hdr_cthtml: HeaderValue::from_static("text/html; charset=utf-8"), // // })); // Box::new( // // paired_connect(&String::from(DB_URL).parse().unwrap()) // RedisConnection::connect(DB_URL) // .map_err(|_| ()) // .map(|db|{ // let app = App { // db, // hdr_srv: HeaderValue::from_static("Actix"), // hdr_ctjson: HeaderValue::from_static("application/json"), // hdr_cthtml: HeaderValue::from_static("text/html; charset=utf-8"), // }; // app // }) // // }) // ) // } // } // pub fn json() -> HttpResponse { // let message = Message { // message: "Hello, World!", // }; // let mut body = BytesMut::with_capacity(SIZE); // serde_json::to_writer(Writer(&mut body), &message).unwrap(); // let mut res = HttpResponse::with_body(StatusCode::OK, Body::Bytes(body.freeze())); // res.headers_mut() // .insert(SERVER, HeaderValue::from_static("Actix")); // res.headers_mut() // .insert(CONTENT_TYPE, HeaderValue::from_static("application/json")); // res // } // fn plaintext() -> HttpResponse { // let mut res = HttpResponse::with_body( // StatusCode::OK, // Body::Bytes(Bytes::from_static(b"Hello, World!")), // ); // res.headers_mut() // .insert(SERVER, HeaderValue::from_static("Actix")); // res.headers_mut() // .insert(CONTENT_TYPE, HeaderValue::from_static("text/plain")); // res // } // #[derive(Template)] // #[template(path = "test.html")] // struct HelloTemplate<'a> { // name: &'a str, // } // pub fn root_handler( // req: web::HttpRequest // ) -> impl Future<Item = HttpResponse, Error = ()> { // let path = req.match_info().query("filename").parse().unwrap(); // HttpResponse::from( // Ok( NamedFile::open(path).unwrap() ) // ) // // ok( HttpResponse::Ok().body("hi")) // // Ok(HttpResponse::InternalServerError().finish()) // } pub struct WsServer { sessions: Vec<Addr<WsSession>>, db: Addr<RedisActor>, } impl Actor for WsServer { type Context = Context<Self>; } impl WsServer { pub fn new(db : Addr<RedisActor>) -> WsServer { let sessions = vec![]; WsServer { sessions, db } } } impl WsServer { fn close_all(&self) { // for s in &*self.sessions.lock().unwrap() { for s in &self.sessions { // if let Some(v) = s.upgrade(){ if s.connected() { // println!("sending WsClose"); // v.do_send(WsClose); s.do_send(WsClose); //WsMessage::Close(Some(CloseReason { code: CloseCode::Restart, description: None }))); } } } } /// new websocket connection #[derive(Message)] pub struct Connect { pub addr: Addr<WsSession>, } // impl Message for Connect { // type Result = usize; // } impl Handler<Connect> for WsServer { type Result = (); fn handle(&mut self, msg: Connect, _ctx: &mut Self::Context) -> Self::Result { // println!("{:?} joined 
wsserver", msg.addr); // let mut s = &mut *self.sessions.get_mut().unwrap(); let s = &mut self.sessions; s.push(msg.addr); //.downgrade()); println!( "new web socket added to server : {} sockets opened", s.len() ); } } /// websocket session disconnected #[derive(Message)] pub struct Disconnect { pub addr: Addr<WsSession>, // pub id : usize, } impl Handler<Disconnect> for WsServer { type Result = (); fn handle(&mut self, msg: Disconnect, _ctx: &mut Self::Context) -> Self::Result { println!("a websocket session requested disconnect"); let mut s = 0; let mut f = false; // let mut ss = &mut *self.sessions.get_mut().unwrap(); let ss = &mut self.sessions; for i in 0..ss.len() { // if let Some(v) = self.sessions[i].upgrade(){ if ss[i] == msg.addr { // if ss[i] == msg.addr { // if v == msg.addr { s = i; f = true; break; // } } } if f { ss.remove(s); println!( "a websocket session removed from server : {} sockets opened", ss.len() ); } } } /// request to close all other connections #[derive(Message)] pub struct CloseAll; impl Handler<CloseAll> for WsServer { type Result = (); fn handle(&mut self, _msg: CloseAll, _ctx: &mut Self::Context) -> Self::Result { println!("received CloseAll"); self.close_all(); } }
// StatusCode, // }, // Error, Request, Response, // }; // use actix_service::{
random_line_split
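The live (uncommented) code in the server.rs sample keeps sessions in a plain `Vec<Addr<WsSession>>`, and its `Disconnect` handler removes one entry with a manual index scan plus `s`/`f` flags. The same bookkeeping can be written with `Iterator::position`; in this sketch, `usize` ids stand in for `Addr<WsSession>` so it runs without actix:

```rust
// Vec bookkeeping equivalent to the Disconnect handler's index scan.
fn remove_session(sessions: &mut Vec<usize>, addr: usize) -> bool {
    if let Some(i) = sessions.iter().position(|s| *s == addr) {
        sessions.remove(i);
        true
    } else {
        false
    }
}

fn main() {
    let mut sessions = vec![10, 20, 30];
    assert!(remove_session(&mut sessions, 20));
    assert_eq!(sessions, vec![10, 30]);
    // Removing an unknown address is a no-op, like the `f` flag guard above.
    assert!(!remove_session(&mut sessions, 99));
}
```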
server.rs
// use std::{ // hash::Hash, // str, // io::Write, // net::{SocketAddr, IpAddr, Ipv4Addr}, // sync::Mutex, // time::{Instant} // }; // use actix_http::{ // body::Body, // http::{ // header::{CONTENT_TYPE, SERVER}, // HeaderValue, // StatusCode, // }, // Error, Request, Response, // }; // use actix_service::{ // NewService, // Service, // }; // use actix_server::{ServerConfig}; // use actix_web::dev::Server use actix::prelude::*; // use bytes::{BytesMut, Bytes}; // use futures::{ // future::{ // ok, // join_all, // Future, // }, // Async, Poll, // }; // use serde_json::to_writer; // use actix_web::{ // App, // web, // middleware, // Error as AWError, // HttpResponse, // HttpRequest, // HttpServer, // }; // use actix_web_actors::ws::{Message as WsMessage, CloseCode, CloseReason }; // use askama::Template; //use actix_redis::{Command, RedisActor, Error as ARError}; use actix_redis::{RedisActor}; // use redis_async::{ // client::{PairedConnection, paired_connect, PubsubConnection, pubsub_connect}, // resp::{RespValue}, // }; use crate::ws::{Close as WsClose, WsSession}; // use super::db::{RedisConnection}; // pub struct App { // // db: PgConnection, // db: RedisConnection, // // db: Arc<PairedConnection>, // hdr_srv: HeaderValue, // hdr_ctjson: HeaderValue, // hdr_cthtml: HeaderValue, // } // impl Service for App { // type Request = Request; // type Response = Response; // type Error = Error; // type Future = Box<dyn Future<Item = Response, Error = Error>>; // #[inline] // fn poll_ready(&mut self) -> Poll<(), Self::Error> { // Ok(Async::Ready(())) // } // fn call(&mut self, req: Request) -> Self::Future { // let path = req.path(); // match path { // "/db" => { // let h_srv = self.hdr_srv.clone(); // let h_ct = self.hdr_ctjson.clone(); // Box::new(self.db.get("mydomain:one") // .map(|v:String| { // let mut body = BytesMut::new(); // serde_json::to_writer(Writer(&mut body), &Message{ // message: &*v // }).unwrap(); // let mut res = Response::with_body(StatusCode::OK, Body::Bytes(body.freeze())); // let hdrs = res.headers_mut(); // hdrs.insert(SERVER, h_srv); // hdrs.insert(CONTENT_TYPE, h_ct); // res // }) // ) // } // "/fortune" => { // let h_srv = self.hdr_srv.clone(); // let h_ct = self.hdr_cthtml.clone(); // // Box::new(self.db.tell_fortune().from_err().map(move |fortunes| { // Box::new(ok({ // let mut body = BytesMut::with_capacity(2048); // let mut writer = Writer(&mut body); // let _ = write!(writer, "{}", HelloTemplate { name : "tester" });//FortunesTemplate { fortunes }); // let mut res = Response::with_body(StatusCode::OK, Body::Bytes(body.freeze())); // let hdrs = res.headers_mut(); // hdrs.insert(SERVER, h_srv); // hdrs.insert(CONTENT_TYPE, h_ct); // res // })) // } // "/json" => { // Box::new(ok(json())) // } // "/plaintext" => { // Box::new(ok(plaintext())) // } // // "/queries" => { // // let q = utils::get_query_param(req.uri().query().unwrap_or("")) as usize; // // let h_srv = self.hdr_srv.clone(); // // let h_ct = self.hdr_ctjson.clone(); // // Box::new(self.db.get_worlds(q).from_err().map(move |worlds| { // // let mut body = BytesMut::with_capacity(35 * worlds.len()); // // to_writer(Writer(&mut body), &worlds).unwrap(); // // let mut res = // // Response::with_body(StatusCode::OK, Body::Bytes(body.freeze())); // // let hdrs = res.headers_mut(); // // hdrs.insert(SERVER, h_srv); // // hdrs.insert(CONTENT_TYPE, h_ct); // // res // // })) // // } // // "/updates" => { // // let q = utils::get_query_param(req.uri().query().unwrap_or("")) as usize; // // let h_srv = 
self.hdr_srv.clone(); // // let h_ct = self.hdr_ctjson.clone(); // // Box::new(self.db.update(q).from_err().map(move |worlds| { // // let mut body = BytesMut::with_capacity(35 * worlds.len()); // // to_writer(Writer(&mut body), &worlds).unwrap(); // // let mut res = // // Response::with_body(StatusCode::OK, Body::Bytes(body.freeze())); // // let hdrs = res.headers_mut(); // // hdrs.insert(SERVER, h_srv); // // hdrs.insert(CONTENT_TYPE, h_ct); // // res // // })) // // } // _ => Box::new(ok(Response::new(StatusCode::NOT_FOUND))), // } // } // } // #[derive(Clone)] // pub struct AppFactory; // impl NewService for AppFactory { // type Config = ServerConfig; // type Request = Request; // type Response = Response; // type Error = Error; // type Service = App; // type InitError = (); // type Future = Box<dyn Future<Item = Self::Service, Error = Self::InitError>>; // fn new_service(&self, _: &ServerConfig) -> Self::Future { // // const DB_URL: &str = // // "postgres://benchmarkdbuser:benchmarkdbpass@tfb-database/hello_world"; // // Box::new(PgConnection::connect(DB_URL).map(|db| App { // // db, // // hdr_srv: HeaderValue::from_static("Actix"), // // hdr_ctjson: HeaderValue::from_static("application/json"), // // hdr_cthtml: HeaderValue::from_static("text/html; charset=utf-8"), // // })); // Box::new( // // paired_connect(&String::from(DB_URL).parse().unwrap()) // RedisConnection::connect(DB_URL) // .map_err(|_| ()) // .map(|db|{ // let app = App { // db, // hdr_srv: HeaderValue::from_static("Actix"), // hdr_ctjson: HeaderValue::from_static("application/json"), // hdr_cthtml: HeaderValue::from_static("text/html; charset=utf-8"), // }; // app // }) // // }) // ) // } // } // pub fn json() -> HttpResponse { // let message = Message { // message: "Hello, World!", // }; // let mut body = BytesMut::with_capacity(SIZE); // serde_json::to_writer(Writer(&mut body), &message).unwrap(); // let mut res = HttpResponse::with_body(StatusCode::OK, Body::Bytes(body.freeze())); // res.headers_mut() // .insert(SERVER, HeaderValue::from_static("Actix")); // res.headers_mut() // .insert(CONTENT_TYPE, HeaderValue::from_static("application/json")); // res // } // fn plaintext() -> HttpResponse { // let mut res = HttpResponse::with_body( // StatusCode::OK, // Body::Bytes(Bytes::from_static(b"Hello, World!")), // ); // res.headers_mut() // .insert(SERVER, HeaderValue::from_static("Actix")); // res.headers_mut() // .insert(CONTENT_TYPE, HeaderValue::from_static("text/plain")); // res // } // #[derive(Template)] // #[template(path = "test.html")] // struct HelloTemplate<'a> { // name: &'a str, // } // pub fn root_handler( // req: web::HttpRequest // ) -> impl Future<Item = HttpResponse, Error = ()> { // let path = req.match_info().query("filename").parse().unwrap(); // HttpResponse::from( // Ok( NamedFile::open(path).unwrap() ) // ) // // ok( HttpResponse::Ok().body("hi")) // // Ok(HttpResponse::InternalServerError().finish()) // } pub struct WsServer { sessions: Vec<Addr<WsSession>>, db: Addr<RedisActor>, } impl Actor for WsServer { type Context = Context<Self>; } impl WsServer { pub fn new(db : Addr<RedisActor>) -> WsServer { let sessions = vec![]; WsServer { sessions, db } } } impl WsServer { fn close_all(&self) { // for s in &*self.sessions.lock().unwrap() { for s in &self.sessions { // if let Some(v) = s.upgrade(){ if s.connected() { // println!("sending WsClose"); // v.do_send(WsClose); s.do_send(WsClose); //WsMessage::Close(Some(CloseReason { code: CloseCode::Restart, description: None }))); } } } } /// new 
websocket connection #[derive(Message)] pub struct Connect { pub addr: Addr<WsSession>, } // impl Message for Connect { // type Result = usize; // } impl Handler<Connect> for WsServer { type Result = (); fn handle(&mut self, msg: Connect, _ctx: &mut Self::Context) -> Self::Result { // println!("{:?} joined wsserver", msg.addr); // let mut s = &mut *self.sessions.get_mut().unwrap(); let s = &mut self.sessions; s.push(msg.addr); //.downgrade()); println!( "new web socket added to server : {} sockets opened", s.len() ); } } /// websocket session disconnected #[derive(Message)] pub struct Disconnect { pub addr: Addr<WsSession>, // pub id : usize, } impl Handler<Disconnect> for WsServer { type Result = (); fn handle(&mut self, msg: Disconnect, _ctx: &mut Self::Context) -> Self::Result { println!("a websocket session requested disconnect"); let mut s = 0; let mut f = false; // let mut ss = &mut *self.sessions.get_mut().unwrap(); let ss = &mut self.sessions; for i in 0..ss.len() { // if let Some(v) = self.sessions[i].upgrade(){ if ss[i] == msg.addr
} if f { ss.remove(s); println!( "a websocket session removed from server : {} sockets opened", ss.len() ); } } } /// request to close all other connections #[derive(Message)] pub struct CloseAll; impl Handler<CloseAll> for WsServer { type Result = (); fn handle(&mut self, _msg: CloseAll, _ctx: &mut Self::Context) -> Self::Result { println!("received CloseAll"); self.close_all(); } }
{ // if ss[i] == msg.addr { // if v == msg.addr { s = i; f = true; break; // } }
conditional_block
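The masked span in the row above is the conditional block inside that `Disconnect` scan; its neighbor `close_all()` shows the broadcast half of the actor, sending a close message to every connected session. A dependency-free sketch of the broadcast using std `mpsc` senders in place of actix `Addr` handles (`Msg` and the function name are assumptions for illustration):

```rust
// Broadcast sketch: one Sender per session plays the role of an Addr handle.
use std::sync::mpsc::{channel, Sender};

#[derive(Debug, PartialEq)]
enum Msg {
    Close,
}

fn close_all(sessions: &[Sender<Msg>]) {
    for s in sessions {
        // send() fails once the receiving side hung up, which loosely
        // mirrors the s.connected() guard in WsServer::close_all.
        let _ = s.send(Msg::Close);
    }
}

fn main() {
    let (tx1, rx1) = channel();
    let (tx2, rx2) = channel();
    close_all(&[tx1, tx2]);
    assert_eq!(rx1.recv().unwrap(), Msg::Close);
    assert_eq!(rx2.recv().unwrap(), Msg::Close);
}
```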
server.rs
// use std::{ // hash::Hash, // str, // io::Write, // net::{SocketAddr, IpAddr, Ipv4Addr}, // sync::Mutex, // time::{Instant} // }; // use actix_http::{ // body::Body, // http::{ // header::{CONTENT_TYPE, SERVER}, // HeaderValue, // StatusCode, // }, // Error, Request, Response, // }; // use actix_service::{ // NewService, // Service, // }; // use actix_server::{ServerConfig}; // use actix_web::dev::Server use actix::prelude::*; // use bytes::{BytesMut, Bytes}; // use futures::{ // future::{ // ok, // join_all, // Future, // }, // Async, Poll, // }; // use serde_json::to_writer; // use actix_web::{ // App, // web, // middleware, // Error as AWError, // HttpResponse, // HttpRequest, // HttpServer, // }; // use actix_web_actors::ws::{Message as WsMessage, CloseCode, CloseReason }; // use askama::Template; //use actix_redis::{Command, RedisActor, Error as ARError}; use actix_redis::{RedisActor}; // use redis_async::{ // client::{PairedConnection, paired_connect, PubsubConnection, pubsub_connect}, // resp::{RespValue}, // }; use crate::ws::{Close as WsClose, WsSession}; // use super::db::{RedisConnection}; // pub struct App { // // db: PgConnection, // db: RedisConnection, // // db: Arc<PairedConnection>, // hdr_srv: HeaderValue, // hdr_ctjson: HeaderValue, // hdr_cthtml: HeaderValue, // } // impl Service for App { // type Request = Request; // type Response = Response; // type Error = Error; // type Future = Box<dyn Future<Item = Response, Error = Error>>; // #[inline] // fn poll_ready(&mut self) -> Poll<(), Self::Error> { // Ok(Async::Ready(())) // } // fn call(&mut self, req: Request) -> Self::Future { // let path = req.path(); // match path { // "/db" => { // let h_srv = self.hdr_srv.clone(); // let h_ct = self.hdr_ctjson.clone(); // Box::new(self.db.get("mydomain:one") // .map(|v:String| { // let mut body = BytesMut::new(); // serde_json::to_writer(Writer(&mut body), &Message{ // message: &*v // }).unwrap(); // let mut res = Response::with_body(StatusCode::OK, Body::Bytes(body.freeze())); // let hdrs = res.headers_mut(); // hdrs.insert(SERVER, h_srv); // hdrs.insert(CONTENT_TYPE, h_ct); // res // }) // ) // } // "/fortune" => { // let h_srv = self.hdr_srv.clone(); // let h_ct = self.hdr_cthtml.clone(); // // Box::new(self.db.tell_fortune().from_err().map(move |fortunes| { // Box::new(ok({ // let mut body = BytesMut::with_capacity(2048); // let mut writer = Writer(&mut body); // let _ = write!(writer, "{}", HelloTemplate { name : "tester" });//FortunesTemplate { fortunes }); // let mut res = Response::with_body(StatusCode::OK, Body::Bytes(body.freeze())); // let hdrs = res.headers_mut(); // hdrs.insert(SERVER, h_srv); // hdrs.insert(CONTENT_TYPE, h_ct); // res // })) // } // "/json" => { // Box::new(ok(json())) // } // "/plaintext" => { // Box::new(ok(plaintext())) // } // // "/queries" => { // // let q = utils::get_query_param(req.uri().query().unwrap_or("")) as usize; // // let h_srv = self.hdr_srv.clone(); // // let h_ct = self.hdr_ctjson.clone(); // // Box::new(self.db.get_worlds(q).from_err().map(move |worlds| { // // let mut body = BytesMut::with_capacity(35 * worlds.len()); // // to_writer(Writer(&mut body), &worlds).unwrap(); // // let mut res = // // Response::with_body(StatusCode::OK, Body::Bytes(body.freeze())); // // let hdrs = res.headers_mut(); // // hdrs.insert(SERVER, h_srv); // // hdrs.insert(CONTENT_TYPE, h_ct); // // res // // })) // // } // // "/updates" => { // // let q = utils::get_query_param(req.uri().query().unwrap_or("")) as usize; // // let h_srv = 
self.hdr_srv.clone(); // // let h_ct = self.hdr_ctjson.clone(); // // Box::new(self.db.update(q).from_err().map(move |worlds| { // // let mut body = BytesMut::with_capacity(35 * worlds.len()); // // to_writer(Writer(&mut body), &worlds).unwrap(); // // let mut res = // // Response::with_body(StatusCode::OK, Body::Bytes(body.freeze())); // // let hdrs = res.headers_mut(); // // hdrs.insert(SERVER, h_srv); // // hdrs.insert(CONTENT_TYPE, h_ct); // // res // // })) // // } // _ => Box::new(ok(Response::new(StatusCode::NOT_FOUND))), // } // } // } // #[derive(Clone)] // pub struct AppFactory; // impl NewService for AppFactory { // type Config = ServerConfig; // type Request = Request; // type Response = Response; // type Error = Error; // type Service = App; // type InitError = (); // type Future = Box<dyn Future<Item = Self::Service, Error = Self::InitError>>; // fn new_service(&self, _: &ServerConfig) -> Self::Future { // // const DB_URL: &str = // // "postgres://benchmarkdbuser:benchmarkdbpass@tfb-database/hello_world"; // // Box::new(PgConnection::connect(DB_URL).map(|db| App { // // db, // // hdr_srv: HeaderValue::from_static("Actix"), // // hdr_ctjson: HeaderValue::from_static("application/json"), // // hdr_cthtml: HeaderValue::from_static("text/html; charset=utf-8"), // // })); // Box::new( // // paired_connect(&String::from(DB_URL).parse().unwrap()) // RedisConnection::connect(DB_URL) // .map_err(|_| ()) // .map(|db|{ // let app = App { // db, // hdr_srv: HeaderValue::from_static("Actix"), // hdr_ctjson: HeaderValue::from_static("application/json"), // hdr_cthtml: HeaderValue::from_static("text/html; charset=utf-8"), // }; // app // }) // // }) // ) // } // } // pub fn json() -> HttpResponse { // let message = Message { // message: "Hello, World!", // }; // let mut body = BytesMut::with_capacity(SIZE); // serde_json::to_writer(Writer(&mut body), &message).unwrap(); // let mut res = HttpResponse::with_body(StatusCode::OK, Body::Bytes(body.freeze())); // res.headers_mut() // .insert(SERVER, HeaderValue::from_static("Actix")); // res.headers_mut() // .insert(CONTENT_TYPE, HeaderValue::from_static("application/json")); // res // } // fn plaintext() -> HttpResponse { // let mut res = HttpResponse::with_body( // StatusCode::OK, // Body::Bytes(Bytes::from_static(b"Hello, World!")), // ); // res.headers_mut() // .insert(SERVER, HeaderValue::from_static("Actix")); // res.headers_mut() // .insert(CONTENT_TYPE, HeaderValue::from_static("text/plain")); // res // } // #[derive(Template)] // #[template(path = "test.html")] // struct HelloTemplate<'a> { // name: &'a str, // } // pub fn root_handler( // req: web::HttpRequest // ) -> impl Future<Item = HttpResponse, Error = ()> { // let path = req.match_info().query("filename").parse().unwrap(); // HttpResponse::from( // Ok( NamedFile::open(path).unwrap() ) // ) // // ok( HttpResponse::Ok().body("hi")) // // Ok(HttpResponse::InternalServerError().finish()) // } pub struct WsServer { sessions: Vec<Addr<WsSession>>, db: Addr<RedisActor>, } impl Actor for WsServer { type Context = Context<Self>; } impl WsServer { pub fn new(db : Addr<RedisActor>) -> WsServer { let sessions = vec![]; WsServer { sessions, db } } } impl WsServer { fn close_all(&self) { // for s in &*self.sessions.lock().unwrap() { for s in &self.sessions { // if let Some(v) = s.upgrade(){ if s.connected() { // println!("sending WsClose"); // v.do_send(WsClose); s.do_send(WsClose); //WsMessage::Close(Some(CloseReason { code: CloseCode::Restart, description: None }))); } } } } /// new 
websocket connection #[derive(Message)] pub struct Connect { pub addr: Addr<WsSession>, } // impl Message for Connect { // type Result = usize; // } impl Handler<Connect> for WsServer { type Result = (); fn handle(&mut self, msg: Connect, _ctx: &mut Self::Context) -> Self::Result
} /// websocket session disconnected #[derive(Message)] pub struct Disconnect { pub addr: Addr<WsSession>, // pub id : usize, } impl Handler<Disconnect> for WsServer { type Result = (); fn handle(&mut self, msg: Disconnect, _ctx: &mut Self::Context) -> Self::Result { println!("a websocket session requested disconnect"); let mut s = 0; let mut f = false; // let mut ss = &mut *self.sessions.get_mut().unwrap(); let ss = &mut self.sessions; for i in 0..ss.len() { // if let Some(v) = self.sessions[i].upgrade(){ if ss[i] == msg.addr { // if ss[i] == msg.addr { // if v == msg.addr { s = i; f = true; break; // } } } if f { ss.remove(s); println!( "a websocket session removed from server : {} sockets opened", ss.len() ); } } } /// request to close all other connections #[derive(Message)] pub struct CloseAll; impl Handler<CloseAll> for WsServer { type Result = (); fn handle(&mut self, _msg: CloseAll, _ctx: &mut Self::Context) -> Self::Result { println!("received CloseAll"); self.close_all(); } }
{ // println!("{:?} joined wsserver", msg.addr); // let mut s = &mut *self.sessions.get_mut().unwrap(); let s = &mut self.sessions; s.push(msg.addr); //.downgrade()); println!( "new web socket added to server : {} sockets opened", s.len() ); }
identifier_body
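The `identifier_body` target above is a `Handler` method body, which highlights the actor shape of `WsServer`: a single owner of `sessions` that mutates state only in response to `Connect`, `Disconnect`, and `CloseAll` messages. The same protocol in miniature, driven by an enum and a channel loop instead of actix (all names are illustrative):

```rust
// Actor-in-miniature: all session state is owned by one message loop.
use std::sync::mpsc::channel;

enum ServerMsg {
    Connect(usize),
    Disconnect(usize),
    CloseAll,
}

fn main() {
    let (tx, rx) = channel();
    tx.send(ServerMsg::Connect(1)).unwrap();
    tx.send(ServerMsg::Connect(2)).unwrap();
    tx.send(ServerMsg::Disconnect(1)).unwrap();
    tx.send(ServerMsg::CloseAll).unwrap();
    drop(tx); // close the channel so the loop below terminates

    let mut sessions: Vec<usize> = Vec::new();
    for msg in rx {
        match msg {
            ServerMsg::Connect(id) => sessions.push(id),
            ServerMsg::Disconnect(id) => sessions.retain(|s| *s != id),
            ServerMsg::CloseAll => println!("closing {} session(s)", sessions.len()),
        }
    }
    assert_eq!(sessions, vec![2]);
}
```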
server.rs
// use std::{ // hash::Hash, // str, // io::Write, // net::{SocketAddr, IpAddr, Ipv4Addr}, // sync::Mutex, // time::{Instant} // }; // use actix_http::{ // body::Body, // http::{ // header::{CONTENT_TYPE, SERVER}, // HeaderValue, // StatusCode, // }, // Error, Request, Response, // }; // use actix_service::{ // NewService, // Service, // }; // use actix_server::{ServerConfig}; // use actix_web::dev::Server use actix::prelude::*; // use bytes::{BytesMut, Bytes}; // use futures::{ // future::{ // ok, // join_all, // Future, // }, // Async, Poll, // }; // use serde_json::to_writer; // use actix_web::{ // App, // web, // middleware, // Error as AWError, // HttpResponse, // HttpRequest, // HttpServer, // }; // use actix_web_actors::ws::{Message as WsMessage, CloseCode, CloseReason }; // use askama::Template; //use actix_redis::{Command, RedisActor, Error as ARError}; use actix_redis::{RedisActor}; // use redis_async::{ // client::{PairedConnection, paired_connect, PubsubConnection, pubsub_connect}, // resp::{RespValue}, // }; use crate::ws::{Close as WsClose, WsSession}; // use super::db::{RedisConnection}; // pub struct App { // // db: PgConnection, // db: RedisConnection, // // db: Arc<PairedConnection>, // hdr_srv: HeaderValue, // hdr_ctjson: HeaderValue, // hdr_cthtml: HeaderValue, // } // impl Service for App { // type Request = Request; // type Response = Response; // type Error = Error; // type Future = Box<dyn Future<Item = Response, Error = Error>>; // #[inline] // fn poll_ready(&mut self) -> Poll<(), Self::Error> { // Ok(Async::Ready(())) // } // fn call(&mut self, req: Request) -> Self::Future { // let path = req.path(); // match path { // "/db" => { // let h_srv = self.hdr_srv.clone(); // let h_ct = self.hdr_ctjson.clone(); // Box::new(self.db.get("mydomain:one") // .map(|v:String| { // let mut body = BytesMut::new(); // serde_json::to_writer(Writer(&mut body), &Message{ // message: &*v // }).unwrap(); // let mut res = Response::with_body(StatusCode::OK, Body::Bytes(body.freeze())); // let hdrs = res.headers_mut(); // hdrs.insert(SERVER, h_srv); // hdrs.insert(CONTENT_TYPE, h_ct); // res // }) // ) // } // "/fortune" => { // let h_srv = self.hdr_srv.clone(); // let h_ct = self.hdr_cthtml.clone(); // // Box::new(self.db.tell_fortune().from_err().map(move |fortunes| { // Box::new(ok({ // let mut body = BytesMut::with_capacity(2048); // let mut writer = Writer(&mut body); // let _ = write!(writer, "{}", HelloTemplate { name : "tester" });//FortunesTemplate { fortunes }); // let mut res = Response::with_body(StatusCode::OK, Body::Bytes(body.freeze())); // let hdrs = res.headers_mut(); // hdrs.insert(SERVER, h_srv); // hdrs.insert(CONTENT_TYPE, h_ct); // res // })) // } // "/json" => { // Box::new(ok(json())) // } // "/plaintext" => { // Box::new(ok(plaintext())) // } // // "/queries" => { // // let q = utils::get_query_param(req.uri().query().unwrap_or("")) as usize; // // let h_srv = self.hdr_srv.clone(); // // let h_ct = self.hdr_ctjson.clone(); // // Box::new(self.db.get_worlds(q).from_err().map(move |worlds| { // // let mut body = BytesMut::with_capacity(35 * worlds.len()); // // to_writer(Writer(&mut body), &worlds).unwrap(); // // let mut res = // // Response::with_body(StatusCode::OK, Body::Bytes(body.freeze())); // // let hdrs = res.headers_mut(); // // hdrs.insert(SERVER, h_srv); // // hdrs.insert(CONTENT_TYPE, h_ct); // // res // // })) // // } // // "/updates" => { // // let q = utils::get_query_param(req.uri().query().unwrap_or("")) as usize; // // let h_srv = 
self.hdr_srv.clone(); // // let h_ct = self.hdr_ctjson.clone(); // // Box::new(self.db.update(q).from_err().map(move |worlds| { // // let mut body = BytesMut::with_capacity(35 * worlds.len()); // // to_writer(Writer(&mut body), &worlds).unwrap(); // // let mut res = // // Response::with_body(StatusCode::OK, Body::Bytes(body.freeze())); // // let hdrs = res.headers_mut(); // // hdrs.insert(SERVER, h_srv); // // hdrs.insert(CONTENT_TYPE, h_ct); // // res // // })) // // } // _ => Box::new(ok(Response::new(StatusCode::NOT_FOUND))), // } // } // } // #[derive(Clone)] // pub struct AppFactory; // impl NewService for AppFactory { // type Config = ServerConfig; // type Request = Request; // type Response = Response; // type Error = Error; // type Service = App; // type InitError = (); // type Future = Box<dyn Future<Item = Self::Service, Error = Self::InitError>>; // fn new_service(&self, _: &ServerConfig) -> Self::Future { // // const DB_URL: &str = // // "postgres://benchmarkdbuser:benchmarkdbpass@tfb-database/hello_world"; // // Box::new(PgConnection::connect(DB_URL).map(|db| App { // // db, // // hdr_srv: HeaderValue::from_static("Actix"), // // hdr_ctjson: HeaderValue::from_static("application/json"), // // hdr_cthtml: HeaderValue::from_static("text/html; charset=utf-8"), // // })); // Box::new( // // paired_connect(&String::from(DB_URL).parse().unwrap()) // RedisConnection::connect(DB_URL) // .map_err(|_| ()) // .map(|db|{ // let app = App { // db, // hdr_srv: HeaderValue::from_static("Actix"), // hdr_ctjson: HeaderValue::from_static("application/json"), // hdr_cthtml: HeaderValue::from_static("text/html; charset=utf-8"), // }; // app // }) // // }) // ) // } // } // pub fn json() -> HttpResponse { // let message = Message { // message: "Hello, World!", // }; // let mut body = BytesMut::with_capacity(SIZE); // serde_json::to_writer(Writer(&mut body), &message).unwrap(); // let mut res = HttpResponse::with_body(StatusCode::OK, Body::Bytes(body.freeze())); // res.headers_mut() // .insert(SERVER, HeaderValue::from_static("Actix")); // res.headers_mut() // .insert(CONTENT_TYPE, HeaderValue::from_static("application/json")); // res // } // fn plaintext() -> HttpResponse { // let mut res = HttpResponse::with_body( // StatusCode::OK, // Body::Bytes(Bytes::from_static(b"Hello, World!")), // ); // res.headers_mut() // .insert(SERVER, HeaderValue::from_static("Actix")); // res.headers_mut() // .insert(CONTENT_TYPE, HeaderValue::from_static("text/plain")); // res // } // #[derive(Template)] // #[template(path = "test.html")] // struct HelloTemplate<'a> { // name: &'a str, // } // pub fn root_handler( // req: web::HttpRequest // ) -> impl Future<Item = HttpResponse, Error = ()> { // let path = req.match_info().query("filename").parse().unwrap(); // HttpResponse::from( // Ok( NamedFile::open(path).unwrap() ) // ) // // ok( HttpResponse::Ok().body("hi")) // // Ok(HttpResponse::InternalServerError().finish()) // } pub struct WsServer { sessions: Vec<Addr<WsSession>>, db: Addr<RedisActor>, } impl Actor for WsServer { type Context = Context<Self>; } impl WsServer { pub fn new(db : Addr<RedisActor>) -> WsServer { let sessions = vec![]; WsServer { sessions, db } } } impl WsServer { fn close_all(&self) { // for s in &*self.sessions.lock().unwrap() { for s in &self.sessions { // if let Some(v) = s.upgrade(){ if s.connected() { // println!("sending WsClose"); // v.do_send(WsClose); s.do_send(WsClose); //WsMessage::Close(Some(CloseReason { code: CloseCode::Restart, description: None }))); } } } } /// new 
websocket connection #[derive(Message)] pub struct Connect { pub addr: Addr<WsSession>, } // impl Message for Connect { // type Result = usize; // } impl Handler<Connect> for WsServer { type Result = (); fn handle(&mut self, msg: Connect, _ctx: &mut Self::Context) -> Self::Result { // println!("{:?} joined wsserver", msg.addr); // let mut s = &mut *self.sessions.get_mut().unwrap(); let s = &mut self.sessions; s.push(msg.addr); //.downgrade()); println!( "new web socket added to server : {} sockets opened", s.len() ); } } /// websocket session disconnected #[derive(Message)] pub struct Disconnect { pub addr: Addr<WsSession>, // pub id : usize, } impl Handler<Disconnect> for WsServer { type Result = (); fn
(&mut self, msg: Disconnect, _ctx: &mut Self::Context) -> Self::Result { println!("a websocket session requested disconnect"); let ss = &mut self.sessions; if let Some(i) = ss.iter().position(|s| *s == msg.addr) { ss.remove(i); println!( "a websocket session removed from server : {} sockets opened", ss.len() ); } } } /// request to close all other connections #[derive(Message)] pub struct CloseAll; impl Handler<CloseAll> for WsServer { type Result = (); fn handle(&mut self, _msg: CloseAll, _ctx: &mut Self::Context) -> Self::Result { println!("received CloseAll"); self.close_all(); } }
handle
identifier_name
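For context, here is a minimal sketch of the session side of this registry. It is hypothetical: the real `WsSession` is not shown in this file, and the sketch assumes actix-web-actors' `ws::WebsocketContext`. A session registers itself on start and deregisters on stop using the `Connect`/`Disconnect` messages handled above; for `close_all` to work, `WsSession` must also implement `Handler<WsClose>`.

use actix::prelude::*;
use actix_web_actors::ws; // assumed dependency, not shown in this file

pub struct WsSession {
    pub server: Addr<WsServer>,
}

impl Actor for WsSession {
    type Context = ws::WebsocketContext<Self>;

    fn started(&mut self, ctx: &mut Self::Context) {
        // Register this session so the server can reach it later (e.g. with WsClose).
        self.server.do_send(Connect { addr: ctx.address() });
    }

    fn stopping(&mut self, _ctx: &mut Self::Context) -> Running {
        // Deregister so the server drops its Addr and the session count stays accurate.
        self.server.do_send(Disconnect { addr: _ctx.address() });
        Running::Stop
    }
}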
gimli.rs
_stash: $stash, } }}; } fn mmap(path: &Path) -> Option<Mmap> { let file = File::open(path).ok()?; let len = file.metadata().ok()?.len().try_into().ok()?; unsafe { Mmap::map(&file, len) } } cfg_if::cfg_if! { if #[cfg(windows)] { use core::mem::MaybeUninit; use super::super::windows::*; use mystd::os::windows::prelude::*; use alloc::vec; mod coff; use self::coff::Object; // For loading native libraries on Windows, see some discussion on // rust-lang/rust#71060 for the various strategies here. fn native_libraries() -> Vec<Library> { let mut ret = Vec::new(); unsafe { add_loaded_images(&mut ret); } return ret; } unsafe fn add_loaded_images(ret: &mut Vec<Library>) { let snap = CreateToolhelp32Snapshot(TH32CS_SNAPMODULE, 0); if snap == INVALID_HANDLE_VALUE { return; } let mut me = MaybeUninit::<MODULEENTRY32W>::zeroed().assume_init(); me.dwSize = mem::size_of_val(&me) as DWORD; if Module32FirstW(snap, &mut me) == TRUE { loop { if let Some(lib) = load_library(&me) { ret.push(lib); } if Module32NextW(snap, &mut me) != TRUE { break; } } } CloseHandle(snap); } unsafe fn load_library(me: &MODULEENTRY32W) -> Option<Library> { let pos = me .szExePath .iter() .position(|i| *i == 0) .unwrap_or(me.szExePath.len()); let name = OsString::from_wide(&me.szExePath[..pos]); // MinGW libraries currently don't support ASLR // (rust-lang/rust#16514), but DLLs can still be relocated around in // the address space. It appears that addresses in debug info are // all as-if this library was loaded at its "image base", which is a // field in its COFF file headers. Since this is what debuginfo // seems to list we parse the symbol table and store addresses as if // the library was loaded at "image base" as well. // // The library may not be loaded at "image base", however. // (presumably something else may be loaded there?) This is where // the `bias` field comes into play, and we need to figure out the // value of `bias` here. Unfortunately though it's not clear how to // acquire this from a loaded module. What we do have, however, is // the actual load address (`modBaseAddr`). // // As a bit of a cop-out for now we mmap the file, read the file // header information, then drop the mmap. This is wasteful because // we'll probably reopen the mmap later, but this should work well // enough for now. // // Once we have the `image_base` (desired load location) and the // `base_addr` (actual load location) we can fill in the `bias` // (difference between the actual and desired) and then the stated // address of each segment is the `image_base` since that's what the // file says. // // For now it appears that unlike ELF/MachO we can make do with one // segment per library, using `modBaseSize` as the whole size. let mmap = mmap(name.as_ref())?; let image_base = coff::get_image_base(&mmap)?; let base_addr = me.modBaseAddr as usize; Some(Library { name, bias: base_addr.wrapping_sub(image_base), segments: vec![LibrarySegment { stated_virtual_memory_address: image_base, len: me.modBaseSize as usize, }], }) } } else if #[cfg(target_os = "macos")] { // macOS uses the Mach-O file format and uses DYLD-specific APIs to // load a list of native libraries that are part of the application.
use mystd::os::unix::prelude::*; use mystd::ffi::{OsStr, CStr}; mod macho; use self::macho::Object; #[allow(deprecated)] fn native_libraries() -> Vec<Library> { let mut ret = Vec::new(); let images = unsafe { libc::_dyld_image_count() }; for i in 0..images { ret.extend(native_library(i)); } return ret; } #[allow(deprecated)] fn native_library(i: u32) -> Option<Library> { use object::macho; use object::read::macho::{MachHeader, Segment}; use object::{Bytes, NativeEndian}; // Fetch the name of this library which corresponds to the path of // where to load it as well. let name = unsafe { let name = libc::_dyld_get_image_name(i); if name.is_null() { return None; } CStr::from_ptr(name) }; // Load the image header of this library and delegate to `object` to // parse all the load commands so we can figure out all the segments // involved here. let (mut load_commands, endian) = unsafe { let header = libc::_dyld_get_image_header(i); if header.is_null() { return None; } match (*header).magic { macho::MH_MAGIC => { let endian = NativeEndian; let header = &*(header as *const macho::MachHeader32<NativeEndian>); let data = core::slice::from_raw_parts( header as *const _ as *const u8, mem::size_of_val(header) + header.sizeofcmds.get(endian) as usize ); (header.load_commands(endian, Bytes(data)).ok()?, endian) } macho::MH_MAGIC_64 => { let endian = NativeEndian; let header = &*(header as *const macho::MachHeader64<NativeEndian>); let data = core::slice::from_raw_parts( header as *const _ as *const u8, mem::size_of_val(header) + header.sizeofcmds.get(endian) as usize ); (header.load_commands(endian, Bytes(data)).ok()?, endian) } _ => return None, } }; // Iterate over the segments and register known regions for segments // that we find. Additionally record information about text segments // for processing later, see comments below. let mut segments = Vec::new(); let mut first_text = 0; let mut text_fileoff_zero = false; while let Some(cmd) = load_commands.next().ok()? { if let Some((seg, _)) = cmd.segment_32().ok()? { if seg.name() == b"__TEXT" { first_text = segments.len(); if seg.fileoff(endian) == 0 && seg.filesize(endian) > 0 { text_fileoff_zero = true; } } segments.push(LibrarySegment { len: seg.vmsize(endian).try_into().ok()?, stated_virtual_memory_address: seg.vmaddr(endian).try_into().ok()?, }); } if let Some((seg, _)) = cmd.segment_64().ok()? { if seg.name() == b"__TEXT" { first_text = segments.len(); if seg.fileoff(endian) == 0 && seg.filesize(endian) > 0 { text_fileoff_zero = true; } } segments.push(LibrarySegment { len: seg.vmsize(endian).try_into().ok()?, stated_virtual_memory_address: seg.vmaddr(endian).try_into().ok()?, }); } } // Determine the "slide" for this library which ends up being the // bias we use to figure out where in memory objects are loaded. // This is a bit of a weird computation though and is the result of // trying a few things in the wild and seeing what sticks. // // The general idea is that the `bias` plus a segment's // `stated_virtual_memory_address` is going to be where in the // actual address space the segment resides. The other thing we rely // on though is that a real address minus the `bias` is the index to // look up in the symbol table and debuginfo. // // It turns out, though, that for system loaded libraries these // calculations are incorrect. For native executables, however, it // appears correct. Lifting some logic from LLDB's source, it has // some special-casing for the first `__TEXT` section loaded from // file offset 0 with a nonzero size.
For whatever reason when this // is present it appears to mean that the symbol table is relative // to just the vmaddr slide for the library. If it's *not* present // then the symbol table is relative to the vmaddr slide plus // the segment's stated address. // // To handle this situation if we *don't* find a text section at // file offset zero then we increase the bias by the first text // section's stated address and decrease all stated addresses by // that amount as well. That way the symbol table always appears // relative to the library's bias amount. This appears to have the // right results for symbolizing via the symbol table. // // Honestly I'm not entirely sure whether this is right or if // there's something else that should indicate how to do this. For // now though this seems to work well enough (?) and we should // always be able to tweak this over time if necessary. // // For some more information see #318 let mut slide = unsafe { libc::_dyld_get_image_vmaddr_slide(i) as usize }; if !text_fileoff_zero { let adjust = segments[first_text].stated_virtual_memory_address; for segment in segments.iter_mut() { segment.stated_virtual_memory_address -= adjust; } slide += adjust; } Some(Library { name: OsStr::from_bytes(name.to_bytes()).to_owned(), segments, bias: slide, }) } } else if #[cfg(any( target_os = "linux", target_os = "fuchsia", ))] { // Other Unix (e.g. Linux) platforms use ELF as an object file format // and typically implement an API called `dl_iterate_phdr` to load // native libraries. use mystd::os::unix::prelude::*; use mystd::ffi::{OsStr, CStr}; mod elf; use self::elf::Object; fn native_libraries() -> Vec<Library> { let mut ret = Vec::new(); unsafe { libc::dl_iterate_phdr(Some(callback), &mut ret as *mut _ as *mut _); } return ret; } unsafe extern "C" fn callback( info: *mut libc::dl_phdr_info, _size: libc::size_t, vec: *mut libc::c_void, ) -> libc::c_int { let libs = &mut *(vec as *mut Vec<Library>); let name = if (*info).dlpi_name.is_null() || *(*info).dlpi_name == 0 { if libs.is_empty() { mystd::env::current_exe().map(|e| e.into()).unwrap_or_default() } else { OsString::new() } } else { let bytes = CStr::from_ptr((*info).dlpi_name).to_bytes(); OsStr::from_bytes(bytes).to_owned() }; let headers = core::slice::from_raw_parts((*info).dlpi_phdr, (*info).dlpi_phnum as usize); libs.push(Library { name, segments: headers .iter() .map(|header| LibrarySegment { len: (*header).p_memsz as usize, stated_virtual_memory_address: (*header).p_vaddr as usize, }) .collect(), bias: (*info).dlpi_addr as usize, }); 0 } } else if #[cfg(target_env = "libnx")] { // DevkitA64 doesn't natively support debug info, but the build system will place debug // info at the path `romfs:/debug_info.elf`. mod elf; use self::elf::Object; fn native_libraries() -> Vec<Library> { extern "C" { static __start__: u8; } let bias = unsafe { &__start__ } as *const u8 as usize; let mut ret = Vec::new(); let mut segments = Vec::new(); segments.push(LibrarySegment { stated_virtual_memory_address: 0, len: usize::max_value() - bias, }); let path = "romfs:/debug_info.elf"; ret.push(Library { name: path.into(), segments, bias, }); ret } } else { // Everything else should use ELF, but doesn't know how to load native // libraries. use mystd::os::unix::prelude::*; mod elf; use self::elf::Object; fn native_libraries() -> Vec<Library> { Vec::new() } } } #[derive(Default)] struct Cache { /// All known shared libraries that have been loaded.
libraries: Vec<Library>, /// Mappings cache where we retain parsed dwarf information. /// /// This list has a fixed capacity for its entire lifetime which never /// increases. The `usize` element of each pair is an index into `libraries` /// above where `usize::max_value()` represents the current executable. The /// `Mapping` is the corresponding parsed dwarf information. /// /// Note that this is basically an LRU cache and we'll be shifting things /// around in here as we symbolize addresses. mappings: Vec<(usize, Mapping)>, } struct Library { name: OsString, /// Segments of this library loaded into memory, and where they're loaded. segments: Vec<LibrarySegment>, /// The "bias" of this library, typically where it's loaded into memory. /// This value is added to each segment's stated address to get the actual /// virtual memory address that the segment is loaded into. Additionally /// this bias is subtracted from real virtual memory addresses to index into /// debuginfo and the symbol table. bias: usize, } struct LibrarySegment { /// The stated address of this segment in the object file. This is not /// actually where the segment is loaded, but rather this address plus the /// containing library's `bias` is where to find it. stated_virtual_memory_address: usize, /// The size of this segment in memory. len: usize, } // unsafe because this is required to be externally synchronized pub unsafe fn clear_symbol_cache() { Cache::with_global(|cache| cache.mappings.clear()); } impl Cache { fn new() -> Cache { Cache { mappings: Vec::with_capacity(MAPPINGS_CACHE_SIZE), libraries: native_libraries(), } } // unsafe because this is required to be externally synchronized unsafe fn with_global(f: impl FnOnce(&mut Self))
fn avma_to_svma(&self, addr: *const u8) -> Option<(usize, *const u8)> { self.libraries .iter() .enumerate() .filter_map(|(i, lib)| { // First up, test if this `lib` has any segment containing the // `addr` (handling relocation). If this check passes then we // can continue below and actually translate the address. // // Note that we're using `wrapping_add` here to avoid overflow // checks. It's been seen in the wild that the SVMA + bias // computation overflows. It seems a bit odd that would happen // but there's not a huge amount we can do about it other than // probably just ignore those segments since they're likely // pointing off into space. This originally came up in // rust-lang/backtrace-rs#329. if !lib.segments.iter().any(|s| { let svma = s.stated_virtual_memory_address; let start = svma.wrapping_add(lib.bias); let end = start.wrapping_add(s.len); let address = addr as usize; start <= address && address < end }) { return None; } // Now that we know `lib` contains `addr`, we can offset with // the bias to find the stated virtual memory address. let svma = (addr as usize).wrapping_sub(lib.bias); Some((i, svma as *const u8)) }) .next() }
{ // A very small, very simple LRU cache for debug info mappings. // // The hit rate should be very high, since the typical stack doesn't cross // between many shared libraries. // // The `addr2line::Context` structures are pretty expensive to create. Its // cost is expected to be amortized by subsequent `locate` queries, which // leverage the structures built when constructing `addr2line::Context`s to // get nice speedups. If we didn't have this cache, that amortization would // never happen, and symbolicating backtraces would be ssssllllooooowwww. static mut MAPPINGS_CACHE: Option<Cache> = None; f(MAPPINGS_CACHE.get_or_insert_with(|| Cache::new())) }
identifier_body
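The bias arithmetic that the comments above keep returning to is easy to show in isolation. The following standalone sketch (hypothetical names, not part of the file) mirrors the containment check and translation performed by `avma_to_svma`: a segment contains an actual virtual memory address (AVMA) iff `svma + bias <= avma < svma + bias + len`, and the translation back to a stated virtual memory address (SVMA) is then simply `avma - bias`.

struct Seg {
    stated_virtual_memory_address: usize,
    len: usize,
}

// wrapping_add/wrapping_sub mirror the overflow-tolerant checks in the real code.
fn avma_to_svma_demo(avma: usize, bias: usize, segs: &[Seg]) -> Option<usize> {
    for s in segs {
        let start = s.stated_virtual_memory_address.wrapping_add(bias);
        let end = start.wrapping_add(s.len);
        if start <= avma && avma < end {
            return Some(avma.wrapping_sub(bias));
        }
    }
    None
}

fn main() {
    // A segment stated at 0x1000, loaded with a bias (slide) of 0x4000.
    let segs = [Seg { stated_virtual_memory_address: 0x1000, len: 0x2000 }];
    assert_eq!(avma_to_svma_demo(0x5800, 0x4000, &segs), Some(0x1800));
}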
types.rs
use javascriptcore_sys::*; use std::convert::TryFrom;
macro_rules! retain_release { ($name:ident, $ffi_ref:ty, $retain_fn:tt, $drop_fn:tt) => { impl Drop for $name { fn drop(&mut self) { unsafe { $drop_fn(self.0) }; } } impl Clone for $name { fn clone(&self) -> $name { let x = unsafe { $retain_fn(self.0) }; $name(x) } } impl Deref for $name { type Target = $ffi_ref; fn deref(&self) -> &$ffi_ref { &self.0 } } }; } unsafe impl Send for GlobalContext {} unsafe impl Sync for GlobalContext {} unsafe impl Send for Context {} unsafe impl Sync for Context {} unsafe impl Send for String {} unsafe impl Sync for String {} unsafe impl Send for Object {} unsafe impl Sync for Object {} unsafe impl Send for ContextGroup {} unsafe impl Sync for ContextGroup {} unsafe impl Send for Value {} unsafe impl Sync for Value {} #[derive(Copy, Clone, Debug)] pub struct Context(pub(crate) JSContextRef); pub struct ContextGroup(pub(crate) JSContextGroupRef); pub struct GlobalContext(pub(crate) JSGlobalContextRef); pub struct Object(pub(crate) Context, pub(crate) JSObjectRef); pub struct String(pub(crate) JSStringRef); use std::fmt; impl fmt::Debug for Object { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let mut s = f.debug_struct("Object"); unsafe { let array = JSObjectCopyPropertyNames(*self.0, self.1); let size = JSPropertyNameArrayGetCount(array); for i in 0..size { let js_ref = JSPropertyNameArrayGetNameAtIndex(array, i); let prop_name = std::string::String::from(&String(js_ref)); let prop_value = Value::from( self.0, JSObjectGetPropertyAtIndex(*self.0, self.1, i as u32, null_mut()), ); s.field(&prop_name, &format!("{:?}", prop_value)); } } s.finish() } } impl fmt::Debug for Exception { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("Exception") .field("stack", &self.stack()) .field("message", &self.message()) .finish() } } impl fmt::Display for Exception { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { writeln!(f, "Message: {}", &self.message())?; writeln!(f, "Stack:")?; write!(f, "{}", self.stack()) } } #[derive(Debug, Copy, Clone)] pub enum ValueType { Undefined, Null, Boolean, Number, String, Object, Symbol, } #[derive(Debug)] pub struct Value( pub(crate) JSValueRef, pub(crate) ValueType, pub(crate) Context, ); pub trait ContextType { unsafe fn as_ptr(&self) -> JSContextRef; fn undefined(&self) -> Value { let ptr = unsafe { self.as_ptr() }; let value = unsafe { JSValueMakeUndefined(ptr) }; Value(value, ValueType::Undefined, Context(ptr)) } } impl ContextType for GlobalContext { unsafe fn as_ptr(&self) -> JSContextRef { self.0 } } impl ContextType for Context { unsafe fn as_ptr(&self) -> JSContextRef { self.0 } } impl Deref for Context { type Target = JSContextRef; fn deref(&self) -> &JSContextRef { &self.0 } } impl Deref for Object { type Target = JSObjectRef; fn deref(&self) -> &JSObjectRef { &self.1 } } retain_release!( ContextGroup, JSContextGroupRef, JSContextGroupRetain, JSContextGroupRelease ); retain_release!( GlobalContext, JSGlobalContextRef, JSGlobalContextRetain, JSGlobalContextRelease ); retain_release!(String, JSStringRef, JSStringRetain, JSStringRelease); impl ContextGroup { pub fn new() -> ContextGroup { let ptr = unsafe { JSContextGroupCreate() }; ContextGroup(ptr) } pub fn create_global_context(&self) -> GlobalContext { let ptr = unsafe { JSGlobalContextCreateInGroup(self.0, null_mut()) }; GlobalContext(ptr) } } pub struct Exception(Object); impl Exception { pub fn stack(&self) -> std::string::String { let stack_val = self .0 .get_property(&String::new("stack").unwrap()) .expect("no `stack` property 
found"); let stack_str = String::try_from(&stack_val).expect("no string property found for `stack`"); std::string::String::from(&stack_str) } pub fn message(&self) -> std::string::String { let message_val = self .0 .get_property(&String::new("message").unwrap()) .expect("no `message` property found"); let message_str = String::try_from(&message_val).expect("no string property found for `message`"); std::string::String::from(&message_str) } } impl GlobalContext { pub fn global_object(&self) -> Object { let ptr = unsafe { JSContextGetGlobalObject(self.0) }; Object(Context(self.0), ptr) } pub fn evaluate_script_sync(&self, script: &String) -> Result<Value, Exception> { let mut exception = null(); let ret = unsafe { JSEvaluateScript(self.0, **script, null_mut(), null_mut(), 0, &mut exception) }; if exception == null_mut() { Ok(Value::from(Context(self.0), ret)) } else { let value = Value::from(Context(self.0), exception); let obj = Object::try_from(&value).unwrap(); Err(Exception(obj)) } } pub async fn evaluate_script<'a>(&'a self, script: &'a String) -> Result<Value, Exception> { self.evaluate_script_sync(script) } pub fn add_function( &self, name: &str, callback: JsCallback, ) -> Result<(), Box<dyn std::error::Error>> { let name = String::new(name).unwrap(); let obj = self.global_object(); let fn_obj = obj.make_function_with_callback(&name, callback); obj.set_property(&name, Value::from(Context(self.0), *fn_obj)); Ok(()) } } type JsCallback = fn(Context, /*thisObject*/ Object, /*arguments*/ Vec<Value>) -> Result<Value, String>; extern "C" fn callback_trampoline( ctx: JSContextRef, function: JSObjectRef, this_object: JSObjectRef, argument_count: usize, arguments: *const JSValueRef, exception: *mut JSValueRef, ) -> JSValueRef { let callback = unsafe { std::mem::transmute::<*mut ::std::os::raw::c_void, JsCallback>(JSObjectGetPrivate(function)) }; let ctx = Context(ctx); let args = unsafe { std::slice::from_raw_parts(arguments, argument_count) .into_iter() .map(|v| Value::from(ctx, *v)) .collect::<Vec<_>>() }; match callback(ctx, Object(ctx, this_object), args) { Ok(v) => v.0, Err(e) => unsafe { *exception = e.to_js_value(&ctx); JSValueMakeUndefined(ctx.0) }, } } impl ValueType { unsafe fn from(ctx: Context, value_ref: JSValueRef) -> ValueType { let raw_ty = JSValueGetType(ctx.0, value_ref); match raw_ty { 0 => ValueType::Undefined, 1 => ValueType::Null, 2 => ValueType::Boolean, 3 => ValueType::Number, 4 => ValueType::String, 5 => ValueType::Object, 6 => ValueType::Symbol, _ => unreachable!(), } } } impl Value { fn from(ctx: Context, value_ref: JSValueRef) -> Value { Value(value_ref, unsafe { ValueType::from(ctx, value_ref) }, ctx) } pub fn to_string(&self) -> std::string::String { match self.js_type() { ValueType::String => { let js_str = String::try_from(self).expect("string"); std::string::String::from(&js_str) } ValueType::Number => { let n = f64::try_from(self).expect("f64"); format!("{}", n) } ValueType::Boolean => { let v = bool::try_from(self).expect("bool"); format!("{}", v) } ValueType::Null => "null".into(), ValueType::Undefined => "undefined".into(), ValueType::Symbol => "Symbol(...)".into(), ValueType::Object => { let obj = Object::try_from(self).expect("object"); format!("{:?}", obj) } } } } fn rust_function_defn(name: &String) -> JSClassDefinition { JSClassDefinition { version: 0, attributes: 0, className: **name as *const _, parentClass: null_mut(), staticValues: null(), staticFunctions: null(), initialize: None, finalize: None, hasProperty: None, getProperty: None, setProperty: 
None, deleteProperty: None, getPropertyNames: None, callAsFunction: Some(callback_trampoline), callAsConstructor: None, hasInstance: None, convertToType: None, } } impl Value { pub fn js_type(&self) -> ValueType { self.1 } } impl Object { pub fn make_function_with_callback(&self, name: &String, callback: JsCallback) -> Object { let cls = unsafe { JSClassCreate(&rust_function_defn(name)) }; let ptr = unsafe { JSObjectMake(*self.0, cls, callback as _) }; if unsafe { JSObjectGetPrivate(ptr) } == null_mut() { panic!("No private"); } Object(self.0, ptr) } pub fn set_property(&self, name: &String, value: Value) { unsafe { JSObjectSetProperty(*self.0, self.1, **name, value.0, 0, null_mut()) }; } pub fn get_property(&self, name: &String) -> Result<Value, Value> { let mut exception = null(); let ret = unsafe { JSObjectGetProperty(*self.0, self.1, **name, &mut exception) }; if exception.is_null() { Ok(Value::from(self.0, ret)) } else { Err(Value::from(self.0, exception)) } } pub fn to_js_value(&self) -> Value { Value(self.1, ValueType::Object, self.0) } } impl String { pub fn new(s: &str) -> Result<String, Box<dyn std::error::Error>> { let s = CString::new(s)?; let v = unsafe { JSStringCreateWithUTF8CString(s.as_ptr() as *const i8) }; Ok(String(v)) } pub fn to_js_value(&self, ctx: &Context) -> JSValueRef { unsafe { JSValueMakeString(**ctx, self.0) } } }
use std::ffi::CString; use std::ops::Deref; use std::ptr::{null, null_mut};
random_line_split
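A short usage sketch tying the wrapper above together. It is illustrative only: the `greet` function and the script text are made up, and the sketch relies solely on the API shown in this file (`ContextGroup::new`, `create_global_context`, `add_function`, `evaluate_script_sync`).

fn main() {
    let group = ContextGroup::new();
    let ctx = group.create_global_context();

    // `JsCallback` is a plain fn pointer, so a non-capturing closure coerces to it.
    ctx.add_function("greet", |c, _this, args| {
        println!("greet called with {} argument(s)", args.len());
        Ok(c.undefined())
    })
    .unwrap();

    let script = String::new("greet(1, 2); 'done'").unwrap();
    match ctx.evaluate_script_sync(&script) {
        Ok(v) => println!("result: {}", v.to_string()),
        Err(e) => eprintln!("uncaught exception: {}", e),
    }
}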
types.rs
use javascriptcore_sys::*; use std::convert::TryFrom; use std::ffi::CString; use std::ops::Deref; use std::ptr::{null, null_mut}; macro_rules! retain_release { ($name:ident, $ffi_ref:ty, $retain_fn:tt, $drop_fn:tt) => { impl Drop for $name { fn drop(&mut self) { unsafe { $drop_fn(self.0) }; } } impl Clone for $name { fn clone(&self) -> $name { let x = unsafe { $retain_fn(self.0) }; $name(x) } } impl Deref for $name { type Target = $ffi_ref; fn deref(&self) -> &$ffi_ref { &self.0 } } }; } unsafe impl Send for GlobalContext {} unsafe impl Sync for GlobalContext {} unsafe impl Send for Context {} unsafe impl Sync for Context {} unsafe impl Send for String {} unsafe impl Sync for String {} unsafe impl Send for Object {} unsafe impl Sync for Object {} unsafe impl Send for ContextGroup {} unsafe impl Sync for ContextGroup {} unsafe impl Send for Value {} unsafe impl Sync for Value {} #[derive(Copy, Clone, Debug)] pub struct Context(pub(crate) JSContextRef); pub struct ContextGroup(pub(crate) JSContextGroupRef); pub struct GlobalContext(pub(crate) JSGlobalContextRef); pub struct Object(pub(crate) Context, pub(crate) JSObjectRef); pub struct String(pub(crate) JSStringRef); use std::fmt; impl fmt::Debug for Object { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result
} impl fmt::Debug for Exception { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("Exception") .field("stack", &self.stack()) .field("message", &self.message()) .finish() } } impl fmt::Display for Exception { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { writeln!(f, "Message: {}", &self.message())?; writeln!(f, "Stack:")?; write!(f, "{}", self.stack()) } } #[derive(Debug, Copy, Clone)] pub enum ValueType { Undefined, Null, Boolean, Number, String, Object, Symbol, } #[derive(Debug)] pub struct Value( pub(crate) JSValueRef, pub(crate) ValueType, pub(crate) Context, ); pub trait ContextType { unsafe fn as_ptr(&self) -> JSContextRef; fn undefined(&self) -> Value { let ptr = unsafe { self.as_ptr() }; let value = unsafe { JSValueMakeUndefined(ptr) }; Value(value, ValueType::Undefined, Context(ptr)) } } impl ContextType for GlobalContext { unsafe fn as_ptr(&self) -> JSContextRef { self.0 } } impl ContextType for Context { unsafe fn as_ptr(&self) -> JSContextRef { self.0 } } impl Deref for Context { type Target = JSContextRef; fn deref(&self) -> &JSContextRef { &self.0 } } impl Deref for Object { type Target = JSObjectRef; fn deref(&self) -> &JSObjectRef { &self.1 } } retain_release!( ContextGroup, JSContextGroupRef, JSContextGroupRetain, JSContextGroupRelease ); retain_release!( GlobalContext, JSGlobalContextRef, JSGlobalContextRetain, JSGlobalContextRelease ); retain_release!(String, JSStringRef, JSStringRetain, JSStringRelease); impl ContextGroup { pub fn new() -> ContextGroup { let ptr = unsafe { JSContextGroupCreate() }; ContextGroup(ptr) } pub fn create_global_context(&self) -> GlobalContext { let ptr = unsafe { JSGlobalContextCreateInGroup(self.0, null_mut()) }; GlobalContext(ptr) } } pub struct Exception(Object); impl Exception { pub fn stack(&self) -> std::string::String { let stack_val = self .0 .get_property(&String::new("stack").unwrap()) .expect("no `stack` property found"); let stack_str = String::try_from(&stack_val).expect("no string property found for `stack`"); std::string::String::from(&stack_str) } pub fn message(&self) -> std::string::String { let message_val = self .0 .get_property(&String::new("message").unwrap()) .expect("no `message` property found"); let message_str = String::try_from(&message_val).expect("no string property found for `message`"); std::string::String::from(&message_str) } } impl GlobalContext { pub fn global_object(&self) -> Object { let ptr = unsafe { JSContextGetGlobalObject(self.0) }; Object(Context(self.0), ptr) } pub fn evaluate_script_sync(&self, script: &String) -> Result<Value, Exception> { let mut exception = null(); let ret = unsafe { JSEvaluateScript(self.0, **script, null_mut(), null_mut(), 0, &mut exception) }; if exception == null_mut() { Ok(Value::from(Context(self.0), ret)) } else { let value = Value::from(Context(self.0), exception); let obj = Object::try_from(&value).unwrap(); Err(Exception(obj)) } } pub async fn evaluate_script<'a>(&'a self, script: &'a String) -> Result<Value, Exception> { self.evaluate_script_sync(script) } pub fn add_function( &self, name: &str, callback: JsCallback, ) -> Result<(), Box<dyn std::error::Error>> { let name = String::new(name).unwrap(); let obj = self.global_object(); let fn_obj = obj.make_function_with_callback(&name, callback); obj.set_property(&name, Value::from(Context(self.0), *fn_obj)); Ok(()) } } type JsCallback = fn(Context, /*thisObject*/ Object, /*arguments*/ Vec<Value>) -> Result<Value, String>; extern "C" fn callback_trampoline( ctx: JSContextRef, function: 
JSObjectRef, this_object: JSObjectRef, argument_count: usize, arguments: *const JSValueRef, exception: *mut JSValueRef, ) -> JSValueRef { let callback = unsafe { std::mem::transmute::<*mut ::std::os::raw::c_void, JsCallback>(JSObjectGetPrivate(function)) }; let ctx = Context(ctx); let args = unsafe { std::slice::from_raw_parts(arguments, argument_count) .into_iter() .map(|v| Value::from(ctx, *v)) .collect::<Vec<_>>() }; match callback(ctx, Object(ctx, this_object), args) { Ok(v) => v.0, Err(e) => unsafe { *exception = e.to_js_value(&ctx); JSValueMakeUndefined(ctx.0) }, } } impl ValueType { unsafe fn from(ctx: Context, value_ref: JSValueRef) -> ValueType { let raw_ty = JSValueGetType(ctx.0, value_ref); match raw_ty { 0 => ValueType::Undefined, 1 => ValueType::Null, 2 => ValueType::Boolean, 3 => ValueType::Number, 4 => ValueType::String, 5 => ValueType::Object, 6 => ValueType::Symbol, _ => unreachable!(), } } } impl Value { fn from(ctx: Context, value_ref: JSValueRef) -> Value { Value(value_ref, unsafe { ValueType::from(ctx, value_ref) }, ctx) } pub fn to_string(&self) -> std::string::String { match self.js_type() { ValueType::String => { let js_str = String::try_from(self).expect("string"); std::string::String::from(&js_str) } ValueType::Number => { let n = f64::try_from(self).expect("f64"); format!("{}", n) } ValueType::Boolean => { let v = bool::try_from(self).expect("bool"); format!("{}", v) } ValueType::Null => "null".into(), ValueType::Undefined => "undefined".into(), ValueType::Symbol => "Symbol(...)".into(), ValueType::Object => { let obj = Object::try_from(self).expect("object"); format!("{:?}", obj) } } } } fn rust_function_defn(name: &String) -> JSClassDefinition { JSClassDefinition { version: 0, attributes: 0, className: **name as *const _, parentClass: null_mut(), staticValues: null(), staticFunctions: null(), initialize: None, finalize: None, hasProperty: None, getProperty: None, setProperty: None, deleteProperty: None, getPropertyNames: None, callAsFunction: Some(callback_trampoline), callAsConstructor: None, hasInstance: None, convertToType: None, } } impl Value { pub fn js_type(&self) -> ValueType { self.1 } } impl Object { pub fn make_function_with_callback(&self, name: &String, callback: JsCallback) -> Object { let cls = unsafe { JSClassCreate(&rust_function_defn(name)) }; let ptr = unsafe { JSObjectMake(*self.0, cls, callback as _) }; if unsafe { JSObjectGetPrivate(ptr) } == null_mut() { panic!("No private"); } Object(self.0, ptr) } pub fn set_property(&self, name: &String, value: Value) { unsafe { JSObjectSetProperty(*self.0, self.1, **name, value.0, 0, null_mut()) }; } pub fn get_property(&self, name: &String) -> Result<Value, Value> { let mut exception = null(); let ret = unsafe { JSObjectGetProperty(*self.0, self.1, **name, &mut exception) }; if exception == null() { Ok(Value::from(self.0, ret)) } else { Err(Value::from(self.0, exception)) } } pub fn to_js_value(&self) -> Value { Value(self.1, ValueType::Object, self.0) } } impl String { pub fn new(s: &str) -> Result<String, Box<dyn std::error::Error>> { let s = CString::new(s)?; let v = unsafe { JSStringCreateWithUTF8CString(s.as_ptr() as *const i8) }; Ok(String(v)) } pub fn to_js_value(&self, ctx: &Context) -> JSValueRef { unsafe { JSValueMakeString(**ctx, self.0) } } }
{ let mut s = f.debug_struct("Object"); unsafe { let array = JSObjectCopyPropertyNames(*self.0, self.1); let size = JSPropertyNameArrayGetCount(array); for i in 0..size { let js_ref = JSPropertyNameArrayGetNameAtIndex(array, i); let prop_name = std::string::String::from(&String(js_ref)); let prop_value = Value::from( self.0, JSObjectGetPropertyAtIndex(*self.0, self.1, i as u32, null_mut()), ); s.field(&prop_name, &format!("{:?}", prop_value)); } } s.finish() }
identifier_body
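// The callback_trampoline above leans on one trick: a plain `fn` pointer
// round-trips losslessly through `*mut c_void`, so it can be stashed as the
// function object's private data at creation time and transmuted back inside
// the extern "C" callback. A minimal, dependency-free sketch of just that
// round-trip (the names here are illustrative, not part of javascriptcore-sys):
use std::os::raw::c_void;

type Callback = fn(i32) -> i32;

fn double(x: i32) -> i32 {
    x * 2
}

fn main() {
    let f: Callback = double;
    // store: fn pointer -> opaque pointer (what JSObjectGetPrivate later returns)
    let opaque = f as *mut c_void;
    // recover: opaque pointer -> fn pointer (the transmute in the trampoline)
    let recovered = unsafe { std::mem::transmute::<*mut c_void, Callback>(opaque) };
    assert_eq!(recovered(21), 42);
}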
types.rs
use javascriptcore_sys::*; use std::convert::TryFrom; use std::ffi::CString; use std::ops::Deref; use std::ptr::{null, null_mut}; macro_rules! retain_release { ($name:ident, $ffi_ref:ty, $retain_fn:tt, $drop_fn:tt) => { impl Drop for $name { fn drop(&mut self) { unsafe { $drop_fn(self.0) }; } } impl Clone for $name { fn clone(&self) -> $name { let x = unsafe { $retain_fn(self.0) }; $name(x) } } impl Deref for $name { type Target = $ffi_ref; fn deref(&self) -> &$ffi_ref { &self.0 } } }; } unsafe impl Send for GlobalContext {} unsafe impl Sync for GlobalContext {} unsafe impl Send for Context {} unsafe impl Sync for Context {} unsafe impl Send for String {} unsafe impl Sync for String {} unsafe impl Send for Object {} unsafe impl Sync for Object {} unsafe impl Send for ContextGroup {} unsafe impl Sync for ContextGroup {} unsafe impl Send for Value {} unsafe impl Sync for Value {} #[derive(Copy, Clone, Debug)] pub struct Context(pub(crate) JSContextRef); pub struct ContextGroup(pub(crate) JSContextGroupRef); pub struct GlobalContext(pub(crate) JSGlobalContextRef); pub struct Object(pub(crate) Context, pub(crate) JSObjectRef); pub struct String(pub(crate) JSStringRef); use std::fmt; impl fmt::Debug for Object { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let mut s = f.debug_struct("Object"); unsafe { let array = JSObjectCopyPropertyNames(*self.0, self.1); let size = JSPropertyNameArrayGetCount(array); for i in 0..size { let js_ref = JSPropertyNameArrayGetNameAtIndex(array, i); let prop_name = std::string::String::from(&String(js_ref)); let prop_value = Value::from( self.0, JSObjectGetPropertyAtIndex(*self.0, self.1, i as u32, null_mut()), ); s.field(&prop_name, &format!("{:?}", prop_value)); } } s.finish() } } impl fmt::Debug for Exception { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("Exception") .field("stack", &self.stack()) .field("message", &self.message()) .finish() } } impl fmt::Display for Exception { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { writeln!(f, "Message: {}", &self.message())?; writeln!(f, "Stack:")?; write!(f, "{}", self.stack()) } } #[derive(Debug, Copy, Clone)] pub enum ValueType { Undefined, Null, Boolean, Number, String, Object, Symbol, } #[derive(Debug)] pub struct Value( pub(crate) JSValueRef, pub(crate) ValueType, pub(crate) Context, ); pub trait ContextType { unsafe fn as_ptr(&self) -> JSContextRef; fn undefined(&self) -> Value { let ptr = unsafe { self.as_ptr() }; let value = unsafe { JSValueMakeUndefined(ptr) }; Value(value, ValueType::Undefined, Context(ptr)) } } impl ContextType for GlobalContext { unsafe fn as_ptr(&self) -> JSContextRef { self.0 } } impl ContextType for Context { unsafe fn as_ptr(&self) -> JSContextRef { self.0 } } impl Deref for Context { type Target = JSContextRef; fn deref(&self) -> &JSContextRef { &self.0 } } impl Deref for Object { type Target = JSObjectRef; fn deref(&self) -> &JSObjectRef { &self.1 } } retain_release!( ContextGroup, JSContextGroupRef, JSContextGroupRetain, JSContextGroupRelease ); retain_release!( GlobalContext, JSGlobalContextRef, JSGlobalContextRetain, JSGlobalContextRelease ); retain_release!(String, JSStringRef, JSStringRetain, JSStringRelease); impl ContextGroup { pub fn new() -> ContextGroup { let ptr = unsafe { JSContextGroupCreate() }; ContextGroup(ptr) } pub fn create_global_context(&self) -> GlobalContext { let ptr = unsafe { JSGlobalContextCreateInGroup(self.0, null_mut()) }; GlobalContext(ptr) } } pub struct Exception(Object); impl Exception { pub fn 
stack(&self) -> std::string::String { let stack_val = self .0 .get_property(&String::new("stack").unwrap()) .expect("no `stack` property found"); let stack_str = String::try_from(&stack_val).expect("no string property found for `stack`"); std::string::String::from(&stack_str) } pub fn message(&self) -> std::string::String { let message_val = self .0 .get_property(&String::new("message").unwrap()) .expect("no `message` property found"); let message_str = String::try_from(&message_val).expect("no string property found for `message`"); std::string::String::from(&message_str) } } impl GlobalContext { pub fn global_object(&self) -> Object { let ptr = unsafe { JSContextGetGlobalObject(self.0) }; Object(Context(self.0), ptr) } pub fn evaluate_script_sync(&self, script: &String) -> Result<Value, Exception> { let mut exception = null(); let ret = unsafe { JSEvaluateScript(self.0, **script, null_mut(), null_mut(), 0, &mut exception) }; if exception == null_mut() { Ok(Value::from(Context(self.0), ret)) } else { let value = Value::from(Context(self.0), exception); let obj = Object::try_from(&value).unwrap(); Err(Exception(obj)) } } pub async fn
<'a>(&'a self, script: &'a String) -> Result<Value, Exception> { self.evaluate_script_sync(script) } pub fn add_function( &self, name: &str, callback: JsCallback, ) -> Result<(), Box<dyn std::error::Error>> { let name = String::new(name).unwrap(); let obj = self.global_object(); let fn_obj = obj.make_function_with_callback(&name, callback); obj.set_property(&name, Value::from(Context(self.0), *fn_obj)); Ok(()) } } type JsCallback = fn(Context, /*thisObject*/ Object, /*arguments*/ Vec<Value>) -> Result<Value, String>; extern "C" fn callback_trampoline( ctx: JSContextRef, function: JSObjectRef, this_object: JSObjectRef, argument_count: usize, arguments: *const JSValueRef, exception: *mut JSValueRef, ) -> JSValueRef { let callback = unsafe { std::mem::transmute::<*mut ::std::os::raw::c_void, JsCallback>(JSObjectGetPrivate(function)) }; let ctx = Context(ctx); let args = unsafe { std::slice::from_raw_parts(arguments, argument_count) .into_iter() .map(|v| Value::from(ctx, *v)) .collect::<Vec<_>>() }; match callback(ctx, Object(ctx, this_object), args) { Ok(v) => v.0, Err(e) => unsafe { *exception = e.to_js_value(&ctx); JSValueMakeUndefined(ctx.0) }, } } impl ValueType { unsafe fn from(ctx: Context, value_ref: JSValueRef) -> ValueType { let raw_ty = JSValueGetType(ctx.0, value_ref); match raw_ty { 0 => ValueType::Undefined, 1 => ValueType::Null, 2 => ValueType::Boolean, 3 => ValueType::Number, 4 => ValueType::String, 5 => ValueType::Object, 6 => ValueType::Symbol, _ => unreachable!(), } } } impl Value { fn from(ctx: Context, value_ref: JSValueRef) -> Value { Value(value_ref, unsafe { ValueType::from(ctx, value_ref) }, ctx) } pub fn to_string(&self) -> std::string::String { match self.js_type() { ValueType::String => { let js_str = String::try_from(self).expect("string"); std::string::String::from(&js_str) } ValueType::Number => { let n = f64::try_from(self).expect("f64"); format!("{}", n) } ValueType::Boolean => { let v = bool::try_from(self).expect("bool"); format!("{}", v) } ValueType::Null => "null".into(), ValueType::Undefined => "undefined".into(), ValueType::Symbol => "Symbol(...)".into(), ValueType::Object => { let obj = Object::try_from(self).expect("object"); format!("{:?}", obj) } } } } fn rust_function_defn(name: &String) -> JSClassDefinition { JSClassDefinition { version: 0, attributes: 0, className: **name as *const _, parentClass: null_mut(), staticValues: null(), staticFunctions: null(), initialize: None, finalize: None, hasProperty: None, getProperty: None, setProperty: None, deleteProperty: None, getPropertyNames: None, callAsFunction: Some(callback_trampoline), callAsConstructor: None, hasInstance: None, convertToType: None, } } impl Value { pub fn js_type(&self) -> ValueType { self.1 } } impl Object { pub fn make_function_with_callback(&self, name: &String, callback: JsCallback) -> Object { let cls = unsafe { JSClassCreate(&rust_function_defn(name)) }; let ptr = unsafe { JSObjectMake(*self.0, cls, callback as _) }; if unsafe { JSObjectGetPrivate(ptr) } == null_mut() { panic!("No private"); } Object(self.0, ptr) } pub fn set_property(&self, name: &String, value: Value) { unsafe { JSObjectSetProperty(*self.0, self.1, **name, value.0, 0, null_mut()) }; } pub fn get_property(&self, name: &String) -> Result<Value, Value> { let mut exception = null(); let ret = unsafe { JSObjectGetProperty(*self.0, self.1, **name, &mut exception) }; if exception == null() { Ok(Value::from(self.0, ret)) } else { Err(Value::from(self.0, exception)) } } pub fn to_js_value(&self) -> Value { 
Value(self.1, ValueType::Object, self.0) } } impl String { pub fn new(s: &str) -> Result<String, Box<dyn std::error::Error>> { let s = CString::new(s)?; let v = unsafe { JSStringCreateWithUTF8CString(s.as_ptr() as *const i8) }; Ok(String(v)) } pub fn to_js_value(&self, ctx: &Context) -> JSValueRef { unsafe { JSValueMakeString(**ctx, self.0) } } }
evaluate_script
identifier_name
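// Putting the pieces above together, a hedged usage sketch (assumes a host with
// JavaScriptCore linked; `add_function`, `evaluate_script_sync`, and the
// JsCallback signature are the ones defined in this file, but the `log`
// function and the script are invented for illustration):
fn demo() -> Result<Value, Exception> {
    let group = ContextGroup::new();
    let ctx = group.create_global_context();
    // a non-capturing closure coerces to the JsCallback fn-pointer type
    ctx.add_function("log", |c, _this, args| {
        for arg in &args {
            println!("{}", arg.to_string());
        }
        Ok(c.undefined())
    })
    .expect("registering the callback should not fail");
    ctx.evaluate_script_sync(&String::new("log('hi'); 1 + 2").unwrap())
}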
tag.rs
//! A dictionary-based tagger. The raw format is tuples of the form `(word, lemma, part-of-speech)` //! where each word typically has multiple entries with different part-of-speech tags. use crate::types::*; use bimap::BiMap; use fs_err::File; use fst::{IntoStreamer, Map, Streamer}; use indexmap::IndexMap; use log::error; use serde::{Deserialize, Serialize}; use std::io::BufRead; use std::{borrow::Cow, iter::once}; use std::{collections::HashSet, path::Path}; #[derive(Serialize, Deserialize)] struct TaggerFields { tag_fst: Vec<u8>, word_store_fst: Vec<u8>, tag_store: BiMap<String, PosIdInt>, } impl From<Tagger> for TaggerFields { fn from(tagger: Tagger) -> Self { let mut tag_fst_items = Vec::new(); for (word_id, map) in tagger.tags.iter() { let mut i = 0u8; let word = tagger.str_for_word_id(word_id); for (inflect_id, pos_ids) in map.iter() { for pos_id in pos_ids { assert!(i < 255); i += 1; let key: Vec<u8> = word.as_bytes().iter().chain(once(&i)).copied().collect(); let pos_bytes = pos_id.0.to_be_bytes(); let inflect_bytes = inflect_id.0.to_be_bytes(); let value = u64::from_be_bytes([ inflect_bytes[0], inflect_bytes[1], inflect_bytes[2], inflect_bytes[3], 0, 0, pos_bytes[0], pos_bytes[1], ]); tag_fst_items.push((key, value)); } } } tag_fst_items.sort_by(|(a, _), (b, _)| a.cmp(b)); let mut word_store_items: Vec<_> = tagger .word_store .iter() .map(|(key, value)| (key.clone(), value.0 as u64)) .collect(); word_store_items.sort_by(|(a, _), (b, _)| a.cmp(b)); let tag_fst = Map::from_iter(tag_fst_items) .unwrap() .into_fst() .as_bytes() .to_vec(); let word_store_fst = Map::from_iter(word_store_items) .unwrap() .into_fst() .as_bytes() .to_vec(); TaggerFields { tag_fst, word_store_fst, tag_store: tagger.tag_store, } } } impl From<TaggerFields> for Tagger { fn from(data: TaggerFields) -> Self { let word_store_fst = Map::new(data.word_store_fst).unwrap(); let word_store: BiMap<String, WordIdInt> = word_store_fst .into_stream() .into_str_vec() .unwrap() .into_iter() .map(|(key, value)| (key, WordIdInt(value as u32))) .collect(); let mut tags = DefaultHashMap::new(); let mut groups = DefaultHashMap::new(); let tag_fst = Map::new(data.tag_fst).unwrap(); let mut stream = tag_fst.into_stream(); while let Some((key, value)) = stream.next() { let word = std::str::from_utf8(&key[..key.len() - 1]).unwrap(); let word_id = *word_store.get_by_left(word).unwrap(); let value_bytes = value.to_be_bytes(); let inflection_id = WordIdInt(u32::from_be_bytes([ value_bytes[0], value_bytes[1], value_bytes[2], value_bytes[3], ])); let pos_id = PosIdInt(u16::from_be_bytes([value_bytes[6], value_bytes[7]])); let group = groups.entry(inflection_id).or_insert_with(Vec::new); if !group.contains(&word_id) { group.push(word_id); } tags.entry(word_id) .or_insert_with(IndexMap::new) .entry(inflection_id) .or_insert_with(Vec::new) .push(pos_id); } Tagger { tags, tag_store: data.tag_store, word_store, groups, } } } /// The lexical tagger. 
#[derive(Default, Serialize, Deserialize, Clone)] #[serde(from = "TaggerFields", into = "TaggerFields")] pub struct Tagger { tags: DefaultHashMap<WordIdInt, IndexMap<WordIdInt, Vec<PosIdInt>>>, tag_store: BiMap<String, PosIdInt>, word_store: BiMap<String, WordIdInt>, groups: DefaultHashMap<WordIdInt, Vec<WordIdInt>>, } impl Tagger { fn get_lines<S1: AsRef<Path>, S2: AsRef<Path>>( paths: &[S1], remove_paths: &[S2], ) -> std::io::Result<Vec<(String, String, String)>> { let mut output = Vec::new(); let mut disallowed: Vec<String> = Vec::new(); for path in remove_paths { let file = File::open(path.as_ref())?; let reader = std::io::BufReader::new(file); for line in reader.lines() { let line = line?; if line.starts_with('#') { continue; } disallowed.push(line.to_string()); } } for path in paths { let file = File::open(path.as_ref())?; let reader = std::io::BufReader::new(file); for line in reader.lines() { let line = line?; if line.starts_with('#') { continue; } if disallowed.contains(&line) { continue; } let parts: Vec<_> = line.split('\t').collect(); let word = parts[0].to_string(); let inflection = parts[1].to_string(); let tag = parts[2].to_string(); output.push((word, inflection, tag)) } } Ok(output) } /// Creates a tagger from raw files. /// /// # Arguments /// * `paths`: Paths to files where each line contains the word, lemma and tag, respectively, /// separated by tabs, to be added to the tagger. /// * `remove_paths`: Paths to files where each line contains the word, lemma and tag, respectively, /// separated by tabs, to be removed from the tagger if present in the files from `paths`. pub fn from_dumps<S1: AsRef<Path>, S2: AsRef<Path>, S3: AsRef<str>>( paths: &[S1], remove_paths: &[S2], extra_tags: &[S3], common_words: &HashSet<String>, ) -> std::io::Result<Self> { let mut tags = DefaultHashMap::default(); let mut groups = DefaultHashMap::default(); let mut tag_store = HashSet::new(); let mut word_store = HashSet::new(); // hardcoded special tags tag_store.insert(""); tag_store.insert("SENT_START"); tag_store.insert("SENT_END"); tag_store.insert("UNKNOWN"); // add language specific special tags tag_store.extend(extra_tags.iter().map(|x| x.as_ref())); let lines = Tagger::get_lines(paths, remove_paths)?; let punct = "!\"#$%&\\'()*+,-./:;<=>?@[\\]^_`{|}~"; for i in 0..punct.len() { word_store.insert(&punct[i..(i + 1)]); } word_store.extend(common_words.iter().map(|x| x.as_str())); for (word, inflection, tag) in lines.iter() { word_store.insert(word); word_store.insert(inflection); tag_store.insert(tag); } // word store ids should be consistent across runs let mut word_store: Vec<_> = word_store.iter().collect(); word_store.sort(); // tag store ids should be consistent across runs let mut tag_store: Vec<_> = tag_store.iter().collect(); tag_store.sort(); let word_store: BiMap<_, _> = word_store .iter() .enumerate() .map(|(i, x)| (x.to_string(), WordIdInt(i as u32))) .collect(); let tag_store: BiMap<_, _> = tag_store .iter() .enumerate() .map(|(i, x)| (x.to_string(), PosIdInt(i as u16))) .collect(); for (word, inflection, tag) in lines.iter() { let word_id = word_store.get_by_left(word).unwrap(); let inflection_id = word_store.get_by_left(inflection).unwrap(); let pos_id = tag_store.get_by_left(tag).unwrap(); let group = groups.entry(*inflection_id).or_insert_with(Vec::new); if !group.contains(word_id) { group.push(*word_id); } tags.entry(*word_id) .or_insert_with(IndexMap::new) .entry(*inflection_id) .or_insert_with(Vec::new) .push(*pos_id); } Ok(Tagger { tags, groups, word_store, tag_store, 
}) } fn get_raw(&self, word: &str) -> Vec<WordData> { if let Some(map) = self .word_store .get_by_left(word) .and_then(|x| self.tags.get(x)) { let mut output = Vec::new(); for (key, value) in map.iter() { for pos_id in value { output.push(WordData::new( self.id_word(self.str_for_word_id(key).into()), self.id_tag(self.str_for_pos_id(pos_id)), )) } } output } else { Vec::new() } } fn
( &self, word: &str, add_lower: bool, add_lower_if_empty: bool, ) -> Vec<WordData> { let mut tags = self.get_raw(&word); let lower = word.to_lowercase(); if (add_lower || (add_lower_if_empty && tags.is_empty())) && (word != lower && (crate::utils::is_title_case(word) || crate::utils::is_uppercase(word))) { tags.extend(self.get_raw(&lower)); } tags } #[allow(dead_code)] // used by compile module pub(crate) fn tag_store(&self) -> &BiMap<String, PosIdInt> { &self.tag_store } #[allow(dead_code)] // used by compile module pub(crate) fn word_store(&self) -> &BiMap<String, WordIdInt> { &self.word_store } fn str_for_word_id(&self, id: &WordIdInt) -> &str { self.word_store .get_by_right(id) .expect("only valid word ids are created") } fn str_for_pos_id(&self, id: &PosIdInt) -> &str { self.tag_store .get_by_right(id) .expect("only valid pos ids are created") } pub fn id_tag<'a>(&self, tag: &'a str) -> PosId<'a> { PosId( tag, *self.tag_store.get_by_left(tag).unwrap_or_else(|| { error!( "'{}' not found in tag store, please add it to the `extra_tags`. Using UNKNOWN instead.", tag ); self.tag_store.get_by_left("UNKNOWN").expect("UNKNOWN tag must exist in tag store") }), ) } pub fn id_word<'t>(&'t self, text: Cow<'t, str>) -> WordId<'t> { let id = self.word_store.get_by_left(text.as_ref()).copied(); WordId(text, id) } /// Get the tags and lemmas (as [WordData][crate::types::WordData]) for the given word. /// /// # Arguments /// * `word`: The word to look up data for. /// * `add_lower`: Whether to add data for the lowercase variant of the word. /// * `use_compound_split_heuristic`: Whether to use a heuristic to split compound words. /// If true, will attempt to find tags for words which are longer than some cutoff and unknown by looking up tags /// for substrings from left to right until tags are found or a minimum length is reached. pub fn get_tags( &self, word: &str, add_lower: bool, use_compound_split_heuristic: bool, ) -> Vec<WordData> { let mut tags = self.get_strict_tags(word, add_lower, true); // compound splitting heuristic, seems to work reasonably well if use_compound_split_heuristic && tags.is_empty() { let n_chars = word.chars().count() as isize; if n_chars >= 7 { let indices = word .char_indices() .take(std::cmp::max(n_chars - 4, 0) as usize) .skip(1) .map(|x| x.0); // the word always has at least one char if the above condition is satisfied // but semantically this is false if no char exists let starts_with_uppercase = word.chars().next().map_or(false, |x| x.is_uppercase()); for i in indices { let next = if starts_with_uppercase { crate::utils::apply_to_first(&word[i..], |c| c.to_uppercase().collect()) } else { word[i..].to_string() }; let next_tags = self.get_strict_tags(&next, add_lower, false); if !next_tags.is_empty() { tags = next_tags .into_iter() .map(|mut x| { x.lemma = self.id_word( format!("{}{}", &word[..i], x.lemma.as_ref().to_lowercase()) .into(), ); x }) .collect(); break; } } } } tags } /// Get the words with the same lemma as the given lemma. pub fn get_group_members(&self, lemma: &str) -> Vec<&str> { self.word_store .get_by_left(lemma) .and_then(|x| self.groups.get(x)) .map(|vec| vec.iter().map(|x| self.str_for_word_id(x)).collect()) .unwrap_or_else(Vec::new) } }
get_strict_tags
identifier_name
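// The FST value layout used by TaggerFields above packs an (inflection id,
// pos id) pair into a single u64: the four big-endian bytes of the u32
// inflection id, two zero padding bytes, then the two bytes of the u16 pos
// id. A standalone round-trip check of exactly that layout:
fn pack(inflect_id: u32, pos_id: u16) -> u64 {
    let i = inflect_id.to_be_bytes();
    let p = pos_id.to_be_bytes();
    u64::from_be_bytes([i[0], i[1], i[2], i[3], 0, 0, p[0], p[1]])
}

fn unpack(value: u64) -> (u32, u16) {
    let b = value.to_be_bytes();
    (
        u32::from_be_bytes([b[0], b[1], b[2], b[3]]),
        u16::from_be_bytes([b[6], b[7]]),
    )
}

fn main() {
    assert_eq!(unpack(pack(0xDEAD_BEEF, 0xCAFE)), (0xDEAD_BEEF, 0xCAFE));
}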
tag.rs
//! A dictionary-based tagger. The raw format is tuples of the form `(word, lemma, part-of-speech)` //! where each word typically has multiple entries with different part-of-speech tags. use crate::types::*; use bimap::BiMap; use fs_err::File; use fst::{IntoStreamer, Map, Streamer}; use indexmap::IndexMap; use log::error; use serde::{Deserialize, Serialize}; use std::io::BufRead; use std::{borrow::Cow, iter::once}; use std::{collections::HashSet, path::Path}; #[derive(Serialize, Deserialize)] struct TaggerFields { tag_fst: Vec<u8>, word_store_fst: Vec<u8>, tag_store: BiMap<String, PosIdInt>, } impl From<Tagger> for TaggerFields { fn from(tagger: Tagger) -> Self { let mut tag_fst_items = Vec::new(); for (word_id, map) in tagger.tags.iter() { let mut i = 0u8; let word = tagger.str_for_word_id(word_id); for (inflect_id, pos_ids) in map.iter() { for pos_id in pos_ids { assert!(i < 255); i += 1; let key: Vec<u8> = word.as_bytes().iter().chain(once(&i)).copied().collect(); let pos_bytes = pos_id.0.to_be_bytes(); let inflect_bytes = inflect_id.0.to_be_bytes(); let value = u64::from_be_bytes([ inflect_bytes[0], inflect_bytes[1], inflect_bytes[2], inflect_bytes[3], 0, 0, pos_bytes[0], pos_bytes[1], ]); tag_fst_items.push((key, value)); } } } tag_fst_items.sort_by(|(a, _), (b, _)| a.cmp(b)); let mut word_store_items: Vec<_> = tagger .word_store .iter() .map(|(key, value)| (key.clone(), value.0 as u64)) .collect(); word_store_items.sort_by(|(a, _), (b, _)| a.cmp(b)); let tag_fst = Map::from_iter(tag_fst_items) .unwrap() .into_fst() .as_bytes() .to_vec(); let word_store_fst = Map::from_iter(word_store_items) .unwrap() .into_fst() .as_bytes() .to_vec(); TaggerFields { tag_fst, word_store_fst, tag_store: tagger.tag_store, } } } impl From<TaggerFields> for Tagger { fn from(data: TaggerFields) -> Self { let word_store_fst = Map::new(data.word_store_fst).unwrap(); let word_store: BiMap<String, WordIdInt> = word_store_fst .into_stream() .into_str_vec() .unwrap() .into_iter() .map(|(key, value)| (key, WordIdInt(value as u32))) .collect(); let mut tags = DefaultHashMap::new(); let mut groups = DefaultHashMap::new(); let tag_fst = Map::new(data.tag_fst).unwrap(); let mut stream = tag_fst.into_stream(); while let Some((key, value)) = stream.next() { let word = std::str::from_utf8(&key[..key.len() - 1]).unwrap(); let word_id = *word_store.get_by_left(word).unwrap(); let value_bytes = value.to_be_bytes(); let inflection_id = WordIdInt(u32::from_be_bytes([ value_bytes[0], value_bytes[1], value_bytes[2], value_bytes[3], ])); let pos_id = PosIdInt(u16::from_be_bytes([value_bytes[6], value_bytes[7]])); let group = groups.entry(inflection_id).or_insert_with(Vec::new); if !group.contains(&word_id) { group.push(word_id); } tags.entry(word_id) .or_insert_with(IndexMap::new) .entry(inflection_id) .or_insert_with(Vec::new) .push(pos_id); } Tagger { tags, tag_store: data.tag_store, word_store, groups, } } } /// The lexical tagger. 
#[derive(Default, Serialize, Deserialize, Clone)] #[serde(from = "TaggerFields", into = "TaggerFields")] pub struct Tagger { tags: DefaultHashMap<WordIdInt, IndexMap<WordIdInt, Vec<PosIdInt>>>, tag_store: BiMap<String, PosIdInt>, word_store: BiMap<String, WordIdInt>, groups: DefaultHashMap<WordIdInt, Vec<WordIdInt>>, } impl Tagger { fn get_lines<S1: AsRef<Path>, S2: AsRef<Path>>( paths: &[S1], remove_paths: &[S2], ) -> std::io::Result<Vec<(String, String, String)>> { let mut output = Vec::new(); let mut disallowed: Vec<String> = Vec::new(); for path in remove_paths { let file = File::open(path.as_ref())?; let reader = std::io::BufReader::new(file); for line in reader.lines() { let line = line?; if line.starts_with('#') { continue; } disallowed.push(line.to_string()); } } for path in paths { let file = File::open(path.as_ref())?; let reader = std::io::BufReader::new(file); for line in reader.lines() { let line = line?; if line.starts_with('#') { continue; } if disallowed.contains(&line) { continue; } let parts: Vec<_> = line.split('\t').collect(); let word = parts[0].to_string(); let inflection = parts[1].to_string(); let tag = parts[2].to_string(); output.push((word, inflection, tag)) } } Ok(output) } /// Creates a tagger from raw files. /// /// # Arguments /// * `paths`: Paths to files where each line contains the word, lemma and tag, respectively, /// separated by tabs, to be added to the tagger. /// * `remove_paths`: Paths to files where each line contains the word, lemma and tag, respectively, /// separated by tabs, to be removed from the tagger if present in the files from `paths`. pub fn from_dumps<S1: AsRef<Path>, S2: AsRef<Path>, S3: AsRef<str>>( paths: &[S1], remove_paths: &[S2], extra_tags: &[S3], common_words: &HashSet<String>, ) -> std::io::Result<Self> { let mut tags = DefaultHashMap::default(); let mut groups = DefaultHashMap::default(); let mut tag_store = HashSet::new(); let mut word_store = HashSet::new(); // hardcoded special tags tag_store.insert(""); tag_store.insert("SENT_START"); tag_store.insert("SENT_END"); tag_store.insert("UNKNOWN"); // add language specific special tags tag_store.extend(extra_tags.iter().map(|x| x.as_ref())); let lines = Tagger::get_lines(paths, remove_paths)?; let punct = "!\"#$%&\\'()*+,-./:;<=>?@[\\]^_`{|}~"; for i in 0..punct.len() { word_store.insert(&punct[i..(i + 1)]); } word_store.extend(common_words.iter().map(|x| x.as_str())); for (word, inflection, tag) in lines.iter() { word_store.insert(word); word_store.insert(inflection); tag_store.insert(tag); } // word store ids should be consistent across runs let mut word_store: Vec<_> = word_store.iter().collect(); word_store.sort(); // tag store ids should be consistent across runs let mut tag_store: Vec<_> = tag_store.iter().collect(); tag_store.sort(); let word_store: BiMap<_, _> = word_store .iter() .enumerate() .map(|(i, x)| (x.to_string(), WordIdInt(i as u32))) .collect(); let tag_store: BiMap<_, _> = tag_store .iter() .enumerate() .map(|(i, x)| (x.to_string(), PosIdInt(i as u16))) .collect(); for (word, inflection, tag) in lines.iter() { let word_id = word_store.get_by_left(word).unwrap(); let inflection_id = word_store.get_by_left(inflection).unwrap(); let pos_id = tag_store.get_by_left(tag).unwrap(); let group = groups.entry(*inflection_id).or_insert_with(Vec::new); if !group.contains(word_id) { group.push(*word_id); } tags.entry(*word_id) .or_insert_with(IndexMap::new) .entry(*inflection_id) .or_insert_with(Vec::new) .push(*pos_id); } Ok(Tagger { tags, groups, word_store, tag_store, 
}) } fn get_raw(&self, word: &str) -> Vec<WordData> { if let Some(map) = self .word_store .get_by_left(word) .and_then(|x| self.tags.get(x)) { let mut output = Vec::new(); for (key, value) in map.iter() { for pos_id in value { output.push(WordData::new( self.id_word(self.str_for_word_id(key).into()), self.id_tag(self.str_for_pos_id(pos_id)), )) } } output } else { Vec::new() } } fn get_strict_tags( &self, word: &str, add_lower: bool, add_lower_if_empty: bool, ) -> Vec<WordData> { let mut tags = self.get_raw(&word); let lower = word.to_lowercase(); if (add_lower || (add_lower_if_empty && tags.is_empty())) && (word != lower && (crate::utils::is_title_case(word) || crate::utils::is_uppercase(word))) { tags.extend(self.get_raw(&lower)); } tags } #[allow(dead_code)] // used by compile module pub(crate) fn tag_store(&self) -> &BiMap<String, PosIdInt> { &self.tag_store } #[allow(dead_code)] // used by compile module pub(crate) fn word_store(&self) -> &BiMap<String, WordIdInt> { &self.word_store } fn str_for_word_id(&self, id: &WordIdInt) -> &str { self.word_store .get_by_right(id) .expect("only valid word ids are created") } fn str_for_pos_id(&self, id: &PosIdInt) -> &str { self.tag_store .get_by_right(id) .expect("only valid pos ids are created") } pub fn id_tag<'a>(&self, tag: &'a str) -> PosId<'a> { PosId( tag, *self.tag_store.get_by_left(tag).unwrap_or_else(|| { error!( "'{}' not found in tag store, please add it to the `extra_tags`. Using UNKNOWN instead.", tag ); self.tag_store.get_by_left("UNKNOWN").expect("UNKNOWN tag must exist in tag store") }), ) } pub fn id_word<'t>(&'t self, text: Cow<'t, str>) -> WordId<'t> { let id = self.word_store.get_by_left(text.as_ref()).copied(); WordId(text, id) } /// Get the tags and lemmas (as [WordData][crate::types::WordData]) for the given word. /// /// # Arguments /// * `word`: The word to look up data for. /// * `add_lower`: Whether to add data for the lowercase variant of the word. /// * `use_compound_split_heuristic`: Whether to use a heuristic to split compound words. /// If true, will attempt to find tags for words which are longer than some cutoff and unknown by looking up tags /// for substrings from left to right until tags are found or a minimum length is reached. pub fn get_tags( &self, word: &str, add_lower: bool, use_compound_split_heuristic: bool, ) -> Vec<WordData> { let mut tags = self.get_strict_tags(word, add_lower, true); // compound splitting heuristic, seems to work reasonably well if use_compound_split_heuristic && tags.is_empty() { let n_chars = word.chars().count() as isize; if n_chars >= 7
tags = next_tags .into_iter() .map(|mut x| { x.lemma = self.id_word( format!("{}{}", &word[..i], x.lemma.as_ref().to_lowercase()) .into(), ); x }) .collect(); break; } } } } tags } /// Get the words with the same lemma as the given lemma. pub fn get_group_members(&self, lemma: &str) -> Vec<&str> { self.word_store .get_by_left(lemma) .and_then(|x| self.groups.get(x)) .map(|vec| vec.iter().map(|x| self.str_for_word_id(x)).collect()) .unwrap_or_else(Vec::new) } }
{ let indices = word .char_indices() .take(std::cmp::max(n_chars - 4, 0) as usize) .skip(1) .map(|x| x.0); // the word always has at least one char if the above condition is satisfied // but semantically this is false if no char exists let starts_with_uppercase = word.chars().next().map_or(false, |x| x.is_uppercase()); for i in indices { let next = if starts_with_uppercase { crate::utils::apply_to_first(&word[i..], |c| c.to_uppercase().collect()) } else { word[i..].to_string() }; let next_tags = self.get_strict_tags(&next, add_lower, false); if !next_tags.is_empty() {
conditional_block
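// The conditional block above is the heart of the compound-split heuristic:
// for an unknown word of at least 7 chars, candidate suffixes are tried from
// left to right, never splitting before the second char and always keeping at
// least 5 chars in the suffix. A pure-string sketch of just the candidate
// generation (the German-style input is invented for illustration):
fn split_candidates(word: &str) -> Vec<&str> {
    let n_chars = word.chars().count() as isize;
    if n_chars < 7 {
        return Vec::new();
    }
    word.char_indices()
        .take(std::cmp::max(n_chars - 4, 0) as usize) // last split keeps a 5-char suffix
        .skip(1) // never split before the first char
        .map(|(i, _)| &word[i..])
        .collect()
}

fn main() {
    // prints "euerwehrauto", "uerwehrauto", ..., down to "rauto"
    for candidate in split_candidates("Feuerwehrauto") {
        println!("{}", candidate);
    }
}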
tag.rs
//! A dictionary-based tagger. The raw format is tuples of the form `(word, lemma, part-of-speech)` //! where each word typically has multiple entries with different part-of-speech tags. use crate::types::*; use bimap::BiMap; use fs_err::File; use fst::{IntoStreamer, Map, Streamer}; use indexmap::IndexMap; use log::error; use serde::{Deserialize, Serialize}; use std::io::BufRead; use std::{borrow::Cow, iter::once}; use std::{collections::HashSet, path::Path}; #[derive(Serialize, Deserialize)] struct TaggerFields { tag_fst: Vec<u8>, word_store_fst: Vec<u8>, tag_store: BiMap<String, PosIdInt>, } impl From<Tagger> for TaggerFields { fn from(tagger: Tagger) -> Self { let mut tag_fst_items = Vec::new(); for (word_id, map) in tagger.tags.iter() { let mut i = 0u8; let word = tagger.str_for_word_id(word_id); for (inflect_id, pos_ids) in map.iter() { for pos_id in pos_ids { assert!(i < 255); i += 1; let key: Vec<u8> = word.as_bytes().iter().chain(once(&i)).copied().collect(); let pos_bytes = pos_id.0.to_be_bytes(); let inflect_bytes = inflect_id.0.to_be_bytes(); let value = u64::from_be_bytes([ inflect_bytes[0], inflect_bytes[1], inflect_bytes[2], inflect_bytes[3], 0, 0, pos_bytes[0], pos_bytes[1], ]); tag_fst_items.push((key, value)); } } } tag_fst_items.sort_by(|(a, _), (b, _)| a.cmp(b)); let mut word_store_items: Vec<_> = tagger .word_store .iter() .map(|(key, value)| (key.clone(), value.0 as u64)) .collect(); word_store_items.sort_by(|(a, _), (b, _)| a.cmp(b)); let tag_fst = Map::from_iter(tag_fst_items) .unwrap() .into_fst() .as_bytes() .to_vec(); let word_store_fst = Map::from_iter(word_store_items) .unwrap() .into_fst() .as_bytes() .to_vec(); TaggerFields { tag_fst, word_store_fst, tag_store: tagger.tag_store, } } } impl From<TaggerFields> for Tagger { fn from(data: TaggerFields) -> Self { let word_store_fst = Map::new(data.word_store_fst).unwrap(); let word_store: BiMap<String, WordIdInt> = word_store_fst .into_stream() .into_str_vec() .unwrap() .into_iter() .map(|(key, value)| (key, WordIdInt(value as u32))) .collect(); let mut tags = DefaultHashMap::new(); let mut groups = DefaultHashMap::new(); let tag_fst = Map::new(data.tag_fst).unwrap(); let mut stream = tag_fst.into_stream(); while let Some((key, value)) = stream.next() { let word = std::str::from_utf8(&key[..key.len() - 1]).unwrap(); let word_id = *word_store.get_by_left(word).unwrap(); let value_bytes = value.to_be_bytes(); let inflection_id = WordIdInt(u32::from_be_bytes([ value_bytes[0], value_bytes[1], value_bytes[2], value_bytes[3], ])); let pos_id = PosIdInt(u16::from_be_bytes([value_bytes[6], value_bytes[7]])); let group = groups.entry(inflection_id).or_insert_with(Vec::new); if !group.contains(&word_id) { group.push(word_id); } tags.entry(word_id) .or_insert_with(IndexMap::new) .entry(inflection_id) .or_insert_with(Vec::new) .push(pos_id); } Tagger { tags, tag_store: data.tag_store, word_store, groups, } } } /// The lexical tagger. 
#[derive(Default, Serialize, Deserialize, Clone)] #[serde(from = "TaggerFields", into = "TaggerFields")] pub struct Tagger { tags: DefaultHashMap<WordIdInt, IndexMap<WordIdInt, Vec<PosIdInt>>>, tag_store: BiMap<String, PosIdInt>, word_store: BiMap<String, WordIdInt>, groups: DefaultHashMap<WordIdInt, Vec<WordIdInt>>, } impl Tagger { fn get_lines<S1: AsRef<Path>, S2: AsRef<Path>>( paths: &[S1], remove_paths: &[S2], ) -> std::io::Result<Vec<(String, String, String)>> { let mut output = Vec::new(); let mut disallowed: Vec<String> = Vec::new(); for path in remove_paths { let file = File::open(path.as_ref())?; let reader = std::io::BufReader::new(file); for line in reader.lines() { let line = line?; if line.starts_with('#') { continue; } disallowed.push(line.to_string()); } } for path in paths { let file = File::open(path.as_ref())?;
let reader = std::io::BufReader::new(file); for line in reader.lines() { let line = line?; if line.starts_with('#') { continue; } if disallowed.contains(&line) { continue; } let parts: Vec<_> = line.split('\t').collect(); let word = parts[0].to_string(); let inflection = parts[1].to_string(); let tag = parts[2].to_string(); output.push((word, inflection, tag)) } } Ok(output) } /// Creates a tagger from raw files. /// /// # Arguments /// * `paths`: Paths to files where each line contains the word, lemma and tag, respectively, /// separated by tabs, to be added to the tagger. /// * `remove_paths`: Paths to files where each line contains the word, lemma and tag, respectively, /// separated by tabs, to be removed from the tagger if present in the files from `paths`. pub fn from_dumps<S1: AsRef<Path>, S2: AsRef<Path>, S3: AsRef<str>>( paths: &[S1], remove_paths: &[S2], extra_tags: &[S3], common_words: &HashSet<String>, ) -> std::io::Result<Self> { let mut tags = DefaultHashMap::default(); let mut groups = DefaultHashMap::default(); let mut tag_store = HashSet::new(); let mut word_store = HashSet::new(); // hardcoded special tags tag_store.insert(""); tag_store.insert("SENT_START"); tag_store.insert("SENT_END"); tag_store.insert("UNKNOWN"); // add language specific special tags tag_store.extend(extra_tags.iter().map(|x| x.as_ref())); let lines = Tagger::get_lines(paths, remove_paths)?; let punct = "!\"#$%&\\'()*+,-./:;<=>?@[\\]^_`{|}~"; for i in 0..punct.len() { word_store.insert(&punct[i..(i + 1)]); } word_store.extend(common_words.iter().map(|x| x.as_str())); for (word, inflection, tag) in lines.iter() { word_store.insert(word); word_store.insert(inflection); tag_store.insert(tag); } // word store ids should be consistent across runs let mut word_store: Vec<_> = word_store.iter().collect(); word_store.sort(); // tag store ids should be consistent across runs let mut tag_store: Vec<_> = tag_store.iter().collect(); tag_store.sort(); let word_store: BiMap<_, _> = word_store .iter() .enumerate() .map(|(i, x)| (x.to_string(), WordIdInt(i as u32))) .collect(); let tag_store: BiMap<_, _> = tag_store .iter() .enumerate() .map(|(i, x)| (x.to_string(), PosIdInt(i as u16))) .collect(); for (word, inflection, tag) in lines.iter() { let word_id = word_store.get_by_left(word).unwrap(); let inflection_id = word_store.get_by_left(inflection).unwrap(); let pos_id = tag_store.get_by_left(tag).unwrap(); let group = groups.entry(*inflection_id).or_insert_with(Vec::new); if !group.contains(word_id) { group.push(*word_id); } tags.entry(*word_id) .or_insert_with(IndexMap::new) .entry(*inflection_id) .or_insert_with(Vec::new) .push(*pos_id); } Ok(Tagger { tags, groups, word_store, tag_store, }) } fn get_raw(&self, word: &str) -> Vec<WordData> { if let Some(map) = self .word_store .get_by_left(word) .and_then(|x| self.tags.get(x)) { let mut output = Vec::new(); for (key, value) in map.iter() { for pos_id in value { output.push(WordData::new( self.id_word(self.str_for_word_id(key).into()), self.id_tag(self.str_for_pos_id(pos_id)), )) } } output } else { Vec::new() } } fn get_strict_tags( &self, word: &str, add_lower: bool, add_lower_if_empty: bool, ) -> Vec<WordData> { let mut tags = self.get_raw(&word); let lower = word.to_lowercase(); if (add_lower || (add_lower_if_empty && tags.is_empty())) && (word != lower && (crate::utils::is_title_case(word) || crate::utils::is_uppercase(word))) { tags.extend(self.get_raw(&lower)); } tags } #[allow(dead_code)] // used by compile module pub(crate) fn tag_store(&self) -> 
&BiMap<String, PosIdInt> { &self.tag_store } #[allow(dead_code)] // used by compile module pub(crate) fn word_store(&self) -> &BiMap<String, WordIdInt> { &self.word_store } fn str_for_word_id(&self, id: &WordIdInt) -> &str { self.word_store .get_by_right(id) .expect("only valid word ids are created") } fn str_for_pos_id(&self, id: &PosIdInt) -> &str { self.tag_store .get_by_right(id) .expect("only valid pos ids are created") } pub fn id_tag<'a>(&self, tag: &'a str) -> PosId<'a> { PosId( tag, *self.tag_store.get_by_left(tag).unwrap_or_else(|| { error!( "'{}' not found in tag store, please add it to the `extra_tags`. Using UNKNOWN instead.", tag ); self.tag_store.get_by_left("UNKNOWN").expect("UNKNOWN tag must exist in tag store") }), ) } pub fn id_word<'t>(&'t self, text: Cow<'t, str>) -> WordId<'t> { let id = self.word_store.get_by_left(text.as_ref()).copied(); WordId(text, id) } /// Get the tags and lemmas (as [WordData][crate::types::WordData]) for the given word. /// /// # Arguments /// * `word`: The word to look up data for. /// * `add_lower`: Whether to add data for the lowercase variant of the word. /// * `use_compound_split_heuristic`: Whether to use a heuristic to split compound words. /// If true, will attempt to find tags for words which are longer than some cutoff and unknown by looking up tags /// for substrings from left to right until tags are found or a minimum length is reached. pub fn get_tags( &self, word: &str, add_lower: bool, use_compound_split_heuristic: bool, ) -> Vec<WordData> { let mut tags = self.get_strict_tags(word, add_lower, true); // compound splitting heuristic, seems to work reasonably well if use_compound_split_heuristic && tags.is_empty() { let n_chars = word.chars().count() as isize; if n_chars >= 7 { let indices = word .char_indices() .take(std::cmp::max(n_chars - 4, 0) as usize) .skip(1) .map(|x| x.0); // the word always has at least one char if the above condition is satisfied // but semantically this is false if no char exists let starts_with_uppercase = word.chars().next().map_or(false, |x| x.is_uppercase()); for i in indices { let next = if starts_with_uppercase { crate::utils::apply_to_first(&word[i..], |c| c.to_uppercase().collect()) } else { word[i..].to_string() }; let next_tags = self.get_strict_tags(&next, add_lower, false); if !next_tags.is_empty() { tags = next_tags .into_iter() .map(|mut x| { x.lemma = self.id_word( format!("{}{}", &word[..i], x.lemma.as_ref().to_lowercase()) .into(), ); x }) .collect(); break; } } } } tags } /// Get the words with the same lemma as the given lemma. pub fn get_group_members(&self, lemma: &str) -> Vec<&str> { self.word_store .get_by_left(lemma) .and_then(|x| self.groups.get(x)) .map(|vec| vec.iter().map(|x| self.str_for_word_id(x)).collect()) .unwrap_or_else(Vec::new) } }
random_line_split
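// get_lines above reads tab-separated dump rows of (word, lemma, tag) — the
// code calls the lemma column "inflection" — skipping '#' comment lines and
// any line present in the remove_paths blacklist. A small sketch of the
// per-line parse (the sample rows are invented):
fn parse_line(line: &str) -> Option<(String, String, String)> {
    if line.starts_with('#') {
        return None; // comment line
    }
    let mut parts = line.split('\t');
    Some((
        parts.next()?.to_string(), // surface form, e.g. "went"
        parts.next()?.to_string(), // lemma, e.g. "go"
        parts.next()?.to_string(), // part-of-speech tag, e.g. "VBD"
    ))
}

fn main() {
    assert_eq!(
        parse_line("went\tgo\tVBD"),
        Some(("went".into(), "go".into(), "VBD".into()))
    );
    assert_eq!(parse_line("# a comment"), None);
}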
tag.rs
//! A dictionary-based tagger. The raw format is tuples of the form `(word, lemma, part-of-speech)` //! where each word typically has multiple entries with different part-of-speech tags. use crate::types::*; use bimap::BiMap; use fs_err::File; use fst::{IntoStreamer, Map, Streamer}; use indexmap::IndexMap; use log::error; use serde::{Deserialize, Serialize}; use std::io::BufRead; use std::{borrow::Cow, iter::once}; use std::{collections::HashSet, path::Path}; #[derive(Serialize, Deserialize)] struct TaggerFields { tag_fst: Vec<u8>, word_store_fst: Vec<u8>, tag_store: BiMap<String, PosIdInt>, } impl From<Tagger> for TaggerFields { fn from(tagger: Tagger) -> Self { let mut tag_fst_items = Vec::new(); for (word_id, map) in tagger.tags.iter() { let mut i = 0u8; let word = tagger.str_for_word_id(word_id); for (inflect_id, pos_ids) in map.iter() { for pos_id in pos_ids { assert!(i < 255); i += 1; let key: Vec<u8> = word.as_bytes().iter().chain(once(&i)).copied().collect(); let pos_bytes = pos_id.0.to_be_bytes(); let inflect_bytes = inflect_id.0.to_be_bytes(); let value = u64::from_be_bytes([ inflect_bytes[0], inflect_bytes[1], inflect_bytes[2], inflect_bytes[3], 0, 0, pos_bytes[0], pos_bytes[1], ]); tag_fst_items.push((key, value)); } } } tag_fst_items.sort_by(|(a, _), (b, _)| a.cmp(b)); let mut word_store_items: Vec<_> = tagger .word_store .iter() .map(|(key, value)| (key.clone(), value.0 as u64)) .collect(); word_store_items.sort_by(|(a, _), (b, _)| a.cmp(b)); let tag_fst = Map::from_iter(tag_fst_items) .unwrap() .into_fst() .as_bytes() .to_vec(); let word_store_fst = Map::from_iter(word_store_items) .unwrap() .into_fst() .as_bytes() .to_vec(); TaggerFields { tag_fst, word_store_fst, tag_store: tagger.tag_store, } } } impl From<TaggerFields> for Tagger { fn from(data: TaggerFields) -> Self { let word_store_fst = Map::new(data.word_store_fst).unwrap(); let word_store: BiMap<String, WordIdInt> = word_store_fst .into_stream() .into_str_vec() .unwrap() .into_iter() .map(|(key, value)| (key, WordIdInt(value as u32))) .collect(); let mut tags = DefaultHashMap::new(); let mut groups = DefaultHashMap::new(); let tag_fst = Map::new(data.tag_fst).unwrap(); let mut stream = tag_fst.into_stream(); while let Some((key, value)) = stream.next() { let word = std::str::from_utf8(&key[..key.len() - 1]).unwrap(); let word_id = *word_store.get_by_left(word).unwrap(); let value_bytes = value.to_be_bytes(); let inflection_id = WordIdInt(u32::from_be_bytes([ value_bytes[0], value_bytes[1], value_bytes[2], value_bytes[3], ])); let pos_id = PosIdInt(u16::from_be_bytes([value_bytes[6], value_bytes[7]])); let group = groups.entry(inflection_id).or_insert_with(Vec::new); if !group.contains(&word_id) { group.push(word_id); } tags.entry(word_id) .or_insert_with(IndexMap::new) .entry(inflection_id) .or_insert_with(Vec::new) .push(pos_id); } Tagger { tags, tag_store: data.tag_store, word_store, groups, } } } /// The lexical tagger. 
#[derive(Default, Serialize, Deserialize, Clone)] #[serde(from = "TaggerFields", into = "TaggerFields")] pub struct Tagger { tags: DefaultHashMap<WordIdInt, IndexMap<WordIdInt, Vec<PosIdInt>>>, tag_store: BiMap<String, PosIdInt>, word_store: BiMap<String, WordIdInt>, groups: DefaultHashMap<WordIdInt, Vec<WordIdInt>>, } impl Tagger { fn get_lines<S1: AsRef<Path>, S2: AsRef<Path>>( paths: &[S1], remove_paths: &[S2], ) -> std::io::Result<Vec<(String, String, String)>> { let mut output = Vec::new(); let mut disallowed: Vec<String> = Vec::new(); for path in remove_paths { let file = File::open(path.as_ref())?; let reader = std::io::BufReader::new(file); for line in reader.lines() { let line = line?; if line.starts_with('#') { continue; } disallowed.push(line.to_string()); } } for path in paths { let file = File::open(path.as_ref())?; let reader = std::io::BufReader::new(file); for line in reader.lines() { let line = line?; if line.starts_with('#') { continue; } if disallowed.contains(&line) { continue; } let parts: Vec<_> = line.split('\t').collect(); let word = parts[0].to_string(); let inflection = parts[1].to_string(); let tag = parts[2].to_string(); output.push((word, inflection, tag)) } } Ok(output) } /// Creates a tagger from raw files. /// /// # Arguments /// * `paths`: Paths to files where each line contains the word, lemma and tag, respectively, /// separated by tabs, to be added to the tagger. /// * `remove_paths`: Paths to files where each line contains the word, lemma and tag, respectively, /// separated by tabs, to be removed from the tagger if present in the files from `paths`. pub fn from_dumps<S1: AsRef<Path>, S2: AsRef<Path>, S3: AsRef<str>>( paths: &[S1], remove_paths: &[S2], extra_tags: &[S3], common_words: &HashSet<String>, ) -> std::io::Result<Self> { let mut tags = DefaultHashMap::default(); let mut groups = DefaultHashMap::default(); let mut tag_store = HashSet::new(); let mut word_store = HashSet::new(); // hardcoded special tags tag_store.insert(""); tag_store.insert("SENT_START"); tag_store.insert("SENT_END"); tag_store.insert("UNKNOWN"); // add language specific special tags tag_store.extend(extra_tags.iter().map(|x| x.as_ref())); let lines = Tagger::get_lines(paths, remove_paths)?; let punct = "!\"#$%&\\'()*+,-./:;<=>?@[\\]^_`{|}~"; for i in 0..punct.len() { word_store.insert(&punct[i..(i + 1)]); } word_store.extend(common_words.iter().map(|x| x.as_str())); for (word, inflection, tag) in lines.iter() { word_store.insert(word); word_store.insert(inflection); tag_store.insert(tag); } // word store ids should be consistent across runs let mut word_store: Vec<_> = word_store.iter().collect(); word_store.sort(); // tag store ids should be consistent across runs let mut tag_store: Vec<_> = tag_store.iter().collect(); tag_store.sort(); let word_store: BiMap<_, _> = word_store .iter() .enumerate() .map(|(i, x)| (x.to_string(), WordIdInt(i as u32))) .collect(); let tag_store: BiMap<_, _> = tag_store .iter() .enumerate() .map(|(i, x)| (x.to_string(), PosIdInt(i as u16))) .collect(); for (word, inflection, tag) in lines.iter() { let word_id = word_store.get_by_left(word).unwrap(); let inflection_id = word_store.get_by_left(inflection).unwrap(); let pos_id = tag_store.get_by_left(tag).unwrap(); let group = groups.entry(*inflection_id).or_insert_with(Vec::new); if !group.contains(word_id) { group.push(*word_id); } tags.entry(*word_id) .or_insert_with(IndexMap::new) .entry(*inflection_id) .or_insert_with(Vec::new) .push(*pos_id); } Ok(Tagger { tags, groups, word_store, tag_store, 
}) } fn get_raw(&self, word: &str) -> Vec<WordData> { if let Some(map) = self .word_store .get_by_left(word) .and_then(|x| self.tags.get(x)) { let mut output = Vec::new(); for (key, value) in map.iter() { for pos_id in value { output.push(WordData::new( self.id_word(self.str_for_word_id(key).into()), self.id_tag(self.str_for_pos_id(pos_id)), )) } } output } else { Vec::new() } } fn get_strict_tags( &self, word: &str, add_lower: bool, add_lower_if_empty: bool, ) -> Vec<WordData> { let mut tags = self.get_raw(&word); let lower = word.to_lowercase(); if (add_lower || (add_lower_if_empty && tags.is_empty())) && (word != lower && (crate::utils::is_title_case(word) || crate::utils::is_uppercase(word))) { tags.extend(self.get_raw(&lower)); } tags } #[allow(dead_code)] // used by compile module pub(crate) fn tag_store(&self) -> &BiMap<String, PosIdInt> { &self.tag_store } #[allow(dead_code)] // used by compile module pub(crate) fn word_store(&self) -> &BiMap<String, WordIdInt> { &self.word_store } fn str_for_word_id(&self, id: &WordIdInt) -> &str { self.word_store .get_by_right(id) .expect("only valid word ids are created") } fn str_for_pos_id(&self, id: &PosIdInt) -> &str { self.tag_store .get_by_right(id) .expect("only valid pos ids are created") } pub fn id_tag<'a>(&self, tag: &'a str) -> PosId<'a> { PosId( tag, *self.tag_store.get_by_left(tag).unwrap_or_else(|| { error!( "'{}' not found in tag store, please add it to the `extra_tags`. Using UNKNOWN instead.", tag ); self.tag_store.get_by_left("UNKNOWN").expect("UNKNOWN tag must exist in tag store") }), ) } pub fn id_word<'t>(&'t self, text: Cow<'t, str>) -> WordId<'t> { let id = self.word_store.get_by_left(text.as_ref()).copied(); WordId(text, id) } /// Get the tags and lemmas (as [WordData][crate::types::WordData]) for the given word. /// /// # Arguments /// * `word`: The word to look up data for. /// * `add_lower`: Whether to add data for the lowercase variant of the word. /// * `use_compound_split_heuristic`: Whether to use a heuristic to split compound words. /// If true, will attempt to find tags for words which are longer than some cutoff and unknown by looking up tags /// for substrings from left to right until tags are found or a minimum length is reached. pub fn get_tags( &self, word: &str, add_lower: bool, use_compound_split_heuristic: bool, ) -> Vec<WordData> { let mut tags = self.get_strict_tags(word, add_lower, true); // compound splitting heuristic, seems to work reasonably well if use_compound_split_heuristic && tags.is_empty() { let n_chars = word.chars().count() as isize; if n_chars >= 7 { let indices = word .char_indices() .take(std::cmp::max(n_chars - 4, 0) as usize) .skip(1) .map(|x| x.0); // the word always has at least one char if the above condition is satisfied // but semantically this is false if no char exists let starts_with_uppercase = word.chars().next().map_or(false, |x| x.is_uppercase()); for i in indices { let next = if starts_with_uppercase { crate::utils::apply_to_first(&word[i..], |c| c.to_uppercase().collect()) } else { word[i..].to_string() }; let next_tags = self.get_strict_tags(&next, add_lower, false); if !next_tags.is_empty() { tags = next_tags .into_iter() .map(|mut x| { x.lemma = self.id_word( format!("{}{}", &word[..i], x.lemma.as_ref().to_lowercase()) .into(), ); x }) .collect(); break; } } } } tags } /// Get the words with the same lemma as the given lemma. pub fn get_group_members(&self, lemma: &str) -> Vec<&str>
}
{ self.word_store .get_by_left(lemma) .and_then(|x| self.groups.get(x)) .map(|vec| vec.iter().map(|x| self.str_for_word_id(x)).collect()) .unwrap_or_else(Vec::new) }
identifier_body
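// get_group_members above inverts tagging: given a lemma, it returns every
// surface form whose dump rows pointed at that lemma, via the `groups` table
// built in from_dumps. A toy sketch of that lemma -> members lookup (all data
// here is invented):
use std::collections::HashMap;

fn main() {
    let words = ["go", "goes", "went", "gone"]; // index doubles as a toy word id
    let mut groups: HashMap<usize, Vec<usize>> = HashMap::new();
    // every (word, lemma) dump row adds the word id to its lemma's group
    for id in 0..words.len() {
        groups.entry(0).or_insert_with(Vec::new).push(id); // lemma "go" has id 0
    }
    let members: Vec<&str> = groups
        .get(&0)
        .map(|v| v.iter().map(|&i| words[i]).collect())
        .unwrap_or_else(Vec::new);
    assert_eq!(members, vec!["go", "goes", "went", "gone"]);
}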
rca.rs
// Optimization for RCA // Ordinarily, just a, b, c, and d are scanned separately and then combined by joins. // a: (each product, each city) // can be cut on drill 1 // b: (all products, each city) // c: (each product, all cities) // can be cut on drill 1 // d: (all products, all cities) // // Note that external cuts are always valid (i.e. if above abcd were cut by a year). // // However, this results in extra scans, especially if there are no internal cuts (cuts on an rca // drill dim). // // The optimization is to derive the c and d aggregates from a and b. Since cuts are allowed on the // first drill in the rca, both a and b have to be scanned (b cannot be cut on the first drill). // // In clickhouse there is no partition, so it's trickier to do what looks like two different group // by. // // The general idea is to do one group by, in which both the measure and the 2nd drill are rolled // up. // - measure is rolled up by aggregate fn (e.g. sum) // - 2nd drill is rolled up by groupArray, which just collects all the values into an array in // order. // - the original measure is also rolled up by groupArray. // // Then the pivoted table is melted using Array Join on the 2nd drill and the original measure // (which would be a or c), while preserving the aggregated measure (c or d) from the pivoted // table. // // An example (not accounting for external cuts or dims) would be // select drill_1_id, drill_2_id, a, c from ( // select drill_1_id, groupArray(drill_2_id) as drill_2_id_s, groupArray(a) a_s, sum(a) as c from ( // select * from a_table // ) // group by drill_1_id // ) // array join drill_2_id_s as drill_2_id, a_s as a use itertools::join; use crate::sql::primary_agg::primary_agg; use super::{ TableSql, CutSql, DrilldownSql, MeasureSql, RcaSql, }; pub fn
( table: &TableSql, cuts: &[CutSql], drills: &[DrilldownSql], meas: &[MeasureSql], rca: &RcaSql, ) -> (String, String) { // append the correct rca drill to drilldowns // for a, both // for b, d2 // for c, d1 // for d, none let mut a_drills = drills.to_vec(); let mut b_drills = drills.to_vec(); let mut c_drills = drills.to_vec(); let d_drills = drills.to_vec(); a_drills.extend_from_slice(&rca.drill_1); a_drills.extend_from_slice(&rca.drill_2); b_drills.extend_from_slice(&rca.drill_2); c_drills.extend_from_slice(&rca.drill_1); println!("a: {:?}", a_drills); println!("b: {:?}", b_drills); println!("c: {:?}", c_drills); println!("d: {:?}", d_drills); // prepend the rca sql to meas let all_meas = { let mut temp = vec![rca.mea.clone()]; temp.extend_from_slice(meas); temp }; // for cuts, // - a can be cut on d1 and ext // - b cannot be int cut, only ext // - c can be cut on d1 and ext // - d cannot be int cut, only ext // // In the future, would I allow more cuts? Maybe depending on use case // // The blacklist is the drilldowns contained in each of a, b, c, d // // Note: parent of rca drills are not filtered, because they are meant // to limit the rca calculation space! // // don't need to worry about aliases, because cuts don't use aliases, // and are just matching against drill key col let ac_cut_cols_blacklist: Vec<_> = rca.drill_2.iter() .flat_map(|d| d.level_columns.iter().map(|l| l.key_column.clone())) .collect(); let bd_cut_cols_blacklist: Vec<_> = rca.drill_1.iter().chain(rca.drill_2.iter()) .flat_map(|d| d.level_columns.iter().map(|l| l.key_column.clone())) .collect(); let ac_cuts: Vec<_> = cuts.iter() .filter(|cut| { ac_cut_cols_blacklist.iter().find(|k| **k == cut.column).is_none() }) .cloned() .collect(); let bd_cuts: Vec<_> = cuts.iter() .filter(|cut| { bd_cut_cols_blacklist.iter().find(|k| **k == cut.column).is_none() }) .cloned() .collect(); println!("{:#?}", cuts); println!("{:#?}", ac_cuts); println!("{:#?}", bd_cuts); // now aggregate each component // // As an optimization, c is calculated from a, and d is calculated from b // If there's no internal cuts, then b, c, d are calculated from a. // First do aggregation for part a, b let (a, a_final_drills) = primary_agg(table, &ac_cuts, &a_drills, &all_meas, None); let (b, b_final_drills) = primary_agg(table, &bd_cuts, &b_drills, &all_meas, None); // replace final_m0 with letter name. // I put the rca measure at the beginning of the drills, so it should // always be m0 let a = a.replace("final_m0", "a"); let b = b.replace("final_m0", "b"); // for clickhouse, need to make groupArray and Array Join clauses for drill_1 for when // aggregating a to c, and b to d. 
// (drill_2 would be needed if going from a to b) // TODO refacto these lines out to helpers let group_array_rca_drill_2 = rca.drill_2.iter() .flat_map(|d| { let alias_postfix = &d.alias_postfix; d.level_columns.iter().map(move |l| { if let Some(ref name_col) = l.name_column { format!("groupArray({key_col}_{alias_postfix}) as {key_col}_{alias_postfix}_s, groupArray({name_col}_{alias_postfix}) as {name_col}_{alias_postfix}_s", key_col=l.key_column, name_col=name_col, alias_postfix=alias_postfix) } else { format!("groupArray({col}_{alias_postfix}) as {col}_{alias_postfix}_s", col=l.key_column, alias_postfix=alias_postfix) } }) }); let group_array_rca_drill_2 = join(group_array_rca_drill_2, ", "); let join_array_rca_drill_2 = rca.drill_2.iter() .flat_map(|d| { let alias_postfix = &d.alias_postfix; d.level_columns.iter().map(move |l| { if let Some(ref name_col) = l.name_column { format!("{key_col}_{alias_postfix}_s as {key_col}_{alias_postfix}, {name_col}_{alias_postfix}_s as {name_col}_{alias_postfix}", key_col=l.key_column, name_col=name_col, alias_postfix=alias_postfix) } else { format!("{col}_{alias_postfix}_s as {col}_{alias_postfix}", col=l.key_column, alias_postfix=alias_postfix) } }) }); let join_array_rca_drill_2 = join(join_array_rca_drill_2, ", "); // Do GroupArray and Array Join clauses for external measures, also let mea_cols = (1..=meas.len()) .map(|m_idx| format!("final_m{col}", col=m_idx)); let mea_cols = join(mea_cols, ", "); let group_array_ext_mea = (1..=meas.len()) .map(|m_idx| format!("groupArray(final_m{col}) as final_m{col}_s", col=m_idx)); let group_array_ext_mea = join(group_array_ext_mea, ", "); let join_array_ext_mea = (1..=meas.len()) .map(|m_idx| format!("final_m{col}_s as final_m{col}", col=m_idx)); let join_array_ext_mea = join(join_array_ext_mea, ", "); // groupArray cols (the drill_2 from rca) can't be included in the group by or select let c_drills_minus_rca_drill_2 = c_drills.iter() .filter(|d|!rca.drill_2.contains(&d)) .map(|d| d.col_alias_only_string()); let c_drills_minus_rca_drill_2 = join(c_drills_minus_rca_drill_2, ", "); let d_drills_minus_rca_drill_2 = d_drills.iter() .filter(|d|!rca.drill_2.contains(&d)) .map(|d| d.col_alias_only_string()); let d_drills_minus_rca_drill_2 = join(d_drills_minus_rca_drill_2, ", "); // a and c drills are kept as-is let a_drills_str = a_drills.iter() .map(|d| d.col_alias_only_string()); let a_drills_str = join(a_drills_str, ", "); let b_drills_str = b_drills.iter() .map(|d| d.col_alias_only_string()); let b_drills_str = join(b_drills_str, ", "); // Now add part c let ac = format!("select {}, {}{} a, c from \ (select {}, {}, {}{} groupArray(a) as a_s, sum(a) as c from ({}) group by {}) \ Array Join {}, {}{} a_s as a", a_drills_str, mea_cols, if mea_cols.is_empty() { "" } else { "," }, c_drills_minus_rca_drill_2, group_array_rca_drill_2, group_array_ext_mea, if group_array_ext_mea.is_empty() { "" } else { "," }, a, c_drills_minus_rca_drill_2, join_array_rca_drill_2, join_array_ext_mea, if join_array_ext_mea.is_empty() { "" } else { "," }, ); println!("{}", ac); // Now add part d let bd = if d_drills.is_empty() { format!("select {}, b, d from \ (select {}, groupArray(b) as b_s, sum(b) as d from ({})) \ Array Join {}, b_s as b", b_drills_str, group_array_rca_drill_2, b, join_array_rca_drill_2, ) } else { format!("select {}, b, d from \ (select {}, {}, groupArray(b) as b_s, sum(b) as d from ({}) group by {}) \ Array Join {}, b_s as b", b_drills_str, d_drills_minus_rca_drill_2, group_array_rca_drill_2, b, 
d_drills_minus_rca_drill_2, join_array_rca_drill_2, ) }; println!("bd: {}", bd); // now do the final join let mut final_sql = format!("select * from ({}) all inner join ({}) using {}", ac, bd, b_final_drills, ); // adding final measures at the end let final_ext_meas = if !meas.is_empty() { ", ".to_owned() + &join((1..meas.len()+1).map(|i| format!("final_m{}", i)), ", ") } else { "".to_owned() }; final_sql = format!("select {}, {}((a/b) / (c/d)) as rca{} from ({})", a_final_drills, if rca.debug { "a, b, c, d, " } else { "" }, final_ext_meas, final_sql, ); // SPECIAL CASE // Hack to deal with no drills on d // Later, make this better final_sql = final_sql.replace("select, ", "select "); final_sql = final_sql.replace("group by )", ")"); (final_sql, a_final_drills) }
calculate
identifier_name
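The final query above reduces to the classic RCA ratio (a/b)/(c/d). A minimal sketch of that arithmetic, with hypothetical numbers (nothing below comes from the crate itself):

// Minimal sketch of the RCA formula assembled by the final query:
// rca = (a/b) / (c/d). All values are hypothetical.
fn rca(a: f64, b: f64, c: f64, d: f64) -> f64 {
    // a: measure for (drill_1 member, drill_2 member)
    // b: measure for (all drill_1, drill_2 member)
    // c: measure for (drill_1 member, all drill_2)
    // d: measure for (all drill_1, all drill_2)
    (a / b) / (c / d)
}

fn main() {
    // A 20% local share against a 5% global share gives an RCA of 4.0.
    let value = rca(20.0, 100.0, 50.0, 1000.0);
    assert!((value - 4.0).abs() < 1e-9);
    println!("rca = {}", value);
}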
rca.rs
// Optimization for RCA // Ordinarily, just a, b, c, and d are scanned separately and then combined by joins. // a: (each product, each city) // can be cut on drill 1 // b: (all products, each city) // c: (each product, all cities) // can be cut on drill 1 // d: (all products, all cities) // // Note that external cuts are always valid (i.e. if above abcd were cut by a year). // // However, this results in extra scans, especially if there's no internal cuts (cuts on an rca // drill dim). // // The optimization is to derive the c and d aggregates from a and b. Since cuts are allowed on the // first drill in the rca, both a and b have to be scanned (b cannot be cut on the first drill). // // In clickhouse there is no partition, so it's trickier to do what looks like two different group // by. // // The general idea is to do one group by, in which both the measure and the 2nd drill are rolled // up. // - measure is rolled up by aggregate fn (e.g. sum) // - 2nd drill is rolled up by groupArray, which just collects all the values into an array in // order. // - the original measure is also rolled up by groupArray. // // Then the pivoted table is melted using Array Join on the 2nd drill and the original measure // (which would be a or c), while preserving the aggregated measure (c or d) from the pivoted // table. // // An example (not accounting for external cuts or dims) would be // select drill_1_id, drill_2_id, a, c from ( // select drill_1_id, groupArray(drill_2_id) as drill_2_id_s, groupArray(a) a_s, sum(a) as c from ( // select * from a_table // ) // group by drill_1_id // ) // array join drill_2_id_s as drill_2_id, a_s as a use itertools::join; use crate::sql::primary_agg::primary_agg; use super::{ TableSql, CutSql, DrilldownSql, MeasureSql, RcaSql, }; pub fn calculate( table: &TableSql, cuts: &[CutSql], drills: &[DrilldownSql], meas: &[MeasureSql], rca: &RcaSql, ) -> (String, String) { // append the correct rca drill to drilldowns // for a, both // for b, d2 // for c, d1 // for d, none let mut a_drills = drills.to_vec(); let mut b_drills = drills.to_vec(); let mut c_drills = drills.to_vec(); let d_drills = drills.to_vec(); a_drills.extend_from_slice(&rca.drill_1); a_drills.extend_from_slice(&rca.drill_2); b_drills.extend_from_slice(&rca.drill_2); c_drills.extend_from_slice(&rca.drill_1); println!("a: {:?}", a_drills); println!("b: {:?}", b_drills); println!("c: {:?}", c_drills); println!("d: {:?}", d_drills); // prepend the rca sql to meas let all_meas = { let mut temp = vec![rca.mea.clone()]; temp.extend_from_slice(meas); temp }; // for cuts, // - a can be cut on d1 and ext // - b cannot be int cut, only ext // - c can be cut on d1 and ext // - d cannot be int cut, only ext // // In the future, would I allow more cuts? Maybe depending on use case // // The blacklist is the drilldowns contained in each of a, b, c, d // // Note: parent of rca drills are not filtered, because they are meant // to limit the rca calculation space! 
// // don't need to worry about aliases, because cuts don't use aliases, // and are just matching against drill key col let ac_cut_cols_blacklist: Vec<_> = rca.drill_2.iter() .flat_map(|d| d.level_columns.iter().map(|l| l.key_column.clone())) .collect(); let bd_cut_cols_blacklist: Vec<_> = rca.drill_1.iter().chain(rca.drill_2.iter()) .flat_map(|d| d.level_columns.iter().map(|l| l.key_column.clone())) .collect(); let ac_cuts: Vec<_> = cuts.iter() .filter(|cut| { ac_cut_cols_blacklist.iter().find(|k| **k == cut.column).is_none() }) .cloned() .collect(); let bd_cuts: Vec<_> = cuts.iter() .filter(|cut| { bd_cut_cols_blacklist.iter().find(|k| **k == cut.column).is_none() }) .cloned() .collect(); println!("{:#?}", cuts); println!("{:#?}", ac_cuts); println!("{:#?}", bd_cuts); // now aggregate each component // // As an optimization, c is calculated from a, and d is calculated from b // If there's no internal cuts, then b, c, d are calculated from a. // First do aggregation for part a, b let (a, a_final_drills) = primary_agg(table, &ac_cuts, &a_drills, &all_meas, None); let (b, b_final_drills) = primary_agg(table, &bd_cuts, &b_drills, &all_meas, None); // replace final_m0 with letter name. // I put the rca measure at the beginning of the measures, so it should // always be m0 let a = a.replace("final_m0", "a"); let b = b.replace("final_m0", "b"); // for clickhouse, need to make groupArray and Array Join clauses for drill_1 for when // aggregating a to c, and b to d. // (drill_2 would be needed if going from a to b) // TODO refactor these lines out to helpers let group_array_rca_drill_2 = rca.drill_2.iter() .flat_map(|d| { let alias_postfix = &d.alias_postfix; d.level_columns.iter().map(move |l| { if let Some(ref name_col) = l.name_column
else { format!("groupArray({col}_{alias_postfix}) as {col}_{alias_postfix}_s", col=l.key_column, alias_postfix=alias_postfix) } }) }); let group_array_rca_drill_2 = join(group_array_rca_drill_2, ", "); let join_array_rca_drill_2 = rca.drill_2.iter() .flat_map(|d| { let alias_postfix = &d.alias_postfix; d.level_columns.iter().map(move |l| { if let Some(ref name_col) = l.name_column { format!("{key_col}_{alias_postfix}_s as {key_col}_{alias_postfix}, {name_col}_{alias_postfix}_s as {name_col}_{alias_postfix}", key_col=l.key_column, name_col=name_col, alias_postfix=alias_postfix) } else { format!("{col}_{alias_postfix}_s as {col}_{alias_postfix}", col=l.key_column, alias_postfix=alias_postfix) } }) }); let join_array_rca_drill_2 = join(join_array_rca_drill_2, ", "); // Do GroupArray and Array Join clauses for external measures, also let mea_cols = (1..=meas.len()) .map(|m_idx| format!("final_m{col}", col=m_idx)); let mea_cols = join(mea_cols, ", "); let group_array_ext_mea = (1..=meas.len()) .map(|m_idx| format!("groupArray(final_m{col}) as final_m{col}_s", col=m_idx)); let group_array_ext_mea = join(group_array_ext_mea, ", "); let join_array_ext_mea = (1..=meas.len()) .map(|m_idx| format!("final_m{col}_s as final_m{col}", col=m_idx)); let join_array_ext_mea = join(join_array_ext_mea, ", "); // groupArray cols (the drill_2 from rca) can't be included in the group by or select let c_drills_minus_rca_drill_2 = c_drills.iter() .filter(|d| !rca.drill_2.contains(&d)) .map(|d| d.col_alias_only_string()); let c_drills_minus_rca_drill_2 = join(c_drills_minus_rca_drill_2, ", "); let d_drills_minus_rca_drill_2 = d_drills.iter() .filter(|d| !rca.drill_2.contains(&d)) .map(|d| d.col_alias_only_string()); let d_drills_minus_rca_drill_2 = join(d_drills_minus_rca_drill_2, ", "); // a and b drills are kept as-is let a_drills_str = a_drills.iter() .map(|d| d.col_alias_only_string()); let a_drills_str = join(a_drills_str, ", "); let b_drills_str = b_drills.iter() .map(|d| d.col_alias_only_string()); let b_drills_str = join(b_drills_str, ", "); // Now add part c let ac = format!("select {}, {}{} a, c from \ (select {}, {}, {}{} groupArray(a) as a_s, sum(a) as c from ({}) group by {}) \ Array Join {}, {}{} a_s as a", a_drills_str, mea_cols, if mea_cols.is_empty() { "" } else { "," }, c_drills_minus_rca_drill_2, group_array_rca_drill_2, group_array_ext_mea, if group_array_ext_mea.is_empty() { "" } else { "," }, a, c_drills_minus_rca_drill_2, join_array_rca_drill_2, join_array_ext_mea, if join_array_ext_mea.is_empty() { "" } else { "," }, ); println!("{}", ac); // Now add part d let bd = if d_drills.is_empty() { format!("select {}, b, d from \ (select {}, groupArray(b) as b_s, sum(b) as d from ({})) \ Array Join {}, b_s as b", b_drills_str, group_array_rca_drill_2, b, join_array_rca_drill_2, ) } else { format!("select {}, b, d from \ (select {}, {}, groupArray(b) as b_s, sum(b) as d from ({}) group by {}) \ Array Join {}, b_s as b", b_drills_str, d_drills_minus_rca_drill_2, group_array_rca_drill_2, b, d_drills_minus_rca_drill_2, join_array_rca_drill_2, ) }; println!("bd: {}", bd); // now do the final join let mut final_sql = format!("select * from ({}) all inner join ({}) using {}", ac, bd, b_final_drills, ); // adding final measures at the end let final_ext_meas = if !meas.is_empty() { ", ".to_owned() + &join((1..meas.len()+1).map(|i| format!("final_m{}", i)), ", ") } else { "".to_owned() }; final_sql = format!("select {}, {}((a/b) / (c/d)) as rca{} from ({})", a_final_drills, if rca.debug { "a, b, c, d, " } else {
"" }, final_ext_meas, final_sql, ); // SPECIAL CASE // Hack to deal with no drills on d // Later, make this better final_sql = final_sql.replace("select, ", "select "); final_sql = final_sql.replace("group by )", ")"); (final_sql, a_final_drills) }
{ format!("groupArray({key_col}_{alias_postfix}) as {key_col}_{alias_postfix}_s, groupArray({name_col}_{alias_postfix}) as {name_col}_{alias_postfix}_s", key_col=l.key_column, name_col=name_col, alias_postfix=alias_postfix) }
conditional_block
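The groupArray/Array Join pattern in the header comment pivots and melts in one pass: a single GROUP BY over drill_1 collects the drill_2 values and the a measure into arrays while sum(a) produces c, then the arrays are re-expanded alongside c. A plain-Rust simulation of that pivot/melt, using hypothetical (drill_1_id, drill_2_id, a) rows rather than ClickHouse, shows the equivalence:

use std::collections::BTreeMap;

// Simulates deriving c (the sum over drill_2) from the a-level rows in a
// single pass, mirroring groupArray + Array Join. Data is hypothetical.
fn melt(rows: &[(u32, u32, f64)]) -> Vec<(u32, u32, f64, f64)> {
    // "group by drill_1": collect (drill_2_id, a) pairs (the groupArray
    // step) and roll up sum(a) as c at the same time.
    let mut groups: BTreeMap<u32, (Vec<(u32, f64)>, f64)> = BTreeMap::new();
    for &(d1, d2, a) in rows {
        let entry = groups.entry(d1).or_insert_with(|| (Vec::new(), 0.0));
        entry.0.push((d2, a));
        entry.1 += a;
    }
    // "array join": re-expand the arrays, carrying c alongside each a.
    let mut out = Vec::new();
    for (d1, (pairs, c)) in groups {
        for (d2, a) in pairs {
            out.push((d1, d2, a, c));
        }
    }
    out
}

fn main() {
    let rows = [(1, 10, 2.0), (1, 11, 3.0), (2, 10, 4.0)];
    for (d1, d2, a, c) in melt(&rows) {
        println!("d1={} d2={} a={} c={}", d1, d2, a, c);
    }
    // Both d1=1 rows carry c=5.0; the d1=2 row carries c=4.0.
}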
rca.rs
// Optimization for RCA // Ordinarily, just a, b, c, and d are scanned separately and then combined by joins. // a: (each product, each city) // can be cut on drill 1 // b: (all products, each city) // c: (each product, all cities) // can be cut on drill 1 // d: (all products, all cities) // // Note that external cuts are always valid (i.e. if above abcd were cut by a year). // // However, this results in extra scans, especially if there's no internal cuts (cuts on an rca
// // The optimization is to derive the c and d aggregates from a and b. Since cuts are allowed on the // first drill in the rca, both a and b have to be scanned (b cannot be cut on the first drill). // // In clickhouse there is no partition, so it's trickier to do what looks like two different group // by. // // The general idea is to do one group by, in which both the measure and the 2nd drill are rolled // up. // - measure is rolled up by aggregate fn (e.g. sum) // - 2nd drill is rolled up by groupArray, which just collects all the values into an array in // order. // - the original measure is also rolled up by groupArray. // // Then the pivoted table is melted using Array Join on the 2nd drill and the original measure // (which would be a or c), while preserving the aggregated measure (c or d) from the pivoted // table. // // An example (not accounting for external cuts or dims) would be // select drill_1_id, drill_2_id, a, c from ( // select drill_1_id, groupArray(drill_2_id) as drill_2_id_s, groupArray(a) a_s, sum(a) as c from ( // select * from a_table // ) // group by drill_1_id // ) // array join drill_2_id_s as drill_2_id, a_s as a use itertools::join; use crate::sql::primary_agg::primary_agg; use super::{ TableSql, CutSql, DrilldownSql, MeasureSql, RcaSql, }; pub fn calculate( table: &TableSql, cuts: &[CutSql], drills: &[DrilldownSql], meas: &[MeasureSql], rca: &RcaSql, ) -> (String, String) { // append the correct rca drill to drilldowns // for a, both // for b, d2 // for c, d1 // for d, none let mut a_drills = drills.to_vec(); let mut b_drills = drills.to_vec(); let mut c_drills = drills.to_vec(); let d_drills = drills.to_vec(); a_drills.extend_from_slice(&rca.drill_1); a_drills.extend_from_slice(&rca.drill_2); b_drills.extend_from_slice(&rca.drill_2); c_drills.extend_from_slice(&rca.drill_1); println!("a: {:?}", a_drills); println!("b: {:?}", b_drills); println!("c: {:?}", c_drills); println!("d: {:?}", d_drills); // prepend the rca sql to meas let all_meas = { let mut temp = vec![rca.mea.clone()]; temp.extend_from_slice(meas); temp }; // for cuts, // - a can be cut on d1 and ext // - b cannot be int cut, only ext // - c can be cut on d1 and ext // - d cannot be int cut, only ext // // In the future, would I allow more cuts? Maybe depending on use case // // The blacklist is the drilldowns contained in each of a, b, c, d // // Note: parent of rca drills are not filtered, because they are meant // to limit the rca calculation space! // // don't need to worry about aliases, because cuts don't use aliases, // and are just matching against drill key col let ac_cut_cols_blacklist: Vec<_> = rca.drill_2.iter() .flat_map(|d| d.level_columns.iter().map(|l| l.key_column.clone())) .collect(); let bd_cut_cols_blacklist: Vec<_> = rca.drill_1.iter().chain(rca.drill_2.iter()) .flat_map(|d| d.level_columns.iter().map(|l| l.key_column.clone())) .collect(); let ac_cuts: Vec<_> = cuts.iter() .filter(|cut| { ac_cut_cols_blacklist.iter().find(|k| **k == cut.column).is_none() }) .cloned() .collect(); let bd_cuts: Vec<_> = cuts.iter() .filter(|cut| { bd_cut_cols_blacklist.iter().find(|k| **k == cut.column).is_none() }) .cloned() .collect(); println!("{:#?}", cuts); println!("{:#?}", ac_cuts); println!("{:#?}", bd_cuts); // now aggregate each component // // As an optimization, c is calculated from a, and d is calculated from b // If there's no internal cuts, then b, c, d are calculated from a. 
// First do aggregation for part a, b let (a, a_final_drills) = primary_agg(table, &ac_cuts, &a_drills, &all_meas, None); let (b, b_final_drills) = primary_agg(table, &bd_cuts, &b_drills, &all_meas, None); // replace final_m0 with letter name. // I put the rca measure at the beginning of the measures, so it should // always be m0 let a = a.replace("final_m0", "a"); let b = b.replace("final_m0", "b"); // for clickhouse, need to make groupArray and Array Join clauses for drill_1 for when // aggregating a to c, and b to d. // (drill_2 would be needed if going from a to b) // TODO refactor these lines out to helpers let group_array_rca_drill_2 = rca.drill_2.iter() .flat_map(|d| { let alias_postfix = &d.alias_postfix; d.level_columns.iter().map(move |l| { if let Some(ref name_col) = l.name_column { format!("groupArray({key_col}_{alias_postfix}) as {key_col}_{alias_postfix}_s, groupArray({name_col}_{alias_postfix}) as {name_col}_{alias_postfix}_s", key_col=l.key_column, name_col=name_col, alias_postfix=alias_postfix) } else { format!("groupArray({col}_{alias_postfix}) as {col}_{alias_postfix}_s", col=l.key_column, alias_postfix=alias_postfix) } }) }); let group_array_rca_drill_2 = join(group_array_rca_drill_2, ", "); let join_array_rca_drill_2 = rca.drill_2.iter() .flat_map(|d| { let alias_postfix = &d.alias_postfix; d.level_columns.iter().map(move |l| { if let Some(ref name_col) = l.name_column { format!("{key_col}_{alias_postfix}_s as {key_col}_{alias_postfix}, {name_col}_{alias_postfix}_s as {name_col}_{alias_postfix}", key_col=l.key_column, name_col=name_col, alias_postfix=alias_postfix) } else { format!("{col}_{alias_postfix}_s as {col}_{alias_postfix}", col=l.key_column, alias_postfix=alias_postfix) } }) }); let join_array_rca_drill_2 = join(join_array_rca_drill_2, ", "); // Do GroupArray and Array Join clauses for external measures, also let mea_cols = (1..=meas.len()) .map(|m_idx| format!("final_m{col}", col=m_idx)); let mea_cols = join(mea_cols, ", "); let group_array_ext_mea = (1..=meas.len()) .map(|m_idx| format!("groupArray(final_m{col}) as final_m{col}_s", col=m_idx)); let group_array_ext_mea = join(group_array_ext_mea, ", "); let join_array_ext_mea = (1..=meas.len()) .map(|m_idx| format!("final_m{col}_s as final_m{col}", col=m_idx)); let join_array_ext_mea = join(join_array_ext_mea, ", "); // groupArray cols (the drill_2 from rca) can't be included in the group by or select let c_drills_minus_rca_drill_2 = c_drills.iter() .filter(|d| !rca.drill_2.contains(&d)) .map(|d| d.col_alias_only_string()); let c_drills_minus_rca_drill_2 = join(c_drills_minus_rca_drill_2, ", "); let d_drills_minus_rca_drill_2 = d_drills.iter() .filter(|d| !rca.drill_2.contains(&d)) .map(|d| d.col_alias_only_string()); let d_drills_minus_rca_drill_2 = join(d_drills_minus_rca_drill_2, ", "); // a and b drills are kept as-is let a_drills_str = a_drills.iter() .map(|d| d.col_alias_only_string()); let a_drills_str = join(a_drills_str, ", "); let b_drills_str = b_drills.iter() .map(|d| d.col_alias_only_string()); let b_drills_str = join(b_drills_str, ", "); // Now add part c let ac = format!("select {}, {}{} a, c from \ (select {}, {}, {}{} groupArray(a) as a_s, sum(a) as c from ({}) group by {}) \ Array Join {}, {}{} a_s as a", a_drills_str, mea_cols, if mea_cols.is_empty() { "" } else { "," }, c_drills_minus_rca_drill_2, group_array_rca_drill_2, group_array_ext_mea, if group_array_ext_mea.is_empty() { "" } else { "," }, a, c_drills_minus_rca_drill_2, join_array_rca_drill_2, join_array_ext_mea, if
join_array_ext_mea.is_empty() { "" } else { "," }, ); println!("{}", ac); // Now add part d let bd = if d_drills.is_empty() { format!("select {}, b, d from \ (select {}, groupArray(b) as b_s, sum(b) as d from ({})) \ Array Join {}, b_s as b", b_drills_str, group_array_rca_drill_2, b, join_array_rca_drill_2, ) } else { format!("select {}, b, d from \ (select {}, {}, groupArray(b) as b_s, sum(b) as d from ({}) group by {}) \ Array Join {}, b_s as b", b_drills_str, d_drills_minus_rca_drill_2, group_array_rca_drill_2, b, d_drills_minus_rca_drill_2, join_array_rca_drill_2, ) }; println!("bd: {}", bd); // now do the final join let mut final_sql = format!("select * from ({}) all inner join ({}) using {}", ac, bd, b_final_drills, ); // adding final measures at the end let final_ext_meas = if !meas.is_empty() { ", ".to_owned() + &join((1..meas.len()+1).map(|i| format!("final_m{}", i)), ", ") } else { "".to_owned() }; final_sql = format!("select {}, {}((a/b) / (c/d)) as rca{} from ({})", a_final_drills, if rca.debug { "a, b, c, d, " } else { "" }, final_ext_meas, final_sql, ); // SPECIAL CASE // Hack to deal with no drills on d // Later, make this better final_sql = final_sql.replace("select, ", "select "); final_sql = final_sql.replace("group by )", ")"); (final_sql, a_final_drills) }
// drill dim).
random_line_split
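The ac_cuts/bd_cuts filtering above keeps only cuts whose column is outside the relevant blacklist of rca drill key columns. A stripped-down sketch of the same filter, with plain strings standing in for CutSql and hypothetical column names:

// Hypothetical stand-ins for cut columns and the rca drill key columns;
// mirrors the blacklist filter that builds ac_cuts and bd_cuts.
fn filter_cuts<'a>(cuts: &[&'a str], blacklist: &[&str]) -> Vec<&'a str> {
    cuts.iter()
        .copied()
        .filter(|cut| !blacklist.contains(cut))
        .collect()
}

fn main() {
    let cuts = ["year", "product_id", "city_id"];
    // b and d may not be cut internally: both rca drill columns are blacklisted.
    let bd_blacklist = ["product_id", "city_id"];
    assert_eq!(filter_cuts(&cuts, &bd_blacklist), vec!["year"]);
    // a and c may be cut on drill 1, so only drill 2's column is blacklisted.
    let ac_blacklist = ["city_id"];
    assert_eq!(filter_cuts(&cuts, &ac_blacklist), vec!["year", "product_id"]);
}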
rca.rs
// Optimization for RCA // Ordinarily, just a, b, c, and d are scanned separately and then combined by joins. // a: (each product, each city) // can be cut on drill 1 // b: (all products, each city) // c: (each product, all cities) // can be cut on drill 1 // d: (all products, all cities) // // Note that external cuts are always valid (i.e. if above abcd were cut by a year). // // However, this results in extra scans, especially if there's no internal cuts (cuts on an rca // drill dim). // // The optimization is to derive the c and d aggregates from a and b. Since cuts are allowed on the // first drill in the rca, both a and b have to be scanned (b cannot be cut on the first drill). // // In clickhouse there is no partition, so it's trickier to do what looks like two different group // by. // // The general idea is to do one group by, in which both the measure and the 2nd drill are rolled // up. // - measure is rolled up by aggregate fn (e.g. sum) // - 2nd drill is rolled up by groupArray, which just collects all the values into an array in // order. // - the original measure is also rolled up by groupArray. // // Then the pivoted table is melted using Array Join on the 2nd drill and the original measure // (which would be a or c), while preserving the aggregated measure (c or d) from the pivoted // table. // // An example (not accounting for external cuts or dims) would be // select drill_1_id, drill_2_id, a, c from ( // select drill_1_id, groupArray(drill_2_id) as drill_2_id_s, groupArray(a) a_s, sum(a) as c from ( // select * from a_table // ) // group by drill_1_id // ) // array join drill_2_id_s as drill_2_id, a_s as a use itertools::join; use crate::sql::primary_agg::primary_agg; use super::{ TableSql, CutSql, DrilldownSql, MeasureSql, RcaSql, }; pub fn calculate( table: &TableSql, cuts: &[CutSql], drills: &[DrilldownSql], meas: &[MeasureSql], rca: &RcaSql, ) -> (String, String)
println!("c: {:?}", c_drills); println!("d: {:?}", d_drills); // prepend the rca sql to meas let all_meas = { let mut temp = vec![rca.mea.clone()]; temp.extend_from_slice(meas); temp }; // for cuts, // - a can be cut on d1 and ext // - b cannot be int cut, only ext // - c can be cut on d1 and ext // - d cannot be int cut, only ext // // In the future, would I allow more cuts? Maybe depending on use case // // The blacklist is the drilldowns contained in each of a, b, c, d // // Note: parent of rca drills are not filtered, because they are meant // to limit the rca calculation space! // // don't need to worry about aliases, because cuts don't use aliases, // and are just matching against drill key col let ac_cut_cols_blacklist: Vec<_> = rca.drill_2.iter() .flat_map(|d| d.level_columns.iter().map(|l| l.key_column.clone())) .collect(); let bd_cut_cols_blacklist: Vec<_> = rca.drill_1.iter().chain(rca.drill_2.iter()) .flat_map(|d| d.level_columns.iter().map(|l| l.key_column.clone())) .collect(); let ac_cuts: Vec<_> = cuts.iter() .filter(|cut| { ac_cut_cols_blacklist.iter().find(|k| **k == cut.column).is_none() }) .cloned() .collect(); let bd_cuts: Vec<_> = cuts.iter() .filter(|cut| { bd_cut_cols_blacklist.iter().find(|k| **k == cut.column).is_none() }) .cloned() .collect(); println!("{:#?}", cuts); println!("{:#?}", ac_cuts); println!("{:#?}", bd_cuts); // now aggregate each component // // As an optimization, c is calculated from a, and d is calculated from b // If there's no internal cuts, then b, c, d are calculated from a. // First do aggregation for part a, b let (a, a_final_drills) = primary_agg(table, &ac_cuts, &a_drills, &all_meas, None); let (b, b_final_drills) = primary_agg(table, &bd_cuts, &b_drills, &all_meas, None); // replace final_m0 with letter name. // I put the rca measure at the beginning of the drills, so it should // always be m0 let a = a.replace("final_m0", "a"); let b = b.replace("final_m0", "b"); // for clickhouse, need to make groupArray and Array Join clauses for drill_1 for when // aggregating a to c, and b to d. 
// (drill_2 would be needed if going from a to b) // TODO refactor these lines out to helpers let group_array_rca_drill_2 = rca.drill_2.iter() .flat_map(|d| { let alias_postfix = &d.alias_postfix; d.level_columns.iter().map(move |l| { if let Some(ref name_col) = l.name_column { format!("groupArray({key_col}_{alias_postfix}) as {key_col}_{alias_postfix}_s, groupArray({name_col}_{alias_postfix}) as {name_col}_{alias_postfix}_s", key_col=l.key_column, name_col=name_col, alias_postfix=alias_postfix) } else { format!("groupArray({col}_{alias_postfix}) as {col}_{alias_postfix}_s", col=l.key_column, alias_postfix=alias_postfix) } }) }); let group_array_rca_drill_2 = join(group_array_rca_drill_2, ", "); let join_array_rca_drill_2 = rca.drill_2.iter() .flat_map(|d| { let alias_postfix = &d.alias_postfix; d.level_columns.iter().map(move |l| { if let Some(ref name_col) = l.name_column { format!("{key_col}_{alias_postfix}_s as {key_col}_{alias_postfix}, {name_col}_{alias_postfix}_s as {name_col}_{alias_postfix}", key_col=l.key_column, name_col=name_col, alias_postfix=alias_postfix) } else { format!("{col}_{alias_postfix}_s as {col}_{alias_postfix}", col=l.key_column, alias_postfix=alias_postfix) } }) }); let join_array_rca_drill_2 = join(join_array_rca_drill_2, ", "); // Do GroupArray and Array Join clauses for external measures, also let mea_cols = (1..=meas.len()) .map(|m_idx| format!("final_m{col}", col=m_idx)); let mea_cols = join(mea_cols, ", "); let group_array_ext_mea = (1..=meas.len()) .map(|m_idx| format!("groupArray(final_m{col}) as final_m{col}_s", col=m_idx)); let group_array_ext_mea = join(group_array_ext_mea, ", "); let join_array_ext_mea = (1..=meas.len()) .map(|m_idx| format!("final_m{col}_s as final_m{col}", col=m_idx)); let join_array_ext_mea = join(join_array_ext_mea, ", "); // groupArray cols (the drill_2 from rca) can't be included in the group by or select let c_drills_minus_rca_drill_2 = c_drills.iter() .filter(|d| !rca.drill_2.contains(&d)) .map(|d| d.col_alias_only_string()); let c_drills_minus_rca_drill_2 = join(c_drills_minus_rca_drill_2, ", "); let d_drills_minus_rca_drill_2 = d_drills.iter() .filter(|d| !rca.drill_2.contains(&d)) .map(|d| d.col_alias_only_string()); let d_drills_minus_rca_drill_2 = join(d_drills_minus_rca_drill_2, ", "); // a and b drills are kept as-is let a_drills_str = a_drills.iter() .map(|d| d.col_alias_only_string()); let a_drills_str = join(a_drills_str, ", "); let b_drills_str = b_drills.iter() .map(|d| d.col_alias_only_string()); let b_drills_str = join(b_drills_str, ", "); // Now add part c let ac = format!("select {}, {}{} a, c from \ (select {}, {}, {}{} groupArray(a) as a_s, sum(a) as c from ({}) group by {}) \ Array Join {}, {}{} a_s as a", a_drills_str, mea_cols, if mea_cols.is_empty() { "" } else { "," }, c_drills_minus_rca_drill_2, group_array_rca_drill_2, group_array_ext_mea, if group_array_ext_mea.is_empty() { "" } else { "," }, a, c_drills_minus_rca_drill_2, join_array_rca_drill_2, join_array_ext_mea, if join_array_ext_mea.is_empty() { "" } else { "," }, ); println!("{}", ac); // Now add part d let bd = if d_drills.is_empty() { format!("select {}, b, d from \ (select {}, groupArray(b) as b_s, sum(b) as d from ({})) \ Array Join {}, b_s as b", b_drills_str, group_array_rca_drill_2, b, join_array_rca_drill_2, ) } else { format!("select {}, b, d from \ (select {}, {}, groupArray(b) as b_s, sum(b) as d from ({}) group by {}) \ Array Join {}, b_s as b", b_drills_str, d_drills_minus_rca_drill_2, group_array_rca_drill_2, b,
d_drills_minus_rca_drill_2, join_array_rca_drill_2, ) }; println!("bd: {}", bd); // now do the final join let mut final_sql = format!("select * from ({}) all inner join ({}) using {}", ac, bd, b_final_drills, ); // adding final measures at the end let final_ext_meas = if !meas.is_empty() { ", ".to_owned() + &join((1..meas.len()+1).map(|i| format!("final_m{}", i)), ", ") } else { "".to_owned() }; final_sql = format!("select {}, {}((a/b) / (c/d)) as rca{} from ({})", a_final_drills, if rca.debug { "a, b, c, d, " } else { "" }, final_ext_meas, final_sql, ); // SPECIAL CASE // Hack to deal with no drills on d // Later, make this better final_sql = final_sql.replace("select, ", "select "); final_sql = final_sql.replace("group by )", ")"); (final_sql, a_final_drills) }
{ // append the correct rca drill to drilldowns // for a, both // for b, d2 // for c, d1 // for d, none let mut a_drills = drills.to_vec(); let mut b_drills = drills.to_vec(); let mut c_drills = drills.to_vec(); let d_drills = drills.to_vec(); a_drills.extend_from_slice(&rca.drill_1); a_drills.extend_from_slice(&rca.drill_2); b_drills.extend_from_slice(&rca.drill_2); c_drills.extend_from_slice(&rca.drill_1); println!("a: {:?}", a_drills); println!("b: {:?}", b_drills);
identifier_body
mod.rs
use std::cell::RefCell; use std::collections::HashMap; use std::fs::{self, File}; use std::path::Path; use glium; use glium::backend::Facade; use image::{self, DynamicImage, GenericImage, Rgba}; use texture_packer::Rect; use texture_packer::SkylinePacker; use texture_packer::{TexturePacker, TexturePackerConfig}; use texture_packer::importer::ImageImporter; use texture_packer::exporter::ImageExporter; mod config; pub mod font; pub mod texture_atlas; use self::config::TileAtlasConfig; pub type Texture2d = glium::texture::CompressedSrgbTexture2d; type AnimFrames = u64; type AnimMillisDelay = u64; #[derive(Serialize, Deserialize, Clone)] pub enum TileKind { Static, Animated(AnimFrames, AnimMillisDelay), } #[derive(Serialize, Deserialize, Clone, Debug)] pub struct AtlasRect { pub x: u32, pub y: u32, pub w: u32, pub h: u32, } impl From<Rect> for AtlasRect { fn from(rect: Rect) -> AtlasRect { AtlasRect { x: rect.x, y: rect.y, w: rect.w, h: rect.h, } } } pub type AtlasTextureRegion = (f32, f32, f32, f32); pub enum TileShape { Static, Autotile, Wall, } #[derive(Serialize, Deserialize, Clone)] pub struct AtlasTileData { pub offset: (u32, u32), pub is_autotile: bool, pub tile_kind: TileKind, } #[derive(Serialize, Deserialize, Clone)] pub struct AtlasTile { pub data: AtlasTileData, pub cached_rect: RefCell<Option<AtlasTextureRegion>>, } #[derive(Serialize, Deserialize, Clone)] pub struct AtlasFrame { tile_size: (u32, u32), texture_idx: usize, rect: AtlasRect, offsets: HashMap<String, AtlasTile>, } impl AtlasFrame { pub fn new(texture_idx: usize, rect: Rect, tile_size: (u32, u32)) -> Self { AtlasFrame { tile_size: tile_size, texture_idx: texture_idx, rect: AtlasRect::from(rect), offsets: HashMap::new(), } } } pub type TilePacker<'a> = TexturePacker<'a, DynamicImage, SkylinePacker<Rgba<u8>>>; pub struct TileAtlas { config: TileAtlasConfig, textures: Vec<Texture2d>, indices: Vec<String>, } pub struct TileAtlasBuilder<'a> { locations: HashMap<String, String>, frames: HashMap<String, AtlasFrame>, packers: Vec<TilePacker<'a>>, pub file_hash: String, } impl <'a> TileAtlasBuilder<'a> { pub fn new() -> Self { let mut builder = TileAtlasBuilder { locations: HashMap::new(), frames: HashMap::new(), packers: Vec::new(), file_hash: String::new(), }; builder.add_packer(); builder } pub fn add_tile(&mut self, path_str: &str, index: String, tile_data: AtlasTileData) { let key = path_str.to_string(); assert!(self.frames.contains_key(&path_str.to_string())); { let mut frame = self.frames.get_mut(&key).unwrap(); assert!(!frame.offsets.contains_key(&index)); let tile = AtlasTile { data: tile_data, cached_rect: RefCell::new(None), }; frame.offsets.insert(index.clone(), tile); self.locations.insert(index, key); } } pub fn add_frame(&mut self, path_string: &str, tile_size: (u32, u32)) { if self.frames.contains_key(path_string) { return; } let path = Path::new(&path_string); let texture = ImageImporter::import_from_file(path).unwrap(); for (idx, packer) in self.packers.iter_mut().enumerate() { if packer.can_pack(&texture) { packer.pack_own(path_string.to_string(), texture).unwrap(); let rect = packer.get_frame(path_string).unwrap().frame; self.frames.insert(path_string.to_string(), AtlasFrame::new(idx, rect, tile_size)); // cannot return self here, since self already borrowed, so // cannot use builder pattern. 
return; } } self.add_packer(); { // complains that borrow doesn't last long enough // let mut packer = self.newest_packer_mut(); let packer_idx = self.packers.len() - 1; let mut packer = &mut self.packers[packer_idx]; packer.pack_own(path_string.to_string(), texture).unwrap(); let rect = packer.get_frame(&path_string).unwrap().frame; self.frames.insert(path_string.to_string(), AtlasFrame::new(packer_idx, rect, tile_size)); } } fn add_packer(&mut self) { let config = TexturePackerConfig { max_width: 2048, max_height: 2048, allow_rotation: false, texture_outlines: false, trim: false, texture_padding: 0, ..Default::default() }; self.packers.push(TexturePacker::new_skyline(config)); } pub fn build<F: Facade>(&self, display: &F, packed_tex_folder: &str) -> TileAtlas { let mut textures = Vec::new(); let packed_folder_path = config::get_config_cache_path(packed_tex_folder); if Path::exists(packed_folder_path.as_path()) { fs::remove_dir_all(packed_folder_path.as_path()).unwrap(); } fs::create_dir_all(packed_folder_path.as_path()).unwrap(); for (idx, packer) in self.packers.iter().enumerate() { let image = ImageExporter::export(packer).unwrap(); let mut file_path = packed_folder_path.clone(); file_path.push(&format!("{}.png", idx)); let mut file = File::create(file_path).unwrap(); image.save(&mut file, image::PNG).unwrap(); textures.push(make_texture(display, image)); } println!("Saved {}", packed_tex_folder); let config = TileAtlasConfig { locations: self.locations.clone(), frames: self.frames.clone(), file_hash: self.file_hash.clone(), }; config::write_tile_atlas_config(&config, packed_tex_folder); TileAtlas::new(config, textures) } } impl TileAtlas { pub fn new(config: TileAtlasConfig, textures: Vec<Texture2d>) -> Self { let mut atlas = TileAtlas { config: config, textures: textures, indices: Vec::new(), }; atlas.cache_tile_regions(); atlas } /// Precalculates the UV rectangles for individual tiles to avoid the /// overhead of recalculating them on lookup. It must be done before the /// tile atlas can be used. fn cache_tile_regions(&mut self) { for frame in self.config.frames.values() { let (frame_w, frame_h) = self.frame_size(frame); for (tile_type, tile) in frame.offsets.iter() { let tex_ratio = self.get_sprite_tex_ratio(tile_type); let add_offset = get_add_offset(&frame.rect, &frame.tile_size); let ratio = if tile.data.is_autotile { 2 } else { 1 }; let tx = ((tile.data.offset.0 + add_offset.0) * ratio) as f32 * tex_ratio[0]; let ty = ((tile.data.offset.1 + add_offset.1) * ratio) as f32 * tex_ratio[1]; let tw = (frame.tile_size.0 * ratio) as f32 / frame_w as f32; let th = (frame.tile_size.1 * ratio) as f32 / frame_h as f32; *tile.cached_rect.borrow_mut() = Some((tx, ty, tw, th)); } } self.indices = self.config.locations.keys().map(|l| l.to_string()).collect(); } fn frame_size(&self, frame: &AtlasFrame) -> (u32, u32)
fn texture_size(&self, texture_idx: usize) -> (u32, u32) { self.textures[texture_idx].dimensions() } fn get_frame(&self, tile_type: &str) -> &AtlasFrame { let tex_name = &self.config.locations[tile_type]; &self.config.frames[tex_name] } pub fn get_tile_texture_idx(&self, tile_type: &str) -> usize { self.get_frame(tile_type).texture_idx } pub fn get_tilemap_tex_ratio(&self, texture_idx: usize) -> [f32; 2] { let dimensions = self.texture_size(texture_idx); let cols: u32 = dimensions.0 / 24; let rows: u32 = dimensions.1 / 24; [1.0 / cols as f32, 1.0 / rows as f32] } pub fn get_sprite_tex_ratio(&self, tile_type: &str) -> [f32; 2] { let frame = self.get_frame(tile_type); let (mut sx, mut sy) = frame.tile_size; if frame.offsets[tile_type].data.is_autotile { // divide the autotile into 24x24 from 48x48 sx /= 2; sy /= 2; } let dimensions = self.frame_size(frame); let cols: f32 = dimensions.0 as f32 / sx as f32; let rows: f32 = dimensions.1 as f32 / sy as f32; [1.0 / cols, 1.0 / rows] } pub fn get_tile_texture_size(&self, tile_type: &str) -> (u32, u32) { self.get_frame(tile_type).tile_size } pub fn get_tile(&self, tile_type: &str) -> &AtlasTile { let frame = self.get_frame(tile_type); &frame.offsets[tile_type] } pub fn get_texture_offset(&self, tile_type: &str, msecs: u64) -> (f32, f32) { let frame = self.get_frame(tile_type); let tile = &frame.offsets[tile_type]; let (mut tx, ty, tw, _) = tile.cached_rect.borrow() .expect("Texture atlas regions weren't cached yet."); match tile.data.tile_kind { TileKind::Static => (), TileKind::Animated(frame_count, delay) => { let current_frame = msecs / delay; let x_index_offset = current_frame % frame_count; tx += x_index_offset as f32 * tw; } } (tx, ty) } pub fn get_tile_index(&self, tile_kind: &str) -> usize { self.indices.iter().enumerate().find(|&(_, i)| i == tile_kind).unwrap().0 } fn get_tile_kind_indexed(&self, tile_idx: usize) -> &String { &self.indices[tile_idx] } pub fn get_texture_offset_indexed(&self, tile_idx: usize, msecs: u64) -> (f32, f32) { let kind = self.get_tile_kind_indexed(tile_idx); self.get_texture_offset(kind, msecs) } pub fn get_texture(&self, idx: usize) -> &Texture2d { &self.textures[idx] } pub fn passes(&self) -> usize { self.textures.len() } } fn get_add_offset(rect: &AtlasRect, tile_size: &(u32, u32)) -> (u32, u32) { let ceil = |a, b| (a + b - 1) / b; let cols: u32 = ceil(rect.x, tile_size.0); let rows: u32 = ceil(rect.y, tile_size.1); (cols, rows) } pub fn make_texture<F: Facade>(display: &F, image: DynamicImage) -> Texture2d { let dimensions = image.dimensions(); let image = glium::texture::RawImage2d::from_raw_rgba_reversed(image.to_rgba().into_raw(), dimensions); Texture2d::new(display, image).unwrap() }
{ self.texture_size(frame.texture_idx) }
identifier_body
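The UV arithmetic in cache_tile_regions is easiest to check with concrete numbers. A worked sketch, assuming a hypothetical 96x96 atlas texture holding 48x48 non-autotile tiles with the frame rect at the origin:

// Worked example of the UV arithmetic in cache_tile_regions, with
// hypothetical numbers: a 96x96 atlas texture, 48x48 tiles, a
// non-autotile tile at grid offset (1, 0), frame rect at (0, 0).
fn main() {
    let (frame_w, frame_h) = (96u32, 96u32);
    let tile_size = (48u32, 48u32);
    let offset = (1u32, 0u32);
    let add_offset = (0u32, 0u32); // rect at the origin adds no columns/rows
    let ratio = 1u32;              // would be 2 for an autotile

    // sprite tex ratio: one tile as a fraction of the whole frame
    let tex_ratio = [
        1.0 / (frame_w as f32 / tile_size.0 as f32),
        1.0 / (frame_h as f32 / tile_size.1 as f32),
    ];

    let tx = ((offset.0 + add_offset.0) * ratio) as f32 * tex_ratio[0];
    let ty = ((offset.1 + add_offset.1) * ratio) as f32 * tex_ratio[1];
    let tw = (tile_size.0 * ratio) as f32 / frame_w as f32;
    let th = (tile_size.1 * ratio) as f32 / frame_h as f32;

    // The second tile in the top row spans u in [0.5, 1.0], v in [0.0, 0.5].
    assert_eq!((tx, ty, tw, th), (0.5, 0.0, 0.5, 0.5));
}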
mod.rs
use std::cell::RefCell; use std::collections::HashMap; use std::fs::{self, File}; use std::path::Path; use glium; use glium::backend::Facade; use image::{self, DynamicImage, GenericImage, Rgba}; use texture_packer::Rect; use texture_packer::SkylinePacker; use texture_packer::{TexturePacker, TexturePackerConfig}; use texture_packer::importer::ImageImporter; use texture_packer::exporter::ImageExporter; mod config; pub mod font; pub mod texture_atlas; use self::config::TileAtlasConfig; pub type Texture2d = glium::texture::CompressedSrgbTexture2d; type AnimFrames = u64; type AnimMillisDelay = u64; #[derive(Serialize, Deserialize, Clone)] pub enum TileKind { Static, Animated(AnimFrames, AnimMillisDelay), } #[derive(Serialize, Deserialize, Clone, Debug)] pub struct AtlasRect { pub x: u32, pub y: u32, pub w: u32, pub h: u32, } impl From<Rect> for AtlasRect { fn from(rect: Rect) -> AtlasRect { AtlasRect { x: rect.x, y: rect.y, w: rect.w, h: rect.h, } } } pub type AtlasTextureRegion = (f32, f32, f32, f32); pub enum TileShape { Static, Autotile, Wall, } #[derive(Serialize, Deserialize, Clone)] pub struct AtlasTileData { pub offset: (u32, u32), pub is_autotile: bool, pub tile_kind: TileKind, } #[derive(Serialize, Deserialize, Clone)] pub struct AtlasTile { pub data: AtlasTileData, pub cached_rect: RefCell<Option<AtlasTextureRegion>>, } #[derive(Serialize, Deserialize, Clone)] pub struct AtlasFrame { tile_size: (u32, u32), texture_idx: usize, rect: AtlasRect, offsets: HashMap<String, AtlasTile>, } impl AtlasFrame { pub fn new(texture_idx: usize, rect: Rect, tile_size: (u32, u32)) -> Self { AtlasFrame { tile_size: tile_size, texture_idx: texture_idx, rect: AtlasRect::from(rect), offsets: HashMap::new(), } } } pub type TilePacker<'a> = TexturePacker<'a, DynamicImage, SkylinePacker<Rgba<u8>>>; pub struct TileAtlas { config: TileAtlasConfig, textures: Vec<Texture2d>, indices: Vec<String>, } pub struct TileAtlasBuilder<'a> { locations: HashMap<String, String>, frames: HashMap<String, AtlasFrame>, packers: Vec<TilePacker<'a>>, pub file_hash: String, } impl <'a> TileAtlasBuilder<'a> { pub fn new() -> Self { let mut builder = TileAtlasBuilder { locations: HashMap::new(), frames: HashMap::new(), packers: Vec::new(), file_hash: String::new(), }; builder.add_packer(); builder } pub fn add_tile(&mut self, path_str: &str, index: String, tile_data: AtlasTileData) { let key = path_str.to_string(); assert!(self.frames.contains_key(&path_str.to_string())); { let mut frame = self.frames.get_mut(&key).unwrap(); assert!(!frame.offsets.contains_key(&index)); let tile = AtlasTile { data: tile_data, cached_rect: RefCell::new(None), }; frame.offsets.insert(index.clone(), tile); self.locations.insert(index, key); } } pub fn add_frame(&mut self, path_string: &str, tile_size: (u32, u32)) { if self.frames.contains_key(path_string) { return; } let path = Path::new(&path_string); let texture = ImageImporter::import_from_file(path).unwrap(); for (idx, packer) in self.packers.iter_mut().enumerate() { if packer.can_pack(&texture) { packer.pack_own(path_string.to_string(), texture).unwrap(); let rect = packer.get_frame(path_string).unwrap().frame; self.frames.insert(path_string.to_string(), AtlasFrame::new(idx, rect, tile_size)); // cannot return self here, since self already borrowed, so // cannot use builder pattern. 
return; } } self.add_packer(); { // complains that borrow doesn't last long enough // let mut packer = self.newest_packer_mut(); let packer_idx = self.packers.len() - 1; let mut packer = &mut self.packers[packer_idx]; packer.pack_own(path_string.to_string(), texture).unwrap(); let rect = packer.get_frame(&path_string).unwrap().frame; self.frames.insert(path_string.to_string(), AtlasFrame::new(packer_idx, rect, tile_size)); } } fn add_packer(&mut self) { let config = TexturePackerConfig { max_width: 2048, max_height: 2048, allow_rotation: false, texture_outlines: false, trim: false, texture_padding: 0, ..Default::default() }; self.packers.push(TexturePacker::new_skyline(config)); } pub fn build<F: Facade>(&self, display: &F, packed_tex_folder: &str) -> TileAtlas { let mut textures = Vec::new(); let packed_folder_path = config::get_config_cache_path(packed_tex_folder); if Path::exists(packed_folder_path.as_path()) { fs::remove_dir_all(packed_folder_path.as_path()).unwrap(); } fs::create_dir_all(packed_folder_path.as_path()).unwrap(); for (idx, packer) in self.packers.iter().enumerate() { let image = ImageExporter::export(packer).unwrap(); let mut file_path = packed_folder_path.clone(); file_path.push(&format!("{}.png", idx)); let mut file = File::create(file_path).unwrap(); image.save(&mut file, image::PNG).unwrap(); textures.push(make_texture(display, image)); } println!("Saved {}", packed_tex_folder); let config = TileAtlasConfig { locations: self.locations.clone(), frames: self.frames.clone(), file_hash: self.file_hash.clone(), }; config::write_tile_atlas_config(&config, packed_tex_folder); TileAtlas::new(config, textures) } } impl TileAtlas { pub fn new(config: TileAtlasConfig, textures: Vec<Texture2d>) -> Self { let mut atlas = TileAtlas { config: config, textures: textures, indices: Vec::new(), }; atlas.cache_tile_regions(); atlas } /// Precalculates the UV rectangles for individual tiles to avoid the /// overhead of recalculating them on lookup. It must be done before the /// tile atlas can be used.
fn cache_tile_regions(&mut self) { for frame in self.config.frames.values() { let (frame_w, frame_h) = self.frame_size(frame); for (tile_type, tile) in frame.offsets.iter() { let tex_ratio = self.get_sprite_tex_ratio(tile_type); let add_offset = get_add_offset(&frame.rect, &frame.tile_size); let ratio = if tile.data.is_autotile { 2 } else { 1 }; let tx = ((tile.data.offset.0 + add_offset.0) * ratio) as f32 * tex_ratio[0]; let ty = ((tile.data.offset.1 + add_offset.1) * ratio) as f32 * tex_ratio[1]; let tw = (frame.tile_size.0 * ratio) as f32 / frame_w as f32; let th = (frame.tile_size.1 * ratio) as f32 / frame_h as f32; *tile.cached_rect.borrow_mut() = Some((tx, ty, tw, th)); } } self.indices = self.config.locations.keys().map(|l| l.to_string()).collect(); } fn frame_size(&self, frame: &AtlasFrame) -> (u32, u32) { self.texture_size(frame.texture_idx) } fn texture_size(&self, texture_idx: usize) -> (u32, u32) { self.textures[texture_idx].dimensions() } fn get_frame(&self, tile_type: &str) -> &AtlasFrame { let tex_name = &self.config.locations[tile_type]; &self.config.frames[tex_name] } pub fn get_tile_texture_idx(&self, tile_type: &str) -> usize { self.get_frame(tile_type).texture_idx } pub fn get_tilemap_tex_ratio(&self, texture_idx: usize) -> [f32; 2] { let dimensions = self.texture_size(texture_idx); let cols: u32 = dimensions.0 / 24; let rows: u32 = dimensions.1 / 24; [1.0 / cols as f32, 1.0 / rows as f32] } pub fn get_sprite_tex_ratio(&self, tile_type: &str) -> [f32; 2] { let frame = self.get_frame(tile_type); let (mut sx, mut sy) = frame.tile_size; if frame.offsets[tile_type].data.is_autotile { // divide the autotile into 24x24 from 48x48 sx /= 2; sy /= 2; } let dimensions = self.frame_size(frame); let cols: f32 = dimensions.0 as f32 / sx as f32; let rows: f32 = dimensions.1 as f32 / sy as f32; [1.0 / cols, 1.0 / rows] } pub fn get_tile_texture_size(&self, tile_type: &str) -> (u32, u32) { self.get_frame(tile_type).tile_size } pub fn get_tile(&self, tile_type: &str) -> &AtlasTile { let frame = self.get_frame(tile_type); &frame.offsets[tile_type] } pub fn get_texture_offset(&self, tile_type: &str, msecs: u64) -> (f32, f32) { let frame = self.get_frame(tile_type); let tile = &frame.offsets[tile_type]; let (mut tx, ty, tw, _) = tile.cached_rect.borrow() .expect("Texture atlas regions weren't cached yet."); match tile.data.tile_kind { TileKind::Static => (), TileKind::Animated(frame_count, delay) => { let current_frame = msecs / delay; let x_index_offset = current_frame % frame_count; tx += x_index_offset as f32 * tw; } } (tx, ty) } pub fn get_tile_index(&self, tile_kind: &str) -> usize { self.indices.iter().enumerate().find(|&(_, i)| i == tile_kind).unwrap().0 } fn get_tile_kind_indexed(&self, tile_idx: usize) -> &String { &self.indices[tile_idx] } pub fn
(&self, tile_idx: usize, msecs: u64) -> (f32, f32) { let kind = self.get_tile_kind_indexed(tile_idx); self.get_texture_offset(kind, msecs) } pub fn get_texture(&self, idx: usize) -> &Texture2d { &self.textures[idx] } pub fn passes(&self) -> usize { self.textures.len() } } fn get_add_offset(rect: &AtlasRect, tile_size: &(u32, u32)) -> (u32, u32) { let ceil = |a, b| (a + b - 1) / b; let cols: u32 = ceil(rect.x, tile_size.0); let rows: u32 = ceil(rect.y, tile_size.1); (cols, rows) } pub fn make_texture<F: Facade>(display: &F, image: DynamicImage) -> Texture2d { let dimensions = image.dimensions(); let image = glium::texture::RawImage2d::from_raw_rgba_reversed(image.to_rgba().into_raw(), dimensions); Texture2d::new(display, image).unwrap() }
get_texture_offset_indexed
identifier_name
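get_texture_offset advances an animated tile by whole frames: msecs / delay gives the elapsed frame count, the modulo wraps it to frame_count, and the result shifts tx by one tile width per frame. A worked sketch with hypothetical timing values:

// Worked example of the animation offset in get_texture_offset:
// 4 frames, 250 ms per frame, tile width tw = 0.25 in UV space.
fn main() {
    let (frame_count, delay) = (4u64, 250u64);
    let tw = 0.25f32;
    let base_tx = 0.0f32;

    for &msecs in &[0u64, 260, 510, 760, 1_010] {
        let current_frame = msecs / delay;                // whole frames elapsed
        let x_index_offset = current_frame % frame_count; // wrap around
        let tx = base_tx + x_index_offset as f32 * tw;
        println!("t={}ms -> frame {} -> tx={}", msecs, x_index_offset, tx);
    }
    // t=1010ms wraps back to frame 0, so tx returns to 0.0.
}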
mod.rs
use std::cell::RefCell; use std::collections::HashMap; use std::fs::{self, File}; use std::path::Path; use glium; use glium::backend::Facade; use image::{self, DynamicImage, GenericImage, Rgba}; use texture_packer::Rect; use texture_packer::SkylinePacker; use texture_packer::{TexturePacker, TexturePackerConfig}; use texture_packer::importer::ImageImporter; use texture_packer::exporter::ImageExporter; mod config; pub mod font; pub mod texture_atlas; use self::config::TileAtlasConfig; pub type Texture2d = glium::texture::CompressedSrgbTexture2d; type AnimFrames = u64; type AnimMillisDelay = u64; #[derive(Serialize, Deserialize, Clone)] pub enum TileKind { Static, Animated(AnimFrames, AnimMillisDelay), } #[derive(Serialize, Deserialize, Clone, Debug)] pub struct AtlasRect { pub x: u32, pub y: u32, pub w: u32, pub h: u32, } impl From<Rect> for AtlasRect { fn from(rect: Rect) -> AtlasRect { AtlasRect { x: rect.x, y: rect.y, w: rect.w, h: rect.h, } } } pub type AtlasTextureRegion = (f32, f32, f32, f32); pub enum TileShape { Static, Autotile, Wall, } #[derive(Serialize, Deserialize, Clone)] pub struct AtlasTileData { pub offset: (u32, u32), pub is_autotile: bool, pub tile_kind: TileKind, } #[derive(Serialize, Deserialize, Clone)] pub struct AtlasTile { pub data: AtlasTileData, pub cached_rect: RefCell<Option<AtlasTextureRegion>>, } #[derive(Serialize, Deserialize, Clone)] pub struct AtlasFrame { tile_size: (u32, u32), texture_idx: usize, rect: AtlasRect,
pub fn new(texture_idx: usize, rect: Rect, tile_size: (u32, u32)) -> Self { AtlasFrame { tile_size: tile_size, texture_idx: texture_idx, rect: AtlasRect::from(rect), offsets: HashMap::new(), } } } pub type TilePacker<'a> = TexturePacker<'a, DynamicImage, SkylinePacker<Rgba<u8>>>; pub struct TileAtlas { config: TileAtlasConfig, textures: Vec<Texture2d>, indices: Vec<String>, } pub struct TileAtlasBuilder<'a> { locations: HashMap<String, String>, frames: HashMap<String, AtlasFrame>, packers: Vec<TilePacker<'a>>, pub file_hash: String, } impl <'a> TileAtlasBuilder<'a> { pub fn new() -> Self { let mut builder = TileAtlasBuilder { locations: HashMap::new(), frames: HashMap::new(), packers: Vec::new(), file_hash: String::new(), }; builder.add_packer(); builder } pub fn add_tile(&mut self, path_str: &str, index: String, tile_data: AtlasTileData) { let key = path_str.to_string(); assert!(self.frames.contains_key(&path_str.to_string())); { let mut frame = self.frames.get_mut(&key).unwrap(); assert!(!frame.offsets.contains_key(&index)); let tile = AtlasTile { data: tile_data, cached_rect: RefCell::new(None), }; frame.offsets.insert(index.clone(), tile); self.locations.insert(index, key); } } pub fn add_frame(&mut self, path_string: &str, tile_size: (u32, u32)) { if self.frames.contains_key(path_string) { return; } let path = Path::new(&path_string); let texture = ImageImporter::import_from_file(path).unwrap(); for (idx, packer) in self.packers.iter_mut().enumerate() { if packer.can_pack(&texture) { packer.pack_own(path_string.to_string(), texture).unwrap(); let rect = packer.get_frame(path_string).unwrap().frame; self.frames.insert(path_string.to_string(), AtlasFrame::new(idx, rect, tile_size)); // cannot return self here, since self already borrowed, so // cannot use builder pattern. 
return; } } self.add_packer(); { // complains that borrow doesn't last long enough // let mut packer = self.newest_packer_mut(); let packer_idx = self.packers.len() - 1; let mut packer = &mut self.packers[packer_idx]; packer.pack_own(path_string.to_string(), texture).unwrap(); let rect = packer.get_frame(&path_string).unwrap().frame; self.frames.insert(path_string.to_string(), AtlasFrame::new(packer_idx, rect, tile_size)); } } fn add_packer(&mut self) { let config = TexturePackerConfig { max_width: 2048, max_height: 2048, allow_rotation: false, texture_outlines: false, trim: false, texture_padding: 0, ..Default::default() }; self.packers.push(TexturePacker::new_skyline(config)); } pub fn build<F: Facade>(&self, display: &F, packed_tex_folder: &str) -> TileAtlas { let mut textures = Vec::new(); let packed_folder_path = config::get_config_cache_path(packed_tex_folder); if Path::exists(packed_folder_path.as_path()) { fs::remove_dir_all(packed_folder_path.as_path()).unwrap(); } fs::create_dir_all(packed_folder_path.as_path()).unwrap(); for (idx, packer) in self.packers.iter().enumerate() { let image = ImageExporter::export(packer).unwrap(); let mut file_path = packed_folder_path.clone(); file_path.push(&format!("{}.png", idx)); let mut file = File::create(file_path).unwrap(); image.save(&mut file, image::PNG).unwrap(); textures.push(make_texture(display, image)); } println!("Saved {}", packed_tex_folder); let config = TileAtlasConfig { locations: self.locations.clone(), frames: self.frames.clone(), file_hash: self.file_hash.clone(), }; config::write_tile_atlas_config(&config, packed_tex_folder); TileAtlas::new(config, textures) } } impl TileAtlas { pub fn new(config: TileAtlasConfig, textures: Vec<Texture2d>) -> Self { let mut atlas = TileAtlas { config: config, textures: textures, indices: Vec::new(), }; atlas.cache_tile_regions(); atlas } /// Precalculates the UV rectangles for individual tiles to avoid the /// overhead of recalculating them on lookup. It must be done before the /// tile atlas can be used.
fn cache_tile_regions(&mut self) { for frame in self.config.frames.values() { let (frame_w, frame_h) = self.frame_size(frame); for (tile_type, tile) in frame.offsets.iter() { let tex_ratio = self.get_sprite_tex_ratio(tile_type); let add_offset = get_add_offset(&frame.rect, &frame.tile_size); let ratio = if tile.data.is_autotile { 2 } else { 1 }; let tx = ((tile.data.offset.0 + add_offset.0) * ratio) as f32 * tex_ratio[0]; let ty = ((tile.data.offset.1 + add_offset.1) * ratio) as f32 * tex_ratio[1]; let tw = (frame.tile_size.0 * ratio) as f32 / frame_w as f32; let th = (frame.tile_size.1 * ratio) as f32 / frame_h as f32; *tile.cached_rect.borrow_mut() = Some((tx, ty, tw, th)); } } self.indices = self.config.locations.keys().map(|l| l.to_string()).collect(); } fn frame_size(&self, frame: &AtlasFrame) -> (u32, u32) { self.texture_size(frame.texture_idx) } fn texture_size(&self, texture_idx: usize) -> (u32, u32) { self.textures[texture_idx].dimensions() } fn get_frame(&self, tile_type: &str) -> &AtlasFrame { let tex_name = &self.config.locations[tile_type]; &self.config.frames[tex_name] } pub fn get_tile_texture_idx(&self, tile_type: &str) -> usize { self.get_frame(tile_type).texture_idx } pub fn get_tilemap_tex_ratio(&self, texture_idx: usize) -> [f32; 2] { let dimensions = self.texture_size(texture_idx); let cols: u32 = dimensions.0 / 24; let rows: u32 = dimensions.1 / 24; [1.0 / cols as f32, 1.0 / rows as f32] } pub fn get_sprite_tex_ratio(&self, tile_type: &str) -> [f32; 2] { let frame = self.get_frame(tile_type); let (mut sx, mut sy) = frame.tile_size; if frame.offsets[tile_type].data.is_autotile { // divide the autotile into 24x24 from 48x48 sx /= 2; sy /= 2; } let dimensions = self.frame_size(frame); let cols: f32 = dimensions.0 as f32 / sx as f32; let rows: f32 = dimensions.1 as f32 / sy as f32; [1.0 / cols, 1.0 / rows] } pub fn get_tile_texture_size(&self, tile_type: &str) -> (u32, u32) { self.get_frame(tile_type).tile_size } pub fn get_tile(&self, tile_type: &str) -> &AtlasTile { let frame = self.get_frame(tile_type); &frame.offsets[tile_type] } pub fn get_texture_offset(&self, tile_type: &str, msecs: u64) -> (f32, f32) { let frame = self.get_frame(tile_type); let tile = &frame.offsets[tile_type]; let (mut tx, ty, tw, _) = tile.cached_rect.borrow() .expect("Texture atlas regions weren't cached yet."); match tile.data.tile_kind { TileKind::Static => (), TileKind::Animated(frame_count, delay) => { let current_frame = msecs / delay; let x_index_offset = current_frame % frame_count; tx += x_index_offset as f32 * tw; } } (tx, ty) } pub fn get_tile_index(&self, tile_kind: &str) -> usize { self.indices.iter().enumerate().find(|&(_, i)| i == tile_kind).unwrap().0 } fn get_tile_kind_indexed(&self, tile_idx: usize) -> &String { &self.indices[tile_idx] } pub fn get_texture_offset_indexed(&self, tile_idx: usize, msecs: u64) -> (f32, f32) { let kind = self.get_tile_kind_indexed(tile_idx); self.get_texture_offset(kind, msecs) } pub fn get_texture(&self, idx: usize) -> &Texture2d { &self.textures[idx] } pub fn passes(&self) -> usize { self.textures.len() } } fn get_add_offset(rect: &AtlasRect, tile_size: &(u32, u32)) -> (u32, u32) { let ceil = |a, b| (a + b - 1) / b; let cols: u32 = ceil(rect.x, tile_size.0); let rows: u32 = ceil(rect.y, tile_size.1); (cols, rows) } pub fn make_texture<F: Facade>(display: &F, image: DynamicImage) -> Texture2d { let dimensions = image.dimensions(); let image = glium::texture::RawImage2d::from_raw_rgba_reversed(image.to_rgba().into_raw(), dimensions); 
Texture2d::new(display, image).unwrap() }
offsets: HashMap<String, AtlasTile>, } impl AtlasFrame {
random_line_split
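TileAtlasBuilder::add_frame tries each existing packer in order and only allocates a fresh packer (a new atlas texture) when none can fit the image. A control-flow sketch with a hypothetical capacity-based stand-in for can_pack/pack_own:

// Control-flow sketch of add_frame's packer fallback, with hypothetical
// capacity counters standing in for the real texture_packer types.
struct Packer { used: u32, capacity: u32 }

impl Packer {
    fn can_pack(&self, size: u32) -> bool { self.used + size <= self.capacity }
    fn pack(&mut self, size: u32) { self.used += size; }
}

fn add_frame(packers: &mut Vec<Packer>, size: u32) -> usize {
    // Try every existing packer first, in order.
    for (idx, packer) in packers.iter_mut().enumerate() {
        if packer.can_pack(size) {
            packer.pack(size);
            return idx; // frame recorded against this texture index
        }
    }
    // Nothing fits: open a new packer (a new atlas texture) and pack there.
    packers.push(Packer { used: 0, capacity: 100 });
    let idx = packers.len() - 1;
    packers[idx].pack(size);
    idx
}

fn main() {
    let mut packers = vec![Packer { used: 90, capacity: 100 }];
    assert_eq!(add_frame(&mut packers, 5), 0);  // fits in the first packer
    assert_eq!(add_frame(&mut packers, 50), 1); // spills into a new packer
}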
unbond.rs
use crate::contract::{query_total_issued, slashing}; use crate::state::{ get_finished_amount, get_unbond_batches, read_config, read_current_batch, read_parameters, read_state, read_unbond_history, remove_unbond_wait_list, store_current_batch, store_state, store_unbond_history, store_unbond_wait_list, UnbondHistory, }; use cosmwasm_std::{ coin, coins, log, to_binary, Api, BankMsg, CosmosMsg, Decimal, Env, Extern, HandleResponse, HumanAddr, Querier, StakingMsg, StdError, StdResult, Storage, Uint128, WasmMsg, }; use cw20::Cw20HandleMsg; use rand::{Rng, SeedableRng, XorShiftRng}; use signed_integer::SignedInt; /// This message must be called by receive_cw20 /// This message will undelegate coins and burn bAsset tokens pub(crate) fn handle_unbond<S: Storage, A: Api, Q: Querier>( deps: &mut Extern<S, A, Q>, env: Env, amount: Uint128, sender: HumanAddr, ) -> StdResult<HandleResponse>
let max_peg_fee = amount * recovery_fee; let required_peg_fee = ((total_supply + current_batch.requested_with_fee) - state.total_bond_amount)?; let peg_fee = Uint128::min(max_peg_fee, required_peg_fee); amount_with_fee = (amount - peg_fee)?; } else { amount_with_fee = amount; } current_batch.requested_with_fee += amount_with_fee; store_unbond_wait_list( &mut deps.storage, current_batch.id, sender.clone(), amount_with_fee, )?; total_supply = (total_supply - amount).expect("the requested can not be more than the total supply"); // Update exchange rate state.update_exchange_rate(total_supply, current_batch.requested_with_fee); let current_time = env.block.time; let passed_time = current_time - state.last_unbonded_time; let mut messages: Vec<CosmosMsg> = vec![]; // If the epoch period has passed, the undelegate message is sent. if passed_time > epoch_period { // Apply the current exchange rate. let undelegation_amount = current_batch.requested_with_fee * state.exchange_rate; // the contract must stop if the undelegation amount is exactly 1 ubluna if undelegation_amount == Uint128(1) { return Err(StdError::generic_err( "Burn amount must be greater than 1 ubluna", )); } let delegator = env.contract.address; let block_height = env.block.height; // Send undelegated requests to possibly more than one validator let mut undelegated_msgs = pick_validator(deps, undelegation_amount, delegator, block_height)?; messages.append(&mut undelegated_msgs); state.total_bond_amount = (state.total_bond_amount - undelegation_amount) .expect("undelegation amount can not be more than stored total bonded amount"); // Store history for withdraw unbonded let history = UnbondHistory { batch_id: current_batch.id, time: env.block.time, amount: current_batch.requested_with_fee, applied_exchange_rate: state.exchange_rate, withdraw_rate: state.exchange_rate, released: false, }; store_unbond_history(&mut deps.storage, current_batch.id, history)?; // batch info must be updated to new batch current_batch.id += 1; current_batch.requested_with_fee = Uint128::zero(); // state.last_unbonded_time must be updated to the current block time state.last_unbonded_time = env.block.time; } // Store the new requested_with_fee or id in the current batch store_current_batch(&mut deps.storage).save(&current_batch)?; // Store state's new exchange rate store_state(&mut deps.storage).save(&state)?; // Send Burn message to token contract let config = read_config(&deps.storage).load()?; let token_address = deps.api.human_address( &config .token_contract .expect("the token contract must have been registered"), )?; let burn_msg = Cw20HandleMsg::Burn { amount }; messages.push(CosmosMsg::Wasm(WasmMsg::Execute { contract_addr: token_address, msg: to_binary(&burn_msg)?, send: vec![], })); let res = HandleResponse { messages, log: vec![ log("action", "burn"), log("from", sender), log("burnt_amount", amount), log("unbonded_amount", amount_with_fee), ], data: None, }; Ok(res) } pub fn handle_withdraw_unbonded<S: Storage, A: Api, Q: Querier>( deps: &mut Extern<S, A, Q>, env: Env, ) -> StdResult<HandleResponse> { let sender_human = env.message.sender.clone(); let contract_address = env.contract.address.clone(); // read params let params = read_parameters(&deps.storage).load()?; let unbonding_period = params.unbonding_period; let coin_denom = params.underlying_coin_denom; let historical_time = env.block.time - unbonding_period; // query hub balance to process the withdraw rate. let hub_balance = deps .querier .query_balance(&env.contract.address, &*coin_denom)?
.amount; // calculate withdraw rate for user requests process_withdraw_rate(deps, historical_time, hub_balance)?; let withdraw_amount = get_finished_amount(&deps.storage, sender_human.clone()).unwrap(); if withdraw_amount.is_zero() { return Err(StdError::generic_err(format!( "No withdrawable {} assets are available yet", coin_denom ))); } // remove the previous batches for the user let deprecated_batches = get_unbond_batches(&deps.storage, sender_human.clone())?; remove_unbond_wait_list(&mut deps.storage, deprecated_batches, sender_human.clone())?; // Update previous balance used for calculation in next Luna batch release let prev_balance = (hub_balance - withdraw_amount)?; store_state(&mut deps.storage).update(|mut last_state| { last_state.prev_hub_balance = prev_balance; Ok(last_state) })?; // Send the money to the user let msgs = vec![BankMsg::Send { from_address: contract_address.clone(), to_address: sender_human, amount: coins(withdraw_amount.u128(), &*coin_denom), } .into()]; let res = HandleResponse { messages: msgs, log: vec![ log("action", "finish_burn"), log("from", contract_address), log("amount", withdraw_amount), ], data: None, }; Ok(res) } /// This is designed for an accurate unbonded amount calculation. /// Execute while processing withdraw_unbonded fn process_withdraw_rate<S: Storage, A: Api, Q: Querier>( deps: &mut Extern<S, A, Q>, historical_time: u64, hub_balance: Uint128, ) -> StdResult<()> { // balance change of the hub contract must be checked. let mut total_unbonded_amount = Uint128::zero(); let mut state = read_state(&deps.storage).load()?; let balance_change = SignedInt::from_subtraction(hub_balance, state.prev_hub_balance); state.actual_unbonded_amount += balance_change.0; let last_processed_batch = state.last_processed_batch; let mut batch_count: u64 = 0; // Iterate over unbonded histories that have been processed // to calculate newly added unbonded amount let mut i = last_processed_batch + 1; loop { let history: UnbondHistory; match read_unbond_history(&deps.storage, i) { Ok(h) => { if h.time > historical_time { break; } if!h.released { history = h.clone(); } else { break; } } Err(_) => break, } let burnt_amount = history.amount; let historical_rate = history.withdraw_rate; let unbonded_amount = burnt_amount * historical_rate; total_unbonded_amount += unbonded_amount; batch_count += 1; i += 1; } if batch_count >= 1 { // Use signed integer in case of some rogue transfers. let slashed_amount = SignedInt::from_subtraction(total_unbonded_amount, state.actual_unbonded_amount); // Iterate again to calculate the withdraw rate for each unprocessed history let mut iterator = last_processed_batch + 1; loop { let history: UnbondHistory; match read_unbond_history(&deps.storage, iterator) { Ok(h) => { if h.time > historical_time { break; } if!h.released { history = h } else { break; } } Err(_) => { break; } } let burnt_amount_of_batch = history.amount; let historical_rate_of_batch = history.withdraw_rate; let unbonded_amount_of_batch = burnt_amount_of_batch * historical_rate_of_batch; // the slashed amount for each batch must be proportional to the unbonded amount of batch let batch_slashing_weight = Decimal::from_ratio(unbonded_amount_of_batch, total_unbonded_amount); let mut slashed_amount_of_batch = batch_slashing_weight * slashed_amount.0; let actual_unbonded_amount_of_batch: Uint128; // If slashed amount is negative, there should be summation instead of subtraction. 
if slashed_amount.1 { slashed_amount_of_batch = (slashed_amount_of_batch - Uint128(1))?; actual_unbonded_amount_of_batch = unbonded_amount_of_batch + slashed_amount_of_batch; } else { if slashed_amount.0.u128()!= 0u128 { slashed_amount_of_batch += Uint128(1); } actual_unbonded_amount_of_batch = SignedInt::from_subtraction(unbonded_amount_of_batch, slashed_amount_of_batch) .0; } // Calculate the new withdraw rate let new_withdraw_rate = Decimal::from_ratio(actual_unbonded_amount_of_batch, burnt_amount_of_batch); let mut history_for_i = history; // store the history and mark it as released history_for_i.withdraw_rate = new_withdraw_rate; history_for_i.released = true; store_unbond_history(&mut deps.storage, iterator, history_for_i)?; state.last_processed_batch = iterator; iterator += 1; } } // Store state.actual_unbonded_amount for future new batches release state.actual_unbonded_amount = Uint128::zero(); store_state(&mut deps.storage).save(&state)?; Ok(()) } fn pick_validator<S: Storage, A: Api, Q: Querier>( deps: &mut Extern<S, A, Q>, claim: Uint128, delegator: HumanAddr, block_height: u64, ) -> StdResult<Vec<CosmosMsg>> { //read params let params = read_parameters(&deps.storage).load()?; let coin_denom = params.underlying_coin_denom; let mut messages: Vec<CosmosMsg> = vec![]; let mut claimed = claim; let all_delegations = deps .querier .query_all_delegations(delegator) .expect("There must be at least one delegation"); // pick a random validator // if it does not have requested amount, undelegate all it has // and pick another random validator let mut iteration_index = 0; let mut deletable_delegations = all_delegations; while claimed.0 > 0 { let mut rng = XorShiftRng::seed_from_u64(block_height + iteration_index); let random_index = rng.gen_range(0, deletable_delegations.len()); let delegation = deletable_delegations.remove(random_index); let val = delegation.amount.amount; let undelegated_amount: Uint128; if val.0 > claimed.0 { undelegated_amount = claimed; claimed = Uint128::zero(); } else { undelegated_amount = val; claimed = (claimed - val)?; } if undelegated_amount.0 > 0 { let msgs: CosmosMsg = CosmosMsg::Staking(StakingMsg::Undelegate { validator: delegation.validator, amount: coin(undelegated_amount.0, &*coin_denom), }); messages.push(msgs); } iteration_index += 1; } Ok(messages) }
{ // Read params let params = read_parameters(&deps.storage).load()?; let epoch_period = params.epoch_period; let threshold = params.er_threshold; let recovery_fee = params.peg_recovery_fee; let mut current_batch = read_current_batch(&deps.storage).load()?; // Check slashing, update state, and calculate the new exchange rate. slashing(deps, env.clone())?; let mut state = read_state(&deps.storage).load()?; let mut total_supply = query_total_issued(&deps).unwrap_or_default(); // Collect all the requests within an epoch period // Apply peg recovery fee let amount_with_fee: Uint128; if state.exchange_rate < threshold {
identifier_body
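The peg recovery branch above clamps the fee to min(amount * recovery_fee, shortfall between the claimed token supply and the bonded amount). A minimal numeric sketch of that clamp, using plain u128 arithmetic in place of cosmwasm_std::Uint128 and Decimal; the 1% rate and all figures below are invented:

```rust
// Sketch of the peg-fee clamp in handle_unbond, with plain integers
// standing in for cosmwasm_std types. All numbers are hypothetical.
fn peg_fee(amount: u128, total_supply: u128, requested_with_fee: u128,
           total_bond_amount: u128) -> u128 {
    // recovery_fee assumed to be 1%, expressed as parts per hundred.
    let max_peg_fee = amount / 100;
    // Shortfall between what the token side claims and what is bonded.
    let required_peg_fee =
        (total_supply + requested_with_fee).saturating_sub(total_bond_amount);
    max_peg_fee.min(required_peg_fee)
}

fn main() {
    // Unbonding 1_000 while the hub is 500 under-collateralized:
    // the fee is capped by the 1% maximum, so only 10 is withheld.
    let fee = peg_fee(1_000, 100_000, 0, 99_500);
    assert_eq!(fee, 10);
    println!("peg fee withheld: {fee}");
}
```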
unbond.rs
use crate::contract::{query_total_issued, slashing}; use crate::state::{ get_finished_amount, get_unbond_batches, read_config, read_current_batch, read_parameters, read_state, read_unbond_history, remove_unbond_wait_list, store_current_batch, store_state, store_unbond_history, store_unbond_wait_list, UnbondHistory, }; use cosmwasm_std::{ coin, coins, log, to_binary, Api, BankMsg, CosmosMsg, Decimal, Env, Extern, HandleResponse, HumanAddr, Querier, StakingMsg, StdError, StdResult, Storage, Uint128, WasmMsg, }; use cw20::Cw20HandleMsg; use rand::{Rng, SeedableRng, XorShiftRng}; use signed_integer::SignedInt; /// This message must be called by receive_cw20 /// This message will undelegate coins and burn bAsset tokens pub(crate) fn
<S: Storage, A: Api, Q: Querier>( deps: &mut Extern<S, A, Q>, env: Env, amount: Uint128, sender: HumanAddr, ) -> StdResult<HandleResponse> { // Read params let params = read_parameters(&deps.storage).load()?; let epoch_period = params.epoch_period; let threshold = params.er_threshold; let recovery_fee = params.peg_recovery_fee; let mut current_batch = read_current_batch(&deps.storage).load()?; // Check slashing, update state, and calculate the new exchange rate. slashing(deps, env.clone())?; let mut state = read_state(&deps.storage).load()?; let mut total_supply = query_total_issued(&deps).unwrap_or_default(); // Collect all the requests within an epoch period // Apply peg recovery fee let amount_with_fee: Uint128; if state.exchange_rate < threshold { let max_peg_fee = amount * recovery_fee; let required_peg_fee = ((total_supply + current_batch.requested_with_fee) - state.total_bond_amount)?; let peg_fee = Uint128::min(max_peg_fee, required_peg_fee); amount_with_fee = (amount - peg_fee)?; } else { amount_with_fee = amount; } current_batch.requested_with_fee += amount_with_fee; store_unbond_wait_list( &mut deps.storage, current_batch.id, sender.clone(), amount_with_fee, )?; total_supply = (total_supply - amount).expect("the requested can not be more than the total supply"); // Update exchange rate state.update_exchange_rate(total_supply, current_batch.requested_with_fee); let current_time = env.block.time; let passed_time = current_time - state.last_unbonded_time; let mut messages: Vec<CosmosMsg> = vec![]; // If the epoch period has passed, the undelegate message is sent. if passed_time > epoch_period { // Apply the current exchange rate. let undelegation_amount = current_batch.requested_with_fee * state.exchange_rate; // the contract must stop if the undelegation amount is only 1 ubluna if undelegation_amount == Uint128(1) { return Err(StdError::generic_err( "Burn amount must be greater than 1 ubluna", )); } let delegator = env.contract.address; let block_height = env.block.height; // Send undelegated requests to possibly more than one validator let mut undelegated_msgs = pick_validator(deps, undelegation_amount, delegator, block_height)?; messages.append(&mut undelegated_msgs); state.total_bond_amount = (state.total_bond_amount - undelegation_amount) .expect("undelegation amount can not be more than stored total bonded amount"); // Store history for withdraw unbonded let history = UnbondHistory { batch_id: current_batch.id, time: env.block.time, amount: current_batch.requested_with_fee, applied_exchange_rate: state.exchange_rate, withdraw_rate: state.exchange_rate, released: false, }; store_unbond_history(&mut deps.storage, current_batch.id, history)?; // batch info must be updated to new batch current_batch.id += 1; current_batch.requested_with_fee = Uint128::zero(); // state.last_unbonded_time must be updated to the current block time state.last_unbonded_time = env.block.time; } // Store the new requested_with_fee or id in the current batch store_current_batch(&mut deps.storage).save(&current_batch)?; // Store state's new exchange rate store_state(&mut deps.storage).save(&state)?; // Send Burn message to token contract let config = read_config(&deps.storage).load()?; let token_address = deps.api.human_address( &config .token_contract .expect("the token contract must have been registered"), )?; let burn_msg = Cw20HandleMsg::Burn { amount }; messages.push(CosmosMsg::Wasm(WasmMsg::Execute { contract_addr: token_address, msg: to_binary(&burn_msg)?, send: vec![], })); let res = HandleResponse { messages, log: vec![
log("action", "burn"), log("from", sender), log("burnt_amount", amount), log("unbonded_amount", amount_with_fee), ], data: None, }; Ok(res) } pub fn handle_withdraw_unbonded<S: Storage, A: Api, Q: Querier>( deps: &mut Extern<S, A, Q>, env: Env, ) -> StdResult<HandleResponse> { let sender_human = env.message.sender.clone(); let contract_address = env.contract.address.clone(); // read params let params = read_parameters(&deps.storage).load()?; let unbonding_period = params.unbonding_period; let coin_denom = params.underlying_coin_denom; let historical_time = env.block.time - unbonding_period; // query hub balance to process the withdraw rate. let hub_balance = deps .querier .query_balance(&env.contract.address, &*coin_denom)? .amount; // calculate withdraw rate for user requests process_withdraw_rate(deps, historical_time, hub_balance)?; let withdraw_amount = get_finished_amount(&deps.storage, sender_human.clone()).unwrap(); if withdraw_amount.is_zero() { return Err(StdError::generic_err(format!( "No withdrawable {} assets are available yet", coin_denom ))); } // remove the previous batches for the user let deprecated_batches = get_unbond_batches(&deps.storage, sender_human.clone())?; remove_unbond_wait_list(&mut deps.storage, deprecated_batches, sender_human.clone())?; // Update previous balance used for calculation in next Luna batch release let prev_balance = (hub_balance - withdraw_amount)?; store_state(&mut deps.storage).update(|mut last_state| { last_state.prev_hub_balance = prev_balance; Ok(last_state) })?; // Send the money to the user let msgs = vec![BankMsg::Send { from_address: contract_address.clone(), to_address: sender_human, amount: coins(withdraw_amount.u128(), &*coin_denom), } .into()]; let res = HandleResponse { messages: msgs, log: vec![ log("action", "finish_burn"), log("from", contract_address), log("amount", withdraw_amount), ], data: None, }; Ok(res) } /// This is designed for an accurate unbonded amount calculation. /// Executed while processing withdraw_unbonded fn process_withdraw_rate<S: Storage, A: Api, Q: Querier>( deps: &mut Extern<S, A, Q>, historical_time: u64, hub_balance: Uint128, ) -> StdResult<()> { // balance change of the hub contract must be checked. let mut total_unbonded_amount = Uint128::zero(); let mut state = read_state(&deps.storage).load()?; let balance_change = SignedInt::from_subtraction(hub_balance, state.prev_hub_balance); state.actual_unbonded_amount += balance_change.0; let last_processed_batch = state.last_processed_batch; let mut batch_count: u64 = 0; // Iterate over unbonded histories that have not been processed yet // to calculate the newly added unbonded amount let mut i = last_processed_batch + 1; loop { let history: UnbondHistory; match read_unbond_history(&deps.storage, i) { Ok(h) => { if h.time > historical_time { break; } if !h.released { history = h.clone(); } else { break; } } Err(_) => break, } let burnt_amount = history.amount; let historical_rate = history.withdraw_rate; let unbonded_amount = burnt_amount * historical_rate; total_unbonded_amount += unbonded_amount; batch_count += 1; i += 1; } if batch_count >= 1 { // Use signed integer in case of some rogue transfers.
let slashed_amount = SignedInt::from_subtraction(total_unbonded_amount, state.actual_unbonded_amount); // Iterate again to calculate the withdraw rate for each unprocessed history let mut iterator = last_processed_batch + 1; loop { let history: UnbondHistory; match read_unbond_history(&deps.storage, iterator) { Ok(h) => { if h.time > historical_time { break; } if !h.released { history = h } else { break; } } Err(_) => { break; } } let burnt_amount_of_batch = history.amount; let historical_rate_of_batch = history.withdraw_rate; let unbonded_amount_of_batch = burnt_amount_of_batch * historical_rate_of_batch; // the slashed amount for each batch must be proportional to the unbonded amount of the batch let batch_slashing_weight = Decimal::from_ratio(unbonded_amount_of_batch, total_unbonded_amount); let mut slashed_amount_of_batch = batch_slashing_weight * slashed_amount.0; let actual_unbonded_amount_of_batch: Uint128; // If the slashed amount is negative, there should be summation instead of subtraction. if slashed_amount.1 { slashed_amount_of_batch = (slashed_amount_of_batch - Uint128(1))?; actual_unbonded_amount_of_batch = unbonded_amount_of_batch + slashed_amount_of_batch; } else { if slashed_amount.0.u128() != 0u128 { slashed_amount_of_batch += Uint128(1); } actual_unbonded_amount_of_batch = SignedInt::from_subtraction(unbonded_amount_of_batch, slashed_amount_of_batch) .0; } // Calculate the new withdraw rate let new_withdraw_rate = Decimal::from_ratio(actual_unbonded_amount_of_batch, burnt_amount_of_batch); let mut history_for_i = history; // store the history and mark it as released history_for_i.withdraw_rate = new_withdraw_rate; history_for_i.released = true; store_unbond_history(&mut deps.storage, iterator, history_for_i)?; state.last_processed_batch = iterator; iterator += 1; } } // Store state.actual_unbonded_amount for future new batches release state.actual_unbonded_amount = Uint128::zero(); store_state(&mut deps.storage).save(&state)?; Ok(()) } fn pick_validator<S: Storage, A: Api, Q: Querier>( deps: &mut Extern<S, A, Q>, claim: Uint128, delegator: HumanAddr, block_height: u64, ) -> StdResult<Vec<CosmosMsg>> { // read params let params = read_parameters(&deps.storage).load()?; let coin_denom = params.underlying_coin_denom; let mut messages: Vec<CosmosMsg> = vec![]; let mut claimed = claim; let all_delegations = deps .querier .query_all_delegations(delegator) .expect("There must be at least one delegation"); // pick a random validator // if it does not have the requested amount, undelegate all it has // and pick another random validator let mut iteration_index = 0; let mut deletable_delegations = all_delegations; while claimed.0 > 0 { let mut rng = XorShiftRng::seed_from_u64(block_height + iteration_index); let random_index = rng.gen_range(0, deletable_delegations.len()); let delegation = deletable_delegations.remove(random_index); let val = delegation.amount.amount; let undelegated_amount: Uint128; if val.0 > claimed.0 { undelegated_amount = claimed; claimed = Uint128::zero(); } else { undelegated_amount = val; claimed = (claimed - val)?; } if undelegated_amount.0 > 0 { let msgs: CosmosMsg = CosmosMsg::Staking(StakingMsg::Undelegate { validator: delegation.validator, amount: coin(undelegated_amount.0, &*coin_denom), }); messages.push(msgs); } iteration_index += 1; } Ok(messages) }
handle_unbond
identifier_name
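pick_validator reseeds XorShiftRng from block_height + iteration_index on every pass, so every node replaying the same block derives the same validator order. A dependency-free sketch of that determinism property; xorshift64 stands in for rand's XorShiftRng, and the validator names and stakes are invented:

```rust
// Deterministic pseudo-random pick, mirroring the seed-per-iteration scheme.
// xorshift64 is a stand-in for rand::XorShiftRng; same determinism property.
fn xorshift64(mut x: u64) -> u64 {
    x ^= x << 13;
    x ^= x >> 7;
    x ^= x << 17;
    x
}

fn main() {
    let block_height = 4_242u64; // hypothetical block height
    let mut delegations = vec![("valA", 300u128), ("valB", 500), ("valC", 400)];
    let mut remaining = 600u128; // amount we still need to undelegate
    let mut iteration = 0u64;
    while remaining > 0 {
        // Fresh seed each round: every full node derives the same index.
        let idx = (xorshift64(block_height + iteration) as usize) % delegations.len();
        let (name, staked) = delegations.remove(idx);
        // Take what this validator has, up to the remaining claim.
        let take = staked.min(remaining);
        remaining -= take;
        println!("undelegate {take} from {name}");
        iteration += 1;
    }
}
```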
unbond.rs
use crate::contract::{query_total_issued, slashing}; use crate::state::{ get_finished_amount, get_unbond_batches, read_config, read_current_batch, read_parameters, read_state, read_unbond_history, remove_unbond_wait_list, store_current_batch, store_state, store_unbond_history, store_unbond_wait_list, UnbondHistory, }; use cosmwasm_std::{ coin, coins, log, to_binary, Api, BankMsg, CosmosMsg, Decimal, Env, Extern, HandleResponse, HumanAddr, Querier, StakingMsg, StdError, StdResult, Storage, Uint128, WasmMsg, }; use cw20::Cw20HandleMsg; use rand::{Rng, SeedableRng, XorShiftRng}; use signed_integer::SignedInt; /// This message must be called by receive_cw20 /// This message will undelegate coins and burn bAsset tokens pub(crate) fn handle_unbond<S: Storage, A: Api, Q: Querier>( deps: &mut Extern<S, A, Q>, env: Env, amount: Uint128, sender: HumanAddr, ) -> StdResult<HandleResponse> { // Read params let params = read_parameters(&deps.storage).load()?; let epoch_period = params.epoch_period; let threshold = params.er_threshold; let recovery_fee = params.peg_recovery_fee; let mut current_batch = read_current_batch(&deps.storage).load()?; // Check slashing, update state, and calculate the new exchange rate. slashing(deps, env.clone())?; let mut state = read_state(&deps.storage).load()?; let mut total_supply = query_total_issued(&deps).unwrap_or_default(); // Collect all the requests within an epoch period // Apply peg recovery fee let amount_with_fee: Uint128; if state.exchange_rate < threshold { let max_peg_fee = amount * recovery_fee; let required_peg_fee = ((total_supply + current_batch.requested_with_fee) - state.total_bond_amount)?; let peg_fee = Uint128::min(max_peg_fee, required_peg_fee); amount_with_fee = (amount - peg_fee)?; } else { amount_with_fee = amount; } current_batch.requested_with_fee += amount_with_fee; store_unbond_wait_list( &mut deps.storage, current_batch.id, sender.clone(), amount_with_fee, )?; total_supply = (total_supply - amount).expect("the requested can not be more than the total supply"); // Update exchange rate state.update_exchange_rate(total_supply, current_batch.requested_with_fee); let current_time = env.block.time; let passed_time = current_time - state.last_unbonded_time; let mut messages: Vec<CosmosMsg> = vec![]; // If the epoch period has passed, the undelegate message is sent. if passed_time > epoch_period { // Apply the current exchange rate.
let undelegation_amount = current_batch.requested_with_fee * state.exchange_rate; // the contract must stop if the undelegation amount is only 1 ubluna if undelegation_amount == Uint128(1) { return Err(StdError::generic_err( "Burn amount must be greater than 1 ubluna", )); } let delegator = env.contract.address; let block_height = env.block.height; // Send undelegated requests to possibly more than one validator let mut undelegated_msgs = pick_validator(deps, undelegation_amount, delegator, block_height)?; messages.append(&mut undelegated_msgs); state.total_bond_amount = (state.total_bond_amount - undelegation_amount) .expect("undelegation amount can not be more than stored total bonded amount"); // Store history for withdraw unbonded let history = UnbondHistory { batch_id: current_batch.id, time: env.block.time, amount: current_batch.requested_with_fee, applied_exchange_rate: state.exchange_rate, withdraw_rate: state.exchange_rate, released: false, }; store_unbond_history(&mut deps.storage, current_batch.id, history)?; // batch info must be updated to new batch current_batch.id += 1; current_batch.requested_with_fee = Uint128::zero(); // state.last_unbonded_time must be updated to the current block time state.last_unbonded_time = env.block.time; } // Store the new requested_with_fee or id in the current batch store_current_batch(&mut deps.storage).save(&current_batch)?; // Store state's new exchange rate store_state(&mut deps.storage).save(&state)?; // Send Burn message to token contract let config = read_config(&deps.storage).load()?; let token_address = deps.api.human_address( &config .token_contract .expect("the token contract must have been registered"), )?; let burn_msg = Cw20HandleMsg::Burn { amount }; messages.push(CosmosMsg::Wasm(WasmMsg::Execute { contract_addr: token_address, msg: to_binary(&burn_msg)?, send: vec![], })); let res = HandleResponse { messages, log: vec![ log("action", "burn"), log("from", sender), log("burnt_amount", amount), log("unbonded_amount", amount_with_fee), ], data: None, }; Ok(res) } pub fn handle_withdraw_unbonded<S: Storage, A: Api, Q: Querier>( deps: &mut Extern<S, A, Q>, env: Env, ) -> StdResult<HandleResponse> { let sender_human = env.message.sender.clone(); let contract_address = env.contract.address.clone(); // read params let params = read_parameters(&deps.storage).load()?; let unbonding_period = params.unbonding_period; let coin_denom = params.underlying_coin_denom; let historical_time = env.block.time - unbonding_period; // query hub balance to process the withdraw rate. let hub_balance = deps .querier .query_balance(&env.contract.address, &*coin_denom)? .amount; // calculate withdraw rate for user requests process_withdraw_rate(deps, historical_time, hub_balance)?; let withdraw_amount = get_finished_amount(&deps.storage, sender_human.clone()).unwrap(); if withdraw_amount.is_zero() { return Err(StdError::generic_err(format!( "No withdrawable {} assets are available yet", coin_denom ))); } // remove the previous batches for the user let deprecated_batches = get_unbond_batches(&deps.storage, sender_human.clone())?; remove_unbond_wait_list(&mut deps.storage, deprecated_batches, sender_human.clone())?; // Update previous balance used for calculation in next Luna batch release let prev_balance = (hub_balance - withdraw_amount)?; store_state(&mut deps.storage).update(|mut last_state| { last_state.prev_hub_balance = prev_balance;
})?; // Send the money to the user let msgs = vec![BankMsg::Send { from_address: contract_address.clone(), to_address: sender_human, amount: coins(withdraw_amount.u128(), &*coin_denom), } .into()]; let res = HandleResponse { messages: msgs, log: vec![ log("action", "finish_burn"), log("from", contract_address), log("amount", withdraw_amount), ], data: None, }; Ok(res) } /// This is designed for an accurate unbonded amount calculation. /// Executed while processing withdraw_unbonded fn process_withdraw_rate<S: Storage, A: Api, Q: Querier>( deps: &mut Extern<S, A, Q>, historical_time: u64, hub_balance: Uint128, ) -> StdResult<()> { // balance change of the hub contract must be checked. let mut total_unbonded_amount = Uint128::zero(); let mut state = read_state(&deps.storage).load()?; let balance_change = SignedInt::from_subtraction(hub_balance, state.prev_hub_balance); state.actual_unbonded_amount += balance_change.0; let last_processed_batch = state.last_processed_batch; let mut batch_count: u64 = 0; // Iterate over unbonded histories that have not been processed yet // to calculate the newly added unbonded amount let mut i = last_processed_batch + 1; loop { let history: UnbondHistory; match read_unbond_history(&deps.storage, i) { Ok(h) => { if h.time > historical_time { break; } if !h.released { history = h.clone(); } else { break; } } Err(_) => break, } let burnt_amount = history.amount; let historical_rate = history.withdraw_rate; let unbonded_amount = burnt_amount * historical_rate; total_unbonded_amount += unbonded_amount; batch_count += 1; i += 1; } if batch_count >= 1 { // Use signed integer in case of some rogue transfers. let slashed_amount = SignedInt::from_subtraction(total_unbonded_amount, state.actual_unbonded_amount); // Iterate again to calculate the withdraw rate for each unprocessed history let mut iterator = last_processed_batch + 1; loop { let history: UnbondHistory; match read_unbond_history(&deps.storage, iterator) { Ok(h) => { if h.time > historical_time { break; } if !h.released { history = h } else { break; } } Err(_) => { break; } } let burnt_amount_of_batch = history.amount; let historical_rate_of_batch = history.withdraw_rate; let unbonded_amount_of_batch = burnt_amount_of_batch * historical_rate_of_batch; // the slashed amount for each batch must be proportional to the unbonded amount of the batch let batch_slashing_weight = Decimal::from_ratio(unbonded_amount_of_batch, total_unbonded_amount); let mut slashed_amount_of_batch = batch_slashing_weight * slashed_amount.0; let actual_unbonded_amount_of_batch: Uint128; // If the slashed amount is negative, there should be summation instead of subtraction.
if slashed_amount.1 { slashed_amount_of_batch = (slashed_amount_of_batch - Uint128(1))?; actual_unbonded_amount_of_batch = unbonded_amount_of_batch + slashed_amount_of_batch; } else { if slashed_amount.0.u128() != 0u128 { slashed_amount_of_batch += Uint128(1); } actual_unbonded_amount_of_batch = SignedInt::from_subtraction(unbonded_amount_of_batch, slashed_amount_of_batch) .0; } // Calculate the new withdraw rate let new_withdraw_rate = Decimal::from_ratio(actual_unbonded_amount_of_batch, burnt_amount_of_batch); let mut history_for_i = history; // store the history and mark it as released history_for_i.withdraw_rate = new_withdraw_rate; history_for_i.released = true; store_unbond_history(&mut deps.storage, iterator, history_for_i)?; state.last_processed_batch = iterator; iterator += 1; } } // Store state.actual_unbonded_amount for future new batches release state.actual_unbonded_amount = Uint128::zero(); store_state(&mut deps.storage).save(&state)?; Ok(()) } fn pick_validator<S: Storage, A: Api, Q: Querier>( deps: &mut Extern<S, A, Q>, claim: Uint128, delegator: HumanAddr, block_height: u64, ) -> StdResult<Vec<CosmosMsg>> { // read params let params = read_parameters(&deps.storage).load()?; let coin_denom = params.underlying_coin_denom; let mut messages: Vec<CosmosMsg> = vec![]; let mut claimed = claim; let all_delegations = deps .querier .query_all_delegations(delegator) .expect("There must be at least one delegation"); // pick a random validator // if it does not have the requested amount, undelegate all it has // and pick another random validator let mut iteration_index = 0; let mut deletable_delegations = all_delegations; while claimed.0 > 0 { let mut rng = XorShiftRng::seed_from_u64(block_height + iteration_index); let random_index = rng.gen_range(0, deletable_delegations.len()); let delegation = deletable_delegations.remove(random_index); let val = delegation.amount.amount; let undelegated_amount: Uint128; if val.0 > claimed.0 { undelegated_amount = claimed; claimed = Uint128::zero(); } else { undelegated_amount = val; claimed = (claimed - val)?; } if undelegated_amount.0 > 0 { let msgs: CosmosMsg = CosmosMsg::Staking(StakingMsg::Undelegate { validator: delegation.validator, amount: coin(undelegated_amount.0, &*coin_denom), }); messages.push(msgs); } iteration_index += 1; } Ok(messages) }
Ok(last_state)
random_line_split
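To make process_withdraw_rate's proportional slashing concrete, the sketch below runs the same arithmetic on invented figures (integer math only; the contract's Decimal rounding and the one-ubluna adjustments are elided):

```rust
// Hypothetical figures: two batches expected 800 and 200 ulunas respectively,
// but the hub only actually received 900, so 100 was slashed in transit.
fn main() {
    let expected = [800u128, 200];
    let total_expected: u128 = expected.iter().sum(); // 1_000
    let actual_unbonded = 900u128;
    let slashed_total = total_expected - actual_unbonded; // 100
    for (i, e) in expected.iter().enumerate() {
        // Each batch absorbs slashing in proportion to its expected payout.
        let slashed = slashed_total * e / total_expected;
        let actual = e - slashed;
        // Burnt amount per batch is assumed equal to the expected payout,
        // i.e. a historical withdraw rate of 1.0 before the update.
        let new_rate = actual as f64 / *e as f64;
        println!("batch {i}: pays {actual}, new withdraw rate {new_rate:.2}");
    }
    // batch 0 pays 720, batch 1 pays 180; both rates drop to 0.90.
}
```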
main.rs
fuchsia_zircon::{self as zx, AsHandleRef}, futures::{prelude::*, StreamExt}, hyper, std::convert::TryFrom, std::str::FromStr as _, tracing::{debug, error, info, trace}, }; static MAX_REDIRECTS: u8 = 10; static DEFAULT_DEADLINE_DURATION: zx::Duration = zx::Duration::from_seconds(15); fn to_status_line(version: hyper::Version, status: hyper::StatusCode) -> Vec<u8> { match status.canonical_reason() { None => format!("{:?} {}", version, status.as_str()), Some(canonical_reason) => format!("{:?} {} {}", version, status.as_str(), canonical_reason), } .as_bytes() .to_vec() } fn tcp_options() -> fhyper::TcpOptions { let mut options: fhyper::TcpOptions = std::default::Default::default(); // Use TCP keepalive to notice stuck connections. // After 60s with no data received send a probe every 15s. options.keepalive_idle = Some(std::time::Duration::from_secs(60)); options.keepalive_interval = Some(std::time::Duration::from_secs(15)); // After 8 probes go unacknowledged treat the connection as dead. options.keepalive_count = Some(8); options } struct RedirectInfo { url: Option<hyper::Uri>, referrer: Option<hyper::Uri>, method: hyper::Method, } fn redirect_info( old_uri: &hyper::Uri, method: &hyper::Method, hyper_response: &hyper::Response<hyper::Body>, ) -> Option<RedirectInfo> { if hyper_response.status().is_redirection() { Some(RedirectInfo { url: hyper_response .headers() .get(hyper::header::LOCATION) .and_then(|loc| calculate_redirect(old_uri, loc)), referrer: hyper_response .headers() .get(hyper::header::REFERER) .and_then(|loc| calculate_redirect(old_uri, loc)), method: if hyper_response.status() == hyper::StatusCode::SEE_OTHER { hyper::Method::GET } else { method.clone() }, }) } else { None } } async fn to_success_response( current_url: &hyper::Uri, current_method: &hyper::Method, mut hyper_response: hyper::Response<hyper::Body>, ) -> Result<net_http::Response, zx::Status> { let redirect_info = redirect_info(current_url, current_method, &hyper_response); let headers = hyper_response .headers() .iter() .map(|(name, value)| net_http::Header { name: name.as_str().as_bytes().to_vec(), value: value.as_bytes().to_vec(), }) .collect(); let (tx, rx) = zx::Socket::create(zx::SocketOpts::STREAM)?; let response = net_http::Response { error: None, body: Some(rx), final_url: Some(current_url.to_string()), status_code: Some(hyper_response.status().as_u16() as u32), status_line: Some(to_status_line(hyper_response.version(), hyper_response.status())), headers: Some(headers), redirect: redirect_info.map(|info| net_http::RedirectTarget { method: Some(info.method.to_string()), url: info.url.map(|u| u.to_string()), referrer: info.referrer.map(|r| r.to_string()), ..net_http::RedirectTarget::EMPTY }), ..net_http::Response::EMPTY }; fasync::Task::spawn(async move { let hyper_body = hyper_response.body_mut(); while let Some(chunk) = hyper_body.next().await { if let Ok(chunk) = chunk { let mut offset: usize = 0; while offset < chunk.len() { let pending = match tx.wait_handle( zx::Signals::SOCKET_PEER_CLOSED | zx::Signals::SOCKET_WRITABLE, zx::Time::INFINITE, ) { Err(status) => { error!("tx.wait() failed - status: {}", status); return; } Ok(pending) => pending, }; if pending.contains(zx::Signals::SOCKET_PEER_CLOSED) { info!("tx.wait() saw signal SOCKET_PEER_CLOSED"); return; } assert!(pending.contains(zx::Signals::SOCKET_WRITABLE)); let written = match tx.write(&chunk[offset..]) { Err(status) => { // Because of the wait above, we shouldn't ever see SHOULD_WAIT here, but to avoid // brittleness, continue and wait again in that case.
if status == zx::Status::SHOULD_WAIT { error!("Saw SHOULD_WAIT despite waiting first - expected now? - continuing"); continue; } info!("tx.write() failed - status: {}", status); return; } Ok(written) => written, }; offset += written; } } } }).detach(); Ok(response) } fn to_fidl_error(error: &hyper::Error) -> net_http::Error { #[allow(clippy::if_same_then_else)] // TODO(fxbug.dev/95028) if error.is_parse() { net_http::Error::UnableToParse } else if error.is_user() { //TODO(zmbush): handle this case. net_http::Error::Internal } else if error.is_canceled() { //TODO(zmbush): handle this case. net_http::Error::Internal } else if error.is_closed() { net_http::Error::ChannelClosed } else if error.is_connect() { net_http::Error::Connect } else if error.is_incomplete_message() { //TODO(zmbush): handle this case. net_http::Error::Internal } else if error.is_body_write_aborted() { //TODO(zmbush): handle this case. net_http::Error::Internal } else { net_http::Error::Internal } } fn to_error_response(error: net_http::Error) -> net_http::Response { net_http::Response { error: Some(error), body: None, final_url: None, status_code: None, status_line: None, headers: None, redirect: None, ..net_http::Response::EMPTY } } struct Loader { method: hyper::Method, url: hyper::Uri, headers: hyper::HeaderMap, body: Vec<u8>, deadline: fasync::Time, } impl Loader { async fn new(req: net_http::Request) -> Result<Self, anyhow::Error> { let net_http::Request { method, url, headers, body, deadline, .. } = req; let method = method.as_ref().map(|method| hyper::Method::from_str(method)).transpose()?; let method = method.unwrap_or(hyper::Method::GET); if let Some(url) = url { let url = hyper::Uri::try_from(url)?; let headers = headers .unwrap_or_else(|| vec![]) .into_iter() .map(|net_http::Header { name, value }| { let name = hyper::header::HeaderName::from_bytes(&name)?; let value = hyper::header::HeaderValue::from_bytes(&value)?; Ok((name, value)) }) .collect::<Result<hyper::HeaderMap, anyhow::Error>>()?; let body = match body { Some(net_http::Body::Buffer(buffer)) => { let mut bytes = vec![0; buffer.size as usize]; buffer.vmo.read(&mut bytes, 0)?; bytes } Some(net_http::Body::Stream(socket)) => { let mut stream = fasync::Socket::from_socket(socket)? .into_datagram_stream() .map(|r| r.context("reading from datagram stream")); let mut bytes = Vec::new(); while let Some(chunk) = stream.next().await { bytes.extend(chunk?); } bytes } None => Vec::new(), }; let deadline = deadline .map(|deadline| fasync::Time::from_nanos(deadline)) .unwrap_or_else(|| fasync::Time::after(DEFAULT_DEADLINE_DURATION)); trace!("Starting request {} {}", method, url); Ok(Loader { method, url, headers, body, deadline }) } else { Err(anyhow::Error::msg("Request missing URL")) } } fn build_request(&self) -> hyper::Request<hyper::Body> { let Self { method, url, headers, body, deadline: _ } = self; let mut request = hyper::Request::new(body.clone().into()); *request.method_mut() = method.clone(); *request.uri_mut() = url.clone(); *request.headers_mut() = headers.clone(); request } async fn
(mut self, loader_client: net_http::LoaderClientProxy) -> Result<(), zx::Status> { let client = fhyper::new_https_client_from_tcp_options(tcp_options()); loop { break match client.request(self.build_request()).await { Ok(hyper_response) => { let redirect = redirect_info(&self.url, &self.method, &hyper_response); if let Some(redirect) = redirect { if let Some(url) = redirect.url { self.url = url; self.method = redirect.method; trace!( "Reporting redirect to OnResponse: {} {}", self.method, self.url ); let response = to_success_response(&self.url, &self.method, hyper_response) .await?; match loader_client.on_response(response).await { Ok(()) => {} Err(e) => { debug!("Not redirecting because: {}", e); break Ok(()); } }; trace!("Redirect allowed to {} {}", self.method, self.url); continue; } } let response = to_success_response(&self.url, &self.method, hyper_response).await?; // We don't care if on_response returns an error since this is the last // callback. let _: Result<_, _> = loader_client.on_response(response).await; Ok(()) } Err(error) => { info!("Received network level error from hyper: {}", error); // We don't care if on_response returns an error since this is the last // callback. let _: Result<_, _> = loader_client.on_response(to_error_response(to_fidl_error(&error))).await; Ok(()) } }; } } async fn fetch( mut self, ) -> Result<(hyper::Response<hyper::Body>, hyper::Uri, hyper::Method), net_http::Error> { let deadline = self.deadline; if deadline < fasync::Time::now() { return Err(net_http::Error::DeadlineExceeded); } let client = fhyper::new_https_client_from_tcp_options(tcp_options()); async move { let mut redirects = 0; loop { break match client.request(self.build_request()).await { Ok(hyper_response) => { if redirects != MAX_REDIRECTS { let redirect = redirect_info(&self.url, &self.method, &hyper_response); if let Some(redirect) = redirect { if let Some(url) = redirect.url { self.url = url; self.method = redirect.method; trace!("Redirecting to {} {}", self.method, self.url); redirects += 1; continue; } } } Ok((hyper_response, self.url, self.method)) } Err(e) => { info!("Received network level error from hyper: {}", e); Err(to_fidl_error(&e)) } }; } } .on_timeout(deadline, || Err(net_http::Error::DeadlineExceeded)) .await } } fn calculate_redirect( old_url: &hyper::Uri, location: &hyper::header::HeaderValue, ) -> Option<hyper::Uri> { let old_parts = old_url.clone().into_parts(); let mut new_parts = hyper::Uri::try_from(location.as_bytes()).ok()?.into_parts(); if new_parts.scheme.is_none() { new_parts.scheme = old_parts.scheme; } if new_parts.authority.is_none() { new_parts.authority = old_parts.authority; } Some(hyper::Uri::from_parts(new_parts).ok()?) } fn spawn_server(stream: net_http::LoaderRequestStream) { fasync::Task::spawn( async move { stream .err_into() .try_for_each_concurrent(None, |message| async move { match message { net_http::LoaderRequest::Fetch { request, responder } => { debug!( "Fetch request received (url: {}): {:?}", request .url .as_ref() .and_then(|url| Some(url.as_str())) .unwrap_or_default(), request ); let result = Loader::new(request).await?.fetch().await; responder.send(match result { Ok((hyper_response, final_url, final_method)) => { to_success_response(&final_url, &final_method, hyper_response) .await?
} Err(error) => to_error_response(error), })?; } net_http::LoaderRequest::Start { request, client, control_handle } => { debug!( "Start request received (url: {}): {:?}", request .url .as_ref() .and_then(|url| Some(url.as_str())) .unwrap_or_default(), request ); Loader::new(request).await?.start(client.into_proxy()?).await?;
start
identifier_name
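calculate_redirect keeps the old scheme and authority whenever the Location header is relative. A dependency-free sketch of that fallback rule; string slicing stands in for hyper::Uri::into_parts, and only absolute URLs and absolute paths are handled:

```rust
// Resolve a Location header against the current URL, keeping the old scheme
// and host when the header omits them. Simplified stand-in: no relative
// paths, no userinfo, no query merging.
fn resolve_redirect(current: &str, location: &str) -> String {
    if location.contains("://") {
        return location.to_string(); // already absolute, use as-is
    }
    // Find the end of "scheme://host" and graft the new absolute path on.
    let scheme_end = current.find("://").map(|i| i + 3).unwrap_or(0);
    let host_end = current[scheme_end..]
        .find('/')
        .map(|i| scheme_end + i)
        .unwrap_or(current.len());
    format!("{}{}", &current[..host_end], location)
}

fn main() {
    assert_eq!(
        resolve_redirect("https://example.com/a/b", "/login"),
        "https://example.com/login"
    );
    assert_eq!(
        resolve_redirect("https://example.com/a", "https://other.test/x"),
        "https://other.test/x"
    );
    println!("redirect resolution ok");
}
```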
main.rs
fuchsia_zircon::{self as zx, AsHandleRef}, futures::{prelude::*, StreamExt}, hyper, std::convert::TryFrom, std::str::FromStr as _, tracing::{debug, error, info, trace}, }; static MAX_REDIRECTS: u8 = 10; static DEFAULT_DEADLINE_DURATION: zx::Duration = zx::Duration::from_seconds(15); fn to_status_line(version: hyper::Version, status: hyper::StatusCode) -> Vec<u8> { match status.canonical_reason() { None => format!("{:?} {}", version, status.as_str()), Some(canonical_reason) => format!("{:?} {} {}", version, status.as_str(), canonical_reason), } .as_bytes() .to_vec() } fn tcp_options() -> fhyper::TcpOptions { let mut options: fhyper::TcpOptions = std::default::Default::default(); // Use TCP keepalive to notice stuck connections. // After 60s with no data received send a probe every 15s. options.keepalive_idle = Some(std::time::Duration::from_secs(60)); options.keepalive_interval = Some(std::time::Duration::from_secs(15)); // After 8 probes go unacknowledged treat the connection as dead. options.keepalive_count = Some(8); options } struct RedirectInfo { url: Option<hyper::Uri>, referrer: Option<hyper::Uri>, method: hyper::Method, } fn redirect_info( old_uri: &hyper::Uri, method: &hyper::Method, hyper_response: &hyper::Response<hyper::Body>, ) -> Option<RedirectInfo> { if hyper_response.status().is_redirection() { Some(RedirectInfo { url: hyper_response .headers() .get(hyper::header::LOCATION) .and_then(|loc| calculate_redirect(old_uri, loc)), referrer: hyper_response .headers() .get(hyper::header::REFERER) .and_then(|loc| calculate_redirect(old_uri, loc)), method: if hyper_response.status() == hyper::StatusCode::SEE_OTHER { hyper::Method::GET } else { method.clone() }, }) } else { None } } async fn to_success_response( current_url: &hyper::Uri, current_method: &hyper::Method, mut hyper_response: hyper::Response<hyper::Body>, ) -> Result<net_http::Response, zx::Status> { let redirect_info = redirect_info(current_url, current_method, &hyper_response); let headers = hyper_response .headers() .iter() .map(|(name, value)| net_http::Header { name: name.as_str().as_bytes().to_vec(), value: value.as_bytes().to_vec(), }) .collect(); let (tx, rx) = zx::Socket::create(zx::SocketOpts::STREAM)?; let response = net_http::Response { error: None, body: Some(rx), final_url: Some(current_url.to_string()), status_code: Some(hyper_response.status().as_u16() as u32), status_line: Some(to_status_line(hyper_response.version(), hyper_response.status())), headers: Some(headers), redirect: redirect_info.map(|info| net_http::RedirectTarget { method: Some(info.method.to_string()), url: info.url.map(|u| u.to_string()), referrer: info.referrer.map(|r| r.to_string()), ..net_http::RedirectTarget::EMPTY }), ..net_http::Response::EMPTY }; fasync::Task::spawn(async move { let hyper_body = hyper_response.body_mut(); while let Some(chunk) = hyper_body.next().await { if let Ok(chunk) = chunk { let mut offset: usize = 0; while offset < chunk.len() { let pending = match tx.wait_handle( zx::Signals::SOCKET_PEER_CLOSED | zx::Signals::SOCKET_WRITABLE, zx::Time::INFINITE, ) { Err(status) => { error!("tx.wait() failed - status: {}", status); return; } Ok(pending) => pending, }; if pending.contains(zx::Signals::SOCKET_PEER_CLOSED) { info!("tx.wait() saw signal SOCKET_PEER_CLOSED"); return; } assert!(pending.contains(zx::Signals::SOCKET_WRITABLE)); let written = match tx.write(&chunk[offset..]) { Err(status) => { // Because of the wait above, we shouldn't ever see SHOULD_WAIT here, but to avoid // brittleness, continue and wait again in that case.
if status == zx::Status::SHOULD_WAIT { error!("Saw SHOULD_WAIT despite waiting first - expected now? - continuing"); continue; } info!("tx.write() failed - status: {}", status); return; } Ok(written) => written, }; offset += written; } } } }).detach(); Ok(response) } fn to_fidl_error(error: &hyper::Error) -> net_http::Error { #[allow(clippy::if_same_then_else)] // TODO(fxbug.dev/95028) if error.is_parse() { net_http::Error::UnableToParse } else if error.is_user() { //TODO(zmbush): handle this case. net_http::Error::Internal } else if error.is_canceled() { //TODO(zmbush): handle this case. net_http::Error::Internal } else if error.is_closed() { net_http::Error::ChannelClosed } else if error.is_connect() { net_http::Error::Connect } else if error.is_incomplete_message() { //TODO(zmbush): handle this case. net_http::Error::Internal } else if error.is_body_write_aborted() { //TODO(zmbush): handle this case. net_http::Error::Internal } else { net_http::Error::Internal } } fn to_error_response(error: net_http::Error) -> net_http::Response { net_http::Response { error: Some(error), body: None, final_url: None, status_code: None, status_line: None, headers: None, redirect: None, ..net_http::Response::EMPTY } } struct Loader { method: hyper::Method, url: hyper::Uri, headers: hyper::HeaderMap, body: Vec<u8>, deadline: fasync::Time, } impl Loader { async fn new(req: net_http::Request) -> Result<Self, anyhow::Error> { let net_http::Request { method, url, headers, body, deadline, .. } = req; let method = method.as_ref().map(|method| hyper::Method::from_str(method)).transpose()?; let method = method.unwrap_or(hyper::Method::GET); if let Some(url) = url { let url = hyper::Uri::try_from(url)?; let headers = headers .unwrap_or_else(|| vec![]) .into_iter() .map(|net_http::Header { name, value }| { let name = hyper::header::HeaderName::from_bytes(&name)?; let value = hyper::header::HeaderValue::from_bytes(&value)?; Ok((name, value)) }) .collect::<Result<hyper::HeaderMap, anyhow::Error>>()?; let body = match body { Some(net_http::Body::Buffer(buffer)) => { let mut bytes = vec![0; buffer.size as usize]; buffer.vmo.read(&mut bytes, 0)?; bytes } Some(net_http::Body::Stream(socket)) => { let mut stream = fasync::Socket::from_socket(socket)? .into_datagram_stream() .map(|r| r.context("reading from datagram stream")); let mut bytes = Vec::new(); while let Some(chunk) = stream.next().await { bytes.extend(chunk?); } bytes } None => Vec::new(), }; let deadline = deadline .map(|deadline| fasync::Time::from_nanos(deadline)) .unwrap_or_else(|| fasync::Time::after(DEFAULT_DEADLINE_DURATION)); trace!("Starting request {} {}", method, url); Ok(Loader { method, url, headers, body, deadline }) } else { Err(anyhow::Error::msg("Request missing URL")) } } fn build_request(&self) -> hyper::Request<hyper::Body> { let Self { method, url, headers, body, deadline: _ } = self; let mut request = hyper::Request::new(body.clone().into()); *request.method_mut() = method.clone(); *request.uri_mut() = url.clone(); *request.headers_mut() = headers.clone(); request } async fn start(mut self, loader_client: net_http::LoaderClientProxy) -> Result<(), zx::Status>
Err(e) => { debug!("Not redirecting because: {}", e); break Ok(()); } }; trace!("Redirect allowed to {} {}", self.method, self.url); continue; } } let response = to_success_response(&self.url, &self.method, hyper_response).await?; // We don't care if on_response returns an error since this is the last // callback. let _: Result<_, _> = loader_client.on_response(response).await; Ok(()) } Err(error) => { info!("Received network level error from hyper: {}", error); // We don't care if on_response returns an error since this is the last // callback. let _: Result<_, _> = loader_client.on_response(to_error_response(to_fidl_error(&error))).await; Ok(()) } }; } } async fn fetch( mut self, ) -> Result<(hyper::Response<hyper::Body>, hyper::Uri, hyper::Method), net_http::Error> { let deadline = self.deadline; if deadline < fasync::Time::now() { return Err(net_http::Error::DeadlineExceeded); } let client = fhyper::new_https_client_from_tcp_options(tcp_options()); async move { let mut redirects = 0; loop { break match client.request(self.build_request()).await { Ok(hyper_response) => { if redirects != MAX_REDIRECTS { let redirect = redirect_info(&self.url, &self.method, &hyper_response); if let Some(redirect) = redirect { if let Some(url) = redirect.url { self.url = url; self.method = redirect.method; trace!("Redirecting to {} {}", self.method, self.url); redirects += 1; continue; } } } Ok((hyper_response, self.url, self.method)) } Err(e) => { info!("Received network level error from hyper: {}", e); Err(to_fidl_error(&e)) } }; } } .on_timeout(deadline, || Err(net_http::Error::DeadlineExceeded)) .await } } fn calculate_redirect( old_url: &hyper::Uri, location: &hyper::header::HeaderValue, ) -> Option<hyper::Uri> { let old_parts = old_url.clone().into_parts(); let mut new_parts = hyper::Uri::try_from(location.as_bytes()).ok()?.into_parts(); if new_parts.scheme.is_none() { new_parts.scheme = old_parts.scheme; } if new_parts.authority.is_none() { new_parts.authority = old_parts.authority; } Some(hyper::Uri::from_parts(new_parts).ok()?) } fn spawn_server(stream: net_http::LoaderRequestStream) { fasync::Task::spawn( async move { stream .err_into() .try_for_each_concurrent(None, |message| async move { match message { net_http::LoaderRequest::Fetch { request, responder } => { debug!( "Fetch request received (url: {}): {:?}", request .url .as_ref() .and_then(|url| Some(url.as_str())) .unwrap_or_default(), request ); let result = Loader::new(request).await?.fetch().await; responder.send(match result { Ok((hyper_response, final_url, final_method)) => { to_success_response(&final_url, &final_method, hyper_response) .await? } Err(error) => to_error_response(error), })?; } net_http::LoaderRequest::Start { request, client, control_handle } => { debug!( "Start request received (url: {}): {:?}", request .url .as_ref() .and_then(|url| Some(url.as_str())) .unwrap_or_default(), request ); Loader::new(request).await?.start(client.into_proxy()?).await?;
{ let client = fhyper::new_https_client_from_tcp_options(tcp_options()); loop { break match client.request(self.build_request()).await { Ok(hyper_response) => { let redirect = redirect_info(&self.url, &self.method, &hyper_response); if let Some(redirect) = redirect { if let Some(url) = redirect.url { self.url = url; self.method = redirect.method; trace!( "Reporting redirect to OnResponse: {} {}", self.method, self.url ); let response = to_success_response(&self.url, &self.method, hyper_response) .await?; match loader_client.on_response(response).await { Ok(()) => {}
identifier_body
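start and fetch both use the loop { break match ... } shape: continue re-enters the loop on a redirect, while every other arm's value becomes the loop's result. A toy reduction of that control flow; fake_request and the redirect: prefix are invented stand-ins for the hyper client and the 3xx Location handling:

```rust
// The `loop { break match ... }` idiom: the match's value is the loop's
// value unless an arm diverges with `continue`.
fn fake_request(url: &str) -> Result<&'static str, &'static str> {
    match url {
        "http://a" => Ok("redirect:http://b"), // pretend 3xx with Location
        "http://b" => Ok("body"),
        _ => Err("connect error"),
    }
}

fn main() {
    let mut url = "http://a".to_string();
    let mut redirects = 0;
    let outcome = loop {
        break match fake_request(&url) {
            Ok(resp) if resp.starts_with("redirect:") && redirects < 10 => {
                url = resp["redirect:".len()..].to_string();
                redirects += 1;
                continue; // skips the break, retries with the new URL
            }
            Ok(resp) => Ok(resp.to_string()),
            Err(e) => Err(e.to_string()),
        };
    };
    println!("{outcome:?}"); // Ok("body") after one hop
}
```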
main.rs
fuchsia_zircon::{self as zx, AsHandleRef}, futures::{prelude::*, StreamExt}, hyper, std::convert::TryFrom, std::str::FromStr as _, tracing::{debug, error, info, trace}, }; static MAX_REDIRECTS: u8 = 10; static DEFAULT_DEADLINE_DURATION: zx::Duration = zx::Duration::from_seconds(15); fn to_status_line(version: hyper::Version, status: hyper::StatusCode) -> Vec<u8> { match status.canonical_reason() { None => format!("{:?} {}", version, status.as_str()), Some(canonical_reason) => format!("{:?} {} {}", version, status.as_str(), canonical_reason), } .as_bytes() .to_vec() } fn tcp_options() -> fhyper::TcpOptions { let mut options: fhyper::TcpOptions = std::default::Default::default(); // Use TCP keepalive to notice stuck connections. // After 60s with no data received send a probe every 15s. options.keepalive_idle = Some(std::time::Duration::from_secs(60)); options.keepalive_interval = Some(std::time::Duration::from_secs(15)); // After 8 probes go unacknowledged treat the connection as dead. options.keepalive_count = Some(8); options } struct RedirectInfo { url: Option<hyper::Uri>, referrer: Option<hyper::Uri>, method: hyper::Method, } fn redirect_info( old_uri: &hyper::Uri, method: &hyper::Method, hyper_response: &hyper::Response<hyper::Body>, ) -> Option<RedirectInfo> { if hyper_response.status().is_redirection() { Some(RedirectInfo { url: hyper_response .headers() .get(hyper::header::LOCATION) .and_then(|loc| calculate_redirect(old_uri, loc)), referrer: hyper_response .headers() .get(hyper::header::REFERER) .and_then(|loc| calculate_redirect(old_uri, loc)), method: if hyper_response.status() == hyper::StatusCode::SEE_OTHER { hyper::Method::GET } else { method.clone() }, }) } else { None } } async fn to_success_response( current_url: &hyper::Uri, current_method: &hyper::Method, mut hyper_response: hyper::Response<hyper::Body>, ) -> Result<net_http::Response, zx::Status> { let redirect_info = redirect_info(current_url, current_method, &hyper_response); let headers = hyper_response .headers() .iter() .map(|(name, value)| net_http::Header { name: name.as_str().as_bytes().to_vec(), value: value.as_bytes().to_vec(), }) .collect(); let (tx, rx) = zx::Socket::create(zx::SocketOpts::STREAM)?; let response = net_http::Response { error: None, body: Some(rx), final_url: Some(current_url.to_string()), status_code: Some(hyper_response.status().as_u16() as u32), status_line: Some(to_status_line(hyper_response.version(), hyper_response.status())), headers: Some(headers), redirect: redirect_info.map(|info| net_http::RedirectTarget { method: Some(info.method.to_string()), url: info.url.map(|u| u.to_string()), referrer: info.referrer.map(|r| r.to_string()), ..net_http::RedirectTarget::EMPTY }), ..net_http::Response::EMPTY }; fasync::Task::spawn(async move { let hyper_body = hyper_response.body_mut(); while let Some(chunk) = hyper_body.next().await { if let Ok(chunk) = chunk { let mut offset: usize = 0; while offset < chunk.len() { let pending = match tx.wait_handle( zx::Signals::SOCKET_PEER_CLOSED | zx::Signals::SOCKET_WRITABLE, zx::Time::INFINITE, ) { Err(status) => { error!("tx.wait() failed - status: {}", status); return; } Ok(pending) => pending, }; if pending.contains(zx::Signals::SOCKET_PEER_CLOSED) { info!("tx.wait() saw signal SOCKET_PEER_CLOSED"); return; } assert!(pending.contains(zx::Signals::SOCKET_WRITABLE)); let written = match tx.write(&chunk[offset..]) { Err(status) => { // Because of the wait above, we shouldn't ever see SHOULD_WAIT here, but to avoid // brittleness, continue and wait again in that case.
if status == zx::Status::SHOULD_WAIT { error!("Saw SHOULD_WAIT despite waiting first - expected now? - continuing"); continue; } info!("tx.write() failed - status: {}", status); return; } Ok(written) => written, }; offset += written; } } } }).detach(); Ok(response) } fn to_fidl_error(error: &hyper::Error) -> net_http::Error { #[allow(clippy::if_same_then_else)] // TODO(fxbug.dev/95028) if error.is_parse() { net_http::Error::UnableToParse } else if error.is_user() { //TODO(zmbush): handle this case. net_http::Error::Internal } else if error.is_canceled() { //TODO(zmbush): handle this case. net_http::Error::Internal } else if error.is_closed() { net_http::Error::ChannelClosed } else if error.is_connect() { net_http::Error::Connect } else if error.is_incomplete_message() { //TODO(zmbush): handle this case. net_http::Error::Internal } else if error.is_body_write_aborted() { //TODO(zmbush): handle this case. net_http::Error::Internal } else { net_http::Error::Internal }
} fn to_error_response(error: net_http::Error) -> net_http::Response { net_http::Response { error: Some(error), body: None, final_url: None, status_code: None, status_line: None, headers: None, redirect: None, ..net_http::Response::EMPTY } } struct Loader { method: hyper::Method, url: hyper::Uri, headers: hyper::HeaderMap, body: Vec<u8>, deadline: fasync::Time, } impl Loader { async fn new(req: net_http::Request) -> Result<Self, anyhow::Error> { let net_http::Request { method, url, headers, body, deadline, .. } = req; let method = method.as_ref().map(|method| hyper::Method::from_str(method)).transpose()?; let method = method.unwrap_or(hyper::Method::GET); if let Some(url) = url { let url = hyper::Uri::try_from(url)?; let headers = headers .unwrap_or_else(|| vec![]) .into_iter() .map(|net_http::Header { name, value }| { let name = hyper::header::HeaderName::from_bytes(&name)?; let value = hyper::header::HeaderValue::from_bytes(&value)?; Ok((name, value)) }) .collect::<Result<hyper::HeaderMap, anyhow::Error>>()?; let body = match body { Some(net_http::Body::Buffer(buffer)) => { let mut bytes = vec![0; buffer.size as usize]; buffer.vmo.read(&mut bytes, 0)?; bytes } Some(net_http::Body::Stream(socket)) => { let mut stream = fasync::Socket::from_socket(socket)? .into_datagram_stream() .map(|r| r.context("reading from datagram stream")); let mut bytes = Vec::new(); while let Some(chunk) = stream.next().await { bytes.extend(chunk?); } bytes } None => Vec::new(), }; let deadline = deadline .map(|deadline| fasync::Time::from_nanos(deadline)) .unwrap_or_else(|| fasync::Time::after(DEFAULT_DEADLINE_DURATION)); trace!("Starting request {} {}", method, url); Ok(Loader { method, url, headers, body, deadline }) } else { Err(anyhow::Error::msg("Request missing URL")) } } fn build_request(&self) -> hyper::Request<hyper::Body> { let Self { method, url, headers, body, deadline: _ } = self; let mut request = hyper::Request::new(body.clone().into()); *request.method_mut() = method.clone(); *request.uri_mut() = url.clone(); *request.headers_mut() = headers.clone(); request } async fn start(mut self, loader_client: net_http::LoaderClientProxy) -> Result<(), zx::Status> { let client = fhyper::new_https_client_from_tcp_options(tcp_options()); loop { break match client.request(self.build_request()).await { Ok(hyper_response) => { let redirect = redirect_info(&self.url, &self.method, &hyper_response); if let Some(redirect) = redirect { if let Some(url) = redirect.url { self.url = url; self.method = redirect.method; trace!( "Reporting redirect to OnResponse: {} {}", self.method, self.url ); let response = to_success_response(&self.url, &self.method, hyper_response) .await?; match loader_client.on_response(response).await { Ok(()) => {} Err(e) => { debug!("Not redirecting because: {}", e); break Ok(()); } }; trace!("Redirect allowed to {} {}", self.method, self.url); continue; } } let response = to_success_response(&self.url, &self.method, hyper_response).await?; // We don't care if on_response returns an error since this is the last // callback. let _: Result<_, _> = loader_client.on_response(response).await; Ok(()) } Err(error) => { info!("Received network level error from hyper: {}", error); // We don't care if on_response returns an error since this is the last // callback.
let _: Result<_, _> = loader_client.on_response(to_error_response(to_fidl_error(&error))).await; Ok(()) } }; } } async fn fetch( mut self, ) -> Result<(hyper::Response<hyper::Body>, hyper::Uri, hyper::Method), net_http::Error> { let deadline = self.deadline; if deadline < fasync::Time::now() { return Err(net_http::Error::DeadlineExceeded); } let client = fhyper::new_https_client_from_tcp_options(tcp_options()); async move { let mut redirects = 0; loop { break match client.request(self.build_request()).await { Ok(hyper_response) => { if redirects != MAX_REDIRECTS { let redirect = redirect_info(&self.url, &self.method, &hyper_response); if let Some(redirect) = redirect { if let Some(url) = redirect.url { self.url = url; self.method = redirect.method; trace!("Redirecting to {} {}", self.method, self.url); redirects += 1; continue; } } } Ok((hyper_response, self.url, self.method)) } Err(e) => { info!("Received network level error from hyper: {}", e); Err(to_fidl_error(&e)) } }; } } .on_timeout(deadline, || Err(net_http::Error::DeadlineExceeded)) .await } } fn calculate_redirect( old_url: &hyper::Uri, location: &hyper::header::HeaderValue, ) -> Option<hyper::Uri> { let old_parts = old_url.clone().into_parts(); let mut new_parts = hyper::Uri::try_from(location.as_bytes()).ok()?.into_parts(); if new_parts.scheme.is_none() { new_parts.scheme = old_parts.scheme; } if new_parts.authority.is_none() { new_parts.authority = old_parts.authority; } Some(hyper::Uri::from_parts(new_parts).ok()?) } fn spawn_server(stream: net_http::LoaderRequestStream) { fasync::Task::spawn( async move { stream .err_into() .try_for_each_concurrent(None, |message| async move { match message { net_http::LoaderRequest::Fetch { request, responder } => { debug!( "Fetch request received (url: {}): {:?}", request .url .as_ref() .and_then(|url| Some(url.as_str())) .unwrap_or_default(), request ); let result = Loader::new(request).await?.fetch().await; responder.send(match result { Ok((hyper_response, final_url, final_method)) => { to_success_response(&final_url, &final_method, hyper_response) .await? } Err(error) => to_error_response(error), })?; } net_http::LoaderRequest::Start { request, client, control_handle } => { debug!( "Start request received (url: {}): {:?}", request .url .as_ref() .and_then(|url| Some(url.as_str())) .unwrap_or_default(), request ); Loader::new(request).await?.start(client.into_proxy()?).await?;
random_line_split
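The `calculate_redirect` helper above merges a `Location` header into the previous URL by falling back to the old scheme and authority whenever the redirect target omits them. Below is a minimal, dependency-free sketch of that fallback rule; `SimpleUri` is a hypothetical stand-in for the parts of `hyper::Uri`, not the real type.

```rust
// `SimpleUri` is a hypothetical stand-in for the parts of `hyper::Uri`.
#[derive(Debug, Clone)]
struct SimpleUri {
    scheme: Option<String>,
    authority: Option<String>,
    path: String,
}

// The fallback rule: a relative `Location` inherits whatever parts
// (scheme, authority) it does not specify from the previous URL.
fn merge_redirect(old: &SimpleUri, location: SimpleUri) -> SimpleUri {
    SimpleUri {
        scheme: location.scheme.or_else(|| old.scheme.clone()),
        authority: location.authority.or_else(|| old.authority.clone()),
        path: location.path,
    }
}

fn main() {
    let old = SimpleUri {
        scheme: Some("https".into()),
        authority: Some("example.com".into()),
        path: "/a".into(),
    };
    // A redirect like `Location: /b` carries no scheme or authority...
    let relative = SimpleUri { scheme: None, authority: None, path: "/b".into() };
    let merged = merge_redirect(&old, relative);
    // ...so both are taken from the URL being redirected away from.
    assert_eq!(merged.scheme.as_deref(), Some("https"));
    assert_eq!(merged.authority.as_deref(), Some("example.com"));
    assert_eq!(merged.path, "/b");
}
```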
spritecfg.rs
#![allow(dead_code)] extern crate asar; use std::path::{PathBuf, Path}; use std::io::prelude::*; use std::fs::{File, OpenOptions}; use nom::*; use asar::rom::RomBuf; use parse_aux::dys_prefix; use genus::Genus; use dys_tables::DysTables; use insert_err::{InsertResult, format_result, warnless_result, single_error}; #[derive(Debug)] pub struct CfgErr { explain: String, } #[derive(Debug)] pub struct SpriteCfg { pub genus: Genus, pub id: u16, pub tweak_bytes: [u8; 6], pub prop_bytes: [u8; 2], pub clipping: [u8; 4], dys_option_bytes: [u8; 2], acts_like: u8, extra_bytes: u8, name: String, desc: String, name_set: Option<String>, desc_set: Option<String>, source_path: PathBuf, } #[derive(Debug, Copy, Clone)] pub struct InsertPoint { pub main: usize, pub init: usize, pub drop: usize, } impl SpriteCfg { pub fn parse(path: &Path, gen: Genus, id: u16, buf: &str) -> Result<SpriteCfg, CfgErr> { if let IResult::Done(rest, vsn) = dys_prefix(buf) { if vsn != 1 { return Err(CfgErr { explain: String::from("You have a cfg from the future") }); } else { parse_newstyle(path, gen, id, rest) } } else { parse_oldstyle(path, gen, id, buf) } } pub fn new() -> SpriteCfg { SpriteCfg { genus: Genus::Std, id: 0, tweak_bytes: [0, 0, 0, 0, 0, 0], prop_bytes: [0, 0], clipping: [0, 0, 0, 0], dys_option_bytes: [0, 0], acts_like: 0, extra_bytes: 0, name: "".to_string(), desc: "".to_string(), name_set: None, desc_set: None, source_path: PathBuf::from(""), } } pub fn needs_init(&self) -> bool { match self.genus { Genus::Std => true, _ => false, } } pub fn needs_drop(&self) -> bool { match self.genus { Genus::Std => self.dys_option_bytes[1] & 0x80 != 0, _ => false, } } pub fn placeable(&self) -> bool { self.genus.placeable() } pub fn assemble( &self, rom: &mut RomBuf, prelude: &str, source: &Path, temp: &Path, iopts: ::insert_opts::InsertOpts ) -> InsertResult<InsertPoint> { let (mut main, mut init, mut drop) = (0usize, 0usize, 0usize); { let mut tempasm = OpenOptions::new() .write(true) .truncate(true) .create(true) .open(temp) .unwrap(); tempasm.write_all(prelude.as_bytes()).unwrap(); let mut source_buf = Vec::<u8>::with_capacity(8 * 1024); // A wild guess.
let mut srcf = warnless_result( File::open(source), |e| format!("error opening \"{}\": {}", source.to_string_lossy(), e) )?; srcf.read_to_end(&mut source_buf).unwrap(); tempasm.write_all(&source_buf).unwrap(); } let warns = match asar::patch(temp, rom) { Ok((_, mut ws)) => ws.drain(..).map(|w| w.into()).collect(), Err((mut es, mut ws)) => { return Err( (es.drain(..).map(|e| e.into()).collect(), ws.drain(..).map(|w| w.into()).collect()) ) }, }; for print in asar::prints() { let mut chunks = print.split_whitespace(); let fst = chunks.next(); let snd = chunks.next(); match fst { Some("MAIN") => match snd { Some(ofs) => main = usize::from_str_radix(ofs, 16).unwrap(), _ => return single_error("No offset after \"MAIN\" declaration"), }, Some("INIT") => match snd { Some(ofs) => init = usize::from_str_radix(ofs, 16).unwrap(), _ => return single_error("No offset after \"INIT\" declaration"), }, Some("DROP") => match snd { Some(ofs) => drop = usize::from_str_radix(ofs, 16).unwrap(), _ => return single_error("No offset after \"DROP\" declaration"), }, None => (), _ => return single_error("The sprite printed something other than MAIN, INIT, or DROP"), } }; if main == 0 { return single_error("No main routine"); } if init == 0 && self.needs_init() { return single_error("No init routine"); } if drop == 0 && self.needs_drop() { return single_error("Drop routine required by dys_opts, but not provided"); } if drop != 0 && !self.needs_drop() { return single_error("Sprite has a drop routine, but dys_opts doesn't require one"); } if self.needs_drop() && !iopts.use_drops { return single_error("Sprite needs a drop routine, but drop routines aren't enabled"); } Ok((InsertPoint { main, init, drop }, warns)) } pub fn apply_cfg(&self, rom: &mut RomBuf, tables: &DysTables) { match self.genus { Genus::Std | Genus::Gen | Genus::Sht | Genus::R1s => { if self.id < 0x200 { let size_ofs = if self.id < 0x100 { self.id as usize } else { self.id as usize + 0x100 }; let size = self.extra_bytes + 3; rom.set_byte(tables.sprite_sizes + size_ofs, size).unwrap(); rom.set_byte(tables.sprite_sizes + size_ofs + 0x100, size).unwrap(); let optbase = tables.option_bytes + (self.id as usize * 0x10); rom.set_byte(optbase, self.genus.to_byte()).unwrap(); rom.set_byte(optbase + 1, self.acts_like).unwrap(); rom.set_bytes(optbase + 2, &self.tweak_bytes).unwrap(); rom.set_bytes(optbase + 8, &self.dys_option_bytes).unwrap(); rom.set_bytes(optbase + 14, &self.prop_bytes).unwrap(); rom.set_bytes(optbase + 10, &self.clipping).unwrap(); }; }, Genus::Cls => {}, _ => unimplemented!(), }; } pub fn apply_offsets(&self, rom: &mut RomBuf, tables: &DysTables, ip: InsertPoint) { let ofs = self.id as usize * 3; match self.genus { g if g.placeable() => { rom.set_long(tables.main_ptrs + ofs, ip.main as u32).unwrap(); rom.set_long(tables.init_ptrs + ofs, ip.init as u32).unwrap(); if tables.drop_ptrs != 0 { rom.set_long(tables.drop_ptrs + ofs, ip.drop as u32).unwrap(); } }, Genus::Cls => rom.set_long(tables.cls_ptrs + ofs, ip.main as u32).unwrap(), _ => unimplemented!(), }; } pub fn name(&self, ebit: bool) -> &String { if ebit && self.name_set.is_some() { self.name_set.as_ref().unwrap() } else { &self.name } } pub fn desc(&self, ebit: bool) -> &String { if ebit && self.desc_set.is_some() { self.desc_set.as_ref().unwrap() } else { &self.desc } } pub fn uses_ebit(&self) -> bool { self.name_set.is_some() } pub fn place_mw2(&self, target: &mut Vec<u8>, ebit: bool) { if !self.placeable() { panic!("Attempted to place unplaceable sprite") }; let b0 = 0x89; let b1 = 0x80;
let num_extra_bit: u8 = if self.id & 0x100 == 0 { 0 } else { 8 }; let ebit_val: u8 = if !ebit { 0 } else { 4 }; let b0 = b0 | num_extra_bit | ebit_val; target.push(b0); target.push(b1); if self.id >= 0x200 { target.push(0xf8 + self.extra_bytes); } target.push((self.id & 0xff) as u8); for _ in 0..self.extra_bytes { target.push(0); }; } pub fn dys_option_bytes(&self) -> &[u8] { &self.dys_option_bytes } pub fn source_path(&self) -> &PathBuf { &self.source_path } } fn default_name(path: &Path, gen: Genus, id: u16) -> (String, String) { let root = match path.file_stem() { Some(s) => s.to_string_lossy().into_owned(), None => format!("Custom {} #{:03x}", gen.shortname(), id), }; (root.clone(), root + " (extra bit set)") } fn parse_newstyle(path: &Path, gen: Genus, id: u16, buf: &str) -> Result<SpriteCfg, CfgErr> { let (mut got_name, mut got_desc): (Option<String>, Option<String>) = (None, None); let mut cfg = SpriteCfg { genus: gen, id: id, .. SpriteCfg::new() }; let mut buf = buf; while let IResult::Done(rest, (name, value)) = cfg_line(buf) { buf = rest; match name { "acts-like" => cfg.acts_like = try!(read_byte(value)), "source" => cfg.source_path = path.with_file_name(value), "props" => try!(read_bytes(value, &mut cfg.tweak_bytes)), "xbytes" => cfg.extra_bytes = try!(read_byte(value)), "ext-props" => try!(read_bytes(value, &mut cfg.prop_bytes)), "dys-opts" => try!(read_bytes(value, &mut cfg.dys_option_bytes)), "ext-clip" => try!(read_bytes(value, &mut cfg.clipping)), "name" => got_name = Some(String::from(value)), "description" => got_desc = Some(String::from(value)), "desc-set" => cfg.desc_set = Some(String::from(value)), "name-set" => cfg.name_set = Some(String::from(value)), "ext-prop-def" | "m16d" | "tilemap" => (), _ => return Err(CfgErr { explain: format!("bad field name: \"{}\"", name) }), }; }; if let Some(s) = got_name { cfg.name = s; } else { let t = default_name(path, gen, id); cfg.name = t.0; cfg.name_set = Some(t.1); }; if let Some(s) = got_desc { cfg.desc = s; } else { cfg.desc = cfg.name.clone(); cfg.desc_set = cfg.name_set.clone(); }; if cfg.source_path.file_name() == None { Err(CfgErr { explain: String::from("Sprite needs a source file") }) } else { Ok(cfg) } } fn parse_oldstyle(path: &Path, gen: Genus, id: u16, buf: &str) -> Result<SpriteCfg, CfgErr>
prop_bytes: [d[7], d[8]], source_path: path.with_file_name(s), name: name, name_set: Some(name_set), desc: desc, desc_set: Some(desc_set), .. SpriteCfg::new() }) } else { Err(CfgErr { explain: String::from("Old-style CFG too short") }) } } fn read_byte(s: &str) -> Result<u8, CfgErr> { let iter = s.trim().chars(); let mut n = 0u32; let mut read = false; for ch in iter { if let Some(v) = ch.to_digit(0x10) { n *= 0x10; n += v; read = true; } else { return Err(CfgErr { explain: String::from("Non-byte data in byte field") }) } } if !read { Err(CfgErr { explain: String::from("Expected a byte, found nothing") }) } else { Ok(n as u8) } } fn read_bytes(s: &str, buf: &mut [u8]) -> Result<(), CfgErr> { let mut bytes = Vec::<u8>::with_capacity(buf.len()); for b in s.split_whitespace() { bytes.push(try!(read_byte(b))); }; if bytes.len() != buf.len() { Err(CfgErr { explain: format!("Wrong length byte sequence: expected {} bytes, got {}", buf.len(), bytes.len()) }) } else { for (i, b) in bytes.iter().enumerate() { buf[i] = *b; } Ok(()) } } fn tag_ending_s(ch: char) -> bool { ch == ' ' || ch == ':' } fn line_ending_s(ch: char) -> bool { ch == '\r' || ch == '\n' } named!(cfg_line(&str) -> (&str, &str), chain!( multispace? ~ name: take_till_s!(tag_ending_s) ~ space? ~ tag_s!(":") ~ space? ~ valu: take_till_s!(line_ending_s) ~ multispace? , || (name, valu) ) );
{ let mut it = buf.split_whitespace().skip(1); let mut d = [0u8; 9]; for output_byte in &mut d { if let Some(s) = it.next() { *output_byte = try!(read_byte(s)); } else { return Err(CfgErr{ explain: String::from("Old-style CFG too short") }); } }; let (name, name_set) = default_name(path, gen, id); let (desc, desc_set) = (name.clone(), name_set.clone()); if let Some(s) = it.next() { Ok(SpriteCfg { genus: gen, id: id, acts_like: d[0], tweak_bytes: [d[1], d[2], d[3], d[4], d[5], d[6]],
identifier_body
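The old-style CFG parser above leans on `read_byte`, which folds hexadecimal digits into an accumulator and rejects both non-hex characters and empty input. A self-contained sketch of that parsing rule follows; the field values used in `main` are illustrative.

```rust
// Fold hex digits into an accumulator; reject non-hex characters and
// empty input, as `read_byte` does.
fn parse_hex_byte(s: &str) -> Result<u8, String> {
    let mut n: u32 = 0;
    let mut saw_digit = false;
    for ch in s.trim().chars() {
        let v = ch
            .to_digit(0x10)
            .ok_or_else(|| format!("non-byte data in byte field: {:?}", ch))?;
        n = n * 0x10 + v;
        saw_digit = true;
    }
    if !saw_digit {
        return Err("expected a byte, found nothing".to_string());
    }
    // Like the original, anything wider than two digits truncates to the low byte.
    Ok(n as u8)
}

fn main() {
    assert_eq!(parse_hex_byte("36"), Ok(0x36));
    assert_eq!(parse_hex_byte(" 0c "), Ok(0x0c));
    assert!(parse_hex_byte("zz").is_err());
    assert!(parse_hex_byte("").is_err());
}
```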
spritecfg.rs
#![allow(dead_code)] extern crate asar; use std::path::{PathBuf, Path}; use std::io::prelude::*; use std::fs::{File, OpenOptions}; use nom::*; use asar::rom::RomBuf; use parse_aux::dys_prefix; use genus::Genus; use dys_tables::DysTables; use insert_err::{InsertResult, format_result, warnless_result, single_error}; #[derive(Debug)] pub struct CfgErr { explain: String, } #[derive(Debug)] pub struct SpriteCfg { pub genus: Genus, pub id: u16, pub tweak_bytes: [u8; 6], pub prop_bytes: [u8; 2], pub clipping: [u8; 4], dys_option_bytes: [u8; 2], acts_like: u8, extra_bytes: u8, name: String, desc: String, name_set: Option<String>, desc_set: Option<String>, source_path: PathBuf, } #[derive(Debug, Copy, Clone)] pub struct InsertPoint { pub main: usize, pub init: usize, pub drop: usize, } impl SpriteCfg { pub fn parse(path: &Path, gen: Genus, id: u16, buf: &str) -> Result<SpriteCfg, CfgErr> { if let IResult::Done(rest, vsn) = dys_prefix(buf) { if vsn != 1 { return Err(CfgErr { explain: String::from("You have a cfg from the future") }); } else { parse_newstyle(path, gen, id, rest) } } else { parse_oldstyle(path, gen, id, buf) } } pub fn new() -> SpriteCfg { SpriteCfg { genus: Genus::Std, id: 0, tweak_bytes: [0, 0, 0, 0, 0, 0], prop_bytes: [0, 0], clipping: [0, 0, 0, 0], dys_option_bytes: [0, 0], acts_like: 0, extra_bytes: 0, name: "".to_string(), desc: "".to_string(), name_set: None, desc_set: None, source_path: PathBuf::from(""), } } pub fn needs_init(&self) -> bool { match self.genus { Genus::Std => true, _ => false, } } pub fn needs_drop(&self) -> bool { match self.genus { Genus::Std => self.dys_option_bytes[1] & 0x80 != 0, _ => false, } } pub fn placeable(&self) -> bool { self.genus.placeable() } pub fn assemble( &self, rom: &mut RomBuf, prelude: &str, source: &Path, temp: &Path, iopts: ::insert_opts::InsertOpts ) -> InsertResult<InsertPoint> { let (mut main, mut init, mut drop) = (0usize, 0usize, 0usize); { let mut tempasm = OpenOptions::new() .write(true) .truncate(true) .create(true) .open(temp) .unwrap(); tempasm.write_all(prelude.as_bytes()).unwrap(); let mut source_buf = Vec::<u8>::with_capacity(8 * 1024); // A wild guess. let mut srcf = warnless_result( File::open(source), |e| format!("error opening \"{}\": {}", source.to_string_lossy(), e) )?; srcf.read_to_end(&mut source_buf).unwrap(); tempasm.write_all(&source_buf).unwrap(); } let warns = match asar::patch(temp, rom) { Ok((_, mut ws)) => ws.drain(..).map(|w| w.into()).collect(), Err((mut es, mut ws)) => { return Err( (es.drain(..).map(|e| e.into()).collect(), ws.drain(..).map(|w| w.into()).collect()) ) }, }; for print in asar::prints() { let mut chunks = print.split_whitespace(); let fst = chunks.next(); let snd = chunks.next(); match fst { Some("MAIN") => match snd { Some(ofs) => main = usize::from_str_radix(ofs, 16).unwrap(), _ => return single_error("No offset after \"MAIN\" declaration"), }, Some("INIT") => match snd { Some(ofs) => init = usize::from_str_radix(ofs, 16).unwrap(), _ => return single_error("No offset after \"INIT\" declaration"), }, Some("DROP") => match snd { Some(ofs) => drop = usize::from_str_radix(ofs, 16).unwrap(), _ => return single_error("No offset after \"DROP\" declaration"), }, None => (), _ => return single_error("The sprite printed something other than MAIN, INIT, or DROP"), } }; if main == 0
if init == 0 && self.needs_init() { return single_error("No init routine"); } if drop == 0 && self.needs_drop() { return single_error("Drop routine required by dys_opts, but not provided"); } if drop != 0 && !self.needs_drop() { return single_error("Sprite has a drop routine, but dys_opts doesn't require one"); } if self.needs_drop() && !iopts.use_drops { return single_error("Sprite needs a drop routine, but drop routines aren't enabled"); } Ok((InsertPoint { main, init, drop }, warns)) } pub fn apply_cfg(&self, rom: &mut RomBuf, tables: &DysTables) { match self.genus { Genus::Std | Genus::Gen | Genus::Sht | Genus::R1s => { if self.id < 0x200 { let size_ofs = if self.id < 0x100 { self.id as usize } else { self.id as usize + 0x100 }; let size = self.extra_bytes + 3; rom.set_byte(tables.sprite_sizes + size_ofs, size).unwrap(); rom.set_byte(tables.sprite_sizes + size_ofs + 0x100, size).unwrap(); let optbase = tables.option_bytes + (self.id as usize * 0x10); rom.set_byte(optbase, self.genus.to_byte()).unwrap(); rom.set_byte(optbase + 1, self.acts_like).unwrap(); rom.set_bytes(optbase + 2, &self.tweak_bytes).unwrap(); rom.set_bytes(optbase + 8, &self.dys_option_bytes).unwrap(); rom.set_bytes(optbase + 14, &self.prop_bytes).unwrap(); rom.set_bytes(optbase + 10, &self.clipping).unwrap(); }; }, Genus::Cls => {}, _ => unimplemented!(), }; } pub fn apply_offsets(&self, rom: &mut RomBuf, tables: &DysTables, ip: InsertPoint) { let ofs = self.id as usize * 3; match self.genus { g if g.placeable() => { rom.set_long(tables.main_ptrs + ofs, ip.main as u32).unwrap(); rom.set_long(tables.init_ptrs + ofs, ip.init as u32).unwrap(); if tables.drop_ptrs != 0 { rom.set_long(tables.drop_ptrs + ofs, ip.drop as u32).unwrap(); } }, Genus::Cls => rom.set_long(tables.cls_ptrs + ofs, ip.main as u32).unwrap(), _ => unimplemented!(), }; } pub fn name(&self, ebit: bool) -> &String { if ebit && self.name_set.is_some() { self.name_set.as_ref().unwrap() } else { &self.name } } pub fn desc(&self, ebit: bool) -> &String { if ebit && self.desc_set.is_some() { self.desc_set.as_ref().unwrap() } else { &self.desc } } pub fn uses_ebit(&self) -> bool { self.name_set.is_some() } pub fn place_mw2(&self, target: &mut Vec<u8>, ebit: bool) { if !self.placeable() { panic!("Attempted to place unplaceable sprite") }; let b0 = 0x89; let b1 = 0x80; let num_extra_bit: u8 = if self.id & 0x100 == 0 { 0 } else { 8 }; let ebit_val: u8 = if !ebit { 0 } else { 4 }; let b0 = b0 | num_extra_bit | ebit_val; target.push(b0); target.push(b1); if self.id >= 0x200 { target.push(0xf8 + self.extra_bytes); } target.push((self.id & 0xff) as u8); for _ in 0..self.extra_bytes { target.push(0); }; } pub fn dys_option_bytes(&self) -> &[u8] { &self.dys_option_bytes } pub fn source_path(&self) -> &PathBuf { &self.source_path } } fn default_name(path: &Path, gen: Genus, id: u16) -> (String, String) { let root = match path.file_stem() { Some(s) => s.to_string_lossy().into_owned(), None => format!("Custom {} #{:03x}", gen.shortname(), id), }; (root.clone(), root + " (extra bit set)") } fn parse_newstyle(path: &Path, gen: Genus, id: u16, buf: &str) -> Result<SpriteCfg, CfgErr> { let (mut got_name, mut got_desc): (Option<String>, Option<String>) = (None, None); let mut cfg = SpriteCfg { genus: gen, id: id, ..
SpriteCfg::new() }; let mut buf = buf; while let IResult::Done(rest, (name, value)) = cfg_line(buf) { buf = rest; match name { "acts-like" => cfg.acts_like = try!(read_byte(value)), "source" => cfg.source_path = path.with_file_name(value), "props" => try!(read_bytes(value, &mut cfg.tweak_bytes)), "xbytes" => cfg.extra_bytes = try!(read_byte(value)), "ext-props" => try!(read_bytes(value, &mut cfg.prop_bytes)), "dys-opts" => try!(read_bytes(value, &mut cfg.dys_option_bytes)), "ext-clip" => try!(read_bytes(value, &mut cfg.clipping)), "name" => got_name = Some(String::from(value)), "description" => got_desc = Some(String::from(value)), "desc-set" => cfg.desc_set = Some(String::from(value)), "name-set" => cfg.name_set = Some(String::from(value)), "ext-prop-def" | "m16d" | "tilemap" => (), _ => return Err(CfgErr { explain: format!("bad field name: \"{}\"", name) }), }; }; if let Some(s) = got_name { cfg.name = s; } else { let t = default_name(path, gen, id); cfg.name = t.0; cfg.name_set = Some(t.1); }; if let Some(s) = got_desc { cfg.desc = s; } else { cfg.desc = cfg.name.clone(); cfg.desc_set = cfg.name_set.clone(); }; if cfg.source_path.file_name() == None { Err(CfgErr { explain: String::from("Sprite needs a source file") }) } else { Ok(cfg) } } fn parse_oldstyle(path: &Path, gen: Genus, id: u16, buf: &str) -> Result<SpriteCfg, CfgErr> { let mut it = buf.split_whitespace().skip(1); let mut d = [0u8; 9]; for output_byte in &mut d { if let Some(s) = it.next() { *output_byte = try!(read_byte(s)); } else { return Err(CfgErr{ explain: String::from("Old-style CFG too short") }); } }; let (name, name_set) = default_name(path, gen, id); let (desc, desc_set) = (name.clone(), name_set.clone()); if let Some(s) = it.next() { Ok(SpriteCfg { genus: gen, id: id, acts_like: d[0], tweak_bytes: [d[1], d[2], d[3], d[4], d[5], d[6]], prop_bytes: [d[7], d[8]], source_path: path.with_file_name(s), name: name, name_set: Some(name_set), desc: desc, desc_set: Some(desc_set), .. SpriteCfg::new() }) } else { Err(CfgErr { explain: String::from("Old-style CFG too short") }) } } fn read_byte(s: &str) -> Result<u8, CfgErr> { let iter = s.trim().chars(); let mut n = 0u32; let mut read = false; for ch in iter { if let Some(v) = ch.to_digit(0x10) { n *= 0x10; n += v; read = true; } else { return Err(CfgErr { explain: String::from("Non-byte data in byte field") }) } } if !read { Err(CfgErr { explain: String::from("Expected a byte, found nothing") }) } else { Ok(n as u8) } } fn read_bytes(s: &str, buf: &mut [u8]) -> Result<(), CfgErr> { let mut bytes = Vec::<u8>::with_capacity(buf.len()); for b in s.split_whitespace() { bytes.push(try!(read_byte(b))); }; if bytes.len() != buf.len() { Err(CfgErr { explain: format!("Wrong length byte sequence: expected {} bytes, got {}", buf.len(), bytes.len()) }) } else { for (i, b) in bytes.iter().enumerate() { buf[i] = *b; } Ok(()) } } fn tag_ending_s(ch: char) -> bool { ch == ' ' || ch == ':' } fn line_ending_s(ch: char) -> bool { ch == '\r' || ch == '\n' } named!(cfg_line(&str) -> (&str, &str), chain!( multispace? ~ name: take_till_s!(tag_ending_s) ~ space? ~ tag_s!(":") ~ space? ~ valu: take_till_s!(line_ending_s) ~ multispace? , || (name, valu) ) );
{ return single_error("No main routine"); }
conditional_block
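`assemble` above discovers the inserted routine offsets through asar's print output: the patched sprite prints lines such as `MAIN 108000`, and the inserter reads a label plus a hexadecimal offset from each one, failing exactly as this row's `conditional_block` middle does when `main` stays zero. A sketch of that line format follows; the label set and radix come from the dumped code, while the offsets shown are made up.

```rust
// Parse one asar print line of the form "<LABEL> <hex offset>".
fn parse_print(line: &str) -> Result<(&str, usize), String> {
    let mut chunks = line.split_whitespace();
    let label = chunks.next().ok_or("empty print line")?;
    // Only the three labels the inserter understands are accepted.
    match label {
        "MAIN" | "INIT" | "DROP" => {}
        other => return Err(format!("unexpected label {:?}", other)),
    }
    let ofs = chunks
        .next()
        .ok_or_else(|| format!("no offset after {:?} declaration", label))?;
    // Offsets are printed in hex, hence radix 16.
    let ofs = usize::from_str_radix(ofs, 16).map_err(|e| e.to_string())?;
    Ok((label, ofs))
}

fn main() {
    assert_eq!(parse_print("MAIN 108000"), Ok(("MAIN", 0x108000)));
    assert_eq!(parse_print("DROP 10c000"), Ok(("DROP", 0x10c000)));
    assert!(parse_print("PRINT hello").is_err());
}
```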
spritecfg.rs
#![allow(dead_code)] extern crate asar; use std::path::{PathBuf, Path}; use std::io::prelude::*; use std::fs::{File, OpenOptions}; use nom::*; use asar::rom::RomBuf; use parse_aux::dys_prefix; use genus::Genus; use dys_tables::DysTables; use insert_err::{InsertResult, format_result, warnless_result, single_error}; #[derive(Debug)] pub struct CfgErr { explain: String, } #[derive(Debug)] pub struct SpriteCfg { pub genus: Genus, pub id: u16, pub tweak_bytes: [u8; 6], pub prop_bytes: [u8; 2], pub clipping: [u8; 4], dys_option_bytes: [u8; 2], acts_like: u8, extra_bytes: u8, name: String, desc: String, name_set: Option<String>, desc_set: Option<String>, source_path: PathBuf, } #[derive(Debug, Copy, Clone)] pub struct InsertPoint { pub main: usize, pub init: usize, pub drop: usize, } impl SpriteCfg { pub fn parse(path: &Path, gen: Genus, id: u16, buf: &str) -> Result<SpriteCfg, CfgErr> { if let IResult::Done(rest, vsn) = dys_prefix(buf) { if vsn != 1 { return Err(CfgErr { explain: String::from("You have a cfg from the future") }); } else { parse_newstyle(path, gen, id, rest) } } else { parse_oldstyle(path, gen, id, buf) } } pub fn new() -> SpriteCfg { SpriteCfg { genus: Genus::Std, id: 0, tweak_bytes: [0, 0, 0, 0, 0, 0], prop_bytes: [0, 0], clipping: [0, 0, 0, 0], dys_option_bytes: [0, 0], acts_like: 0, extra_bytes: 0, name: "".to_string(), desc: "".to_string(), name_set: None, desc_set: None, source_path: PathBuf::from(""), } } pub fn needs_init(&self) -> bool { match self.genus { Genus::Std => true, _ => false, } } pub fn needs_drop(&self) -> bool { match self.genus { Genus::Std => self.dys_option_bytes[1] & 0x80 != 0, _ => false, } } pub fn placeable(&self) -> bool { self.genus.placeable() } pub fn assemble( &self, rom: &mut RomBuf, prelude: &str, source: &Path, temp: &Path, iopts: ::insert_opts::InsertOpts ) -> InsertResult<InsertPoint> { let (mut main, mut init, mut drop) = (0usize, 0usize, 0usize); { let mut tempasm = OpenOptions::new() .write(true) .truncate(true) .create(true) .open(temp) .unwrap(); tempasm.write_all(prelude.as_bytes()).unwrap(); let mut source_buf = Vec::<u8>::with_capacity(8 * 1024); // A wild guess.
let mut srcf = warnless_result( File::open(source), |e| format!("error opening \"{}\": {}", source.to_string_lossy(), e) )?; srcf.read_to_end(&mut source_buf).unwrap(); tempasm.write_all(&source_buf).unwrap(); } let warns = match asar::patch(temp, rom) { Ok((_, mut ws)) => ws.drain(..).map(|w| w.into()).collect(), Err((mut es, mut ws)) => { return Err( (es.drain(..).map(|e| e.into()).collect(), ws.drain(..).map(|w| w.into()).collect()) ) }, }; for print in asar::prints() { let mut chunks = print.split_whitespace(); let fst = chunks.next(); let snd = chunks.next(); match fst { Some("MAIN") => match snd { Some(ofs) => main = usize::from_str_radix(ofs, 16).unwrap(), _ => return single_error("No offset after \"MAIN\" declaration"), }, Some("INIT") => match snd { Some(ofs) => init = usize::from_str_radix(ofs, 16).unwrap(), _ => return single_error("No offset after \"INIT\" declaration"), }, Some("DROP") => match snd { Some(ofs) => drop = usize::from_str_radix(ofs, 16).unwrap(), _ => return single_error("No offset after \"DROP\" declaration"), }, None => (), _ => return single_error("The sprite printed something other than MAIN, INIT, or DROP"), } }; if main == 0 { return single_error("No main routine"); } if init == 0 && self.needs_init() { return single_error("No init routine"); } if drop == 0 && self.needs_drop() { return single_error("Drop routine required by dys_opts, but not provided"); } if drop != 0 && !self.needs_drop() { return single_error("Sprite has a drop routine, but dys_opts doesn't require one"); } if self.needs_drop() && !iopts.use_drops { return single_error("Sprite needs a drop routine, but drop routines aren't enabled"); } Ok((InsertPoint { main, init, drop }, warns)) } pub fn apply_cfg(&self, rom: &mut RomBuf, tables: &DysTables) { match self.genus { Genus::Std | Genus::Gen | Genus::Sht | Genus::R1s => { if self.id < 0x200 { let size_ofs = if self.id < 0x100 { self.id as usize } else { self.id as usize + 0x100 }; let size = self.extra_bytes + 3; rom.set_byte(tables.sprite_sizes + size_ofs, size).unwrap(); rom.set_byte(tables.sprite_sizes + size_ofs + 0x100, size).unwrap(); let optbase = tables.option_bytes + (self.id as usize * 0x10); rom.set_byte(optbase, self.genus.to_byte()).unwrap(); rom.set_byte(optbase + 1, self.acts_like).unwrap(); rom.set_bytes(optbase + 2, &self.tweak_bytes).unwrap(); rom.set_bytes(optbase + 8, &self.dys_option_bytes).unwrap(); rom.set_bytes(optbase + 14, &self.prop_bytes).unwrap(); rom.set_bytes(optbase + 10, &self.clipping).unwrap(); }; }, Genus::Cls => {}, _ => unimplemented!(), }; } pub fn apply_offsets(&self, rom: &mut RomBuf, tables: &DysTables, ip: InsertPoint) { let ofs = self.id as usize * 3; match self.genus { g if g.placeable() => { rom.set_long(tables.main_ptrs + ofs, ip.main as u32).unwrap(); rom.set_long(tables.init_ptrs + ofs, ip.init as u32).unwrap(); if tables.drop_ptrs != 0 { rom.set_long(tables.drop_ptrs + ofs, ip.drop as u32).unwrap(); } }, Genus::Cls => rom.set_long(tables.cls_ptrs + ofs, ip.main as u32).unwrap(), _ => unimplemented!(), }; } pub fn
(&self, ebit: bool) -> &String { if ebit && self.name_set.is_some() { self.name_set.as_ref().unwrap() } else { &self.name } } pub fn desc(&self, ebit: bool) -> &String { if ebit && self.desc_set.is_some() { self.desc_set.as_ref().unwrap() } else { &self.desc } } pub fn uses_ebit(&self) -> bool { self.name_set.is_some() } pub fn place_mw2(&self, target: &mut Vec<u8>, ebit: bool) { if !self.placeable() { panic!("Attempted to place unplaceable sprite") }; let b0 = 0x89; let b1 = 0x80; let num_extra_bit: u8 = if self.id & 0x100 == 0 { 0 } else { 8 }; let ebit_val: u8 = if !ebit { 0 } else { 4 }; let b0 = b0 | num_extra_bit | ebit_val; target.push(b0); target.push(b1); if self.id >= 0x200 { target.push(0xf8 + self.extra_bytes); } target.push((self.id & 0xff) as u8); for _ in 0..self.extra_bytes { target.push(0); }; } pub fn dys_option_bytes(&self) -> &[u8] { &self.dys_option_bytes } pub fn source_path(&self) -> &PathBuf { &self.source_path } } fn default_name(path: &Path, gen: Genus, id: u16) -> (String, String) { let root = match path.file_stem() { Some(s) => s.to_string_lossy().into_owned(), None => format!("Custom {} #{:03x}", gen.shortname(), id), }; (root.clone(), root + " (extra bit set)") } fn parse_newstyle(path: &Path, gen: Genus, id: u16, buf: &str) -> Result<SpriteCfg, CfgErr> { let (mut got_name, mut got_desc): (Option<String>, Option<String>) = (None, None); let mut cfg = SpriteCfg { genus: gen, id: id, .. SpriteCfg::new() }; let mut buf = buf; while let IResult::Done(rest, (name, value)) = cfg_line(buf) { buf = rest; match name { "acts-like" => cfg.acts_like = try!(read_byte(value)), "source" => cfg.source_path = path.with_file_name(value), "props" => try!(read_bytes(value, &mut cfg.tweak_bytes)), "xbytes" => cfg.extra_bytes = try!(read_byte(value)), "ext-props" => try!(read_bytes(value, &mut cfg.prop_bytes)), "dys-opts" => try!(read_bytes(value, &mut cfg.dys_option_bytes)), "ext-clip" => try!(read_bytes(value, &mut cfg.clipping)), "name" => got_name = Some(String::from(value)), "description" => got_desc = Some(String::from(value)), "desc-set" => cfg.desc_set = Some(String::from(value)), "name-set" => cfg.name_set = Some(String::from(value)), "ext-prop-def" | "m16d" | "tilemap" => (), _ => return Err(CfgErr { explain: format!("bad field name: \"{}\"", name) }), }; }; if let Some(s) = got_name { cfg.name = s; } else { let t = default_name(path, gen, id); cfg.name = t.0; cfg.name_set = Some(t.1); }; if let Some(s) = got_desc { cfg.desc = s; } else { cfg.desc = cfg.name.clone(); cfg.desc_set = cfg.name_set.clone(); }; if cfg.source_path.file_name() == None { Err(CfgErr { explain: String::from("Sprite needs a source file") }) } else { Ok(cfg) } } fn parse_oldstyle(path: &Path, gen: Genus, id: u16, buf: &str) -> Result<SpriteCfg, CfgErr> { let mut it = buf.split_whitespace().skip(1); let mut d = [0u8; 9]; for output_byte in &mut d { if let Some(s) = it.next() { *output_byte = try!(read_byte(s)); } else { return Err(CfgErr{ explain: String::from("Old-style CFG too short") }); } }; let (name, name_set) = default_name(path, gen, id); let (desc, desc_set) = (name.clone(), name_set.clone()); if let Some(s) = it.next() { Ok(SpriteCfg { genus: gen, id: id, acts_like: d[0], tweak_bytes: [d[1], d[2], d[3], d[4], d[5], d[6]], prop_bytes: [d[7], d[8]], source_path: path.with_file_name(s), name: name, name_set: Some(name_set), desc: desc, desc_set: Some(desc_set), ..
SpriteCfg::new() }) } else { Err(CfgErr { explain: String::from("Old-style CFG too short") }) } } fn read_byte(s: &str) -> Result<u8, CfgErr> { let iter = s.trim().chars(); let mut n = 0u32; let mut read = false; for ch in iter { if let Some(v) = ch.to_digit(0x10) { n *= 0x10; n += v; read = true; } else { return Err(CfgErr { explain: String::from("Non-byte data in byte field") }) } } if !read { Err(CfgErr { explain: String::from("Expected a byte, found nothing") }) } else { Ok(n as u8) } } fn read_bytes(s: &str, buf: &mut [u8]) -> Result<(), CfgErr> { let mut bytes = Vec::<u8>::with_capacity(buf.len()); for b in s.split_whitespace() { bytes.push(try!(read_byte(b))); }; if bytes.len() != buf.len() { Err(CfgErr { explain: format!("Wrong length byte sequence: expected {} bytes, got {}", buf.len(), bytes.len()) }) } else { for (i, b) in bytes.iter().enumerate() { buf[i] = *b; } Ok(()) } } fn tag_ending_s(ch: char) -> bool { ch == ' ' || ch == ':' } fn line_ending_s(ch: char) -> bool { ch == '\r' || ch == '\n' } named!(cfg_line(&str) -> (&str, &str), chain!( multispace? ~ name: take_till_s!(tag_ending_s) ~ space? ~ tag_s!(":") ~ space? ~ valu: take_till_s!(line_ending_s) ~ multispace? , || (name, valu) ) );
name
identifier_name
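`parse_newstyle` drives the nom-generated `cfg_line` parser, which accepts `name : value` lines with optional surrounding whitespace and yields the pair plus the unparsed remainder. Below is a macro-free sketch of the same grammar, written by hand rather than with the original nom chain; the `shooter.asm` value in the usage is made up, while the field names are real CFG keys.

```rust
fn parse_cfg_line(input: &str) -> Option<((&str, &str), &str)> {
    // Leading whitespace (including newlines from the previous line) is skipped.
    let input = input.trim_start();
    // The field name runs until the first space or ':'.
    let name_end = input.find(|c: char| c == ' ' || c == ':')?;
    let (name, rest) = input.split_at(name_end);
    // Optional spaces, a mandatory ':', then optional spaces.
    let rest = rest.trim_start_matches(' ').strip_prefix(':')?;
    let rest = rest.trim_start_matches(' ');
    // The value is everything up to the end of the line.
    let value_end = rest.find(|c: char| c == '\r' || c == '\n').unwrap_or(rest.len());
    let (value, remainder) = rest.split_at(value_end);
    Some(((name, value), remainder))
}

fn main() {
    let buf = "acts-like: 36\nsource: shooter.asm\n";
    let ((name, value), rest) = parse_cfg_line(buf).unwrap();
    assert_eq!((name, value), ("acts-like", "36"));
    let ((name, value), _rest) = parse_cfg_line(rest).unwrap();
    assert_eq!((name, value), ("source", "shooter.asm"));
}
```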
spritecfg.rs
#![allow(dead_code)] extern crate asar; use std::path::{PathBuf, Path}; use std::io::prelude::*; use std::fs::{File, OpenOptions}; use nom::*; use asar::rom::RomBuf; use parse_aux::dys_prefix; use genus::Genus; use dys_tables::DysTables; use insert_err::{InsertResult, format_result, warnless_result, single_error}; #[derive(Debug)] pub struct CfgErr { explain: String, } #[derive(Debug)] pub struct SpriteCfg { pub genus: Genus, pub id: u16, pub tweak_bytes: [u8; 6], pub prop_bytes: [u8; 2], pub clipping: [u8; 4], dys_option_bytes: [u8; 2], acts_like: u8, extra_bytes: u8, name: String, desc: String, name_set: Option<String>, desc_set: Option<String>, source_path: PathBuf, } #[derive(Debug, Copy, Clone)] pub struct InsertPoint { pub main: usize, pub init: usize, pub drop: usize, } impl SpriteCfg { pub fn parse(path: &Path, gen: Genus, id: u16, buf: &str) -> Result<SpriteCfg, CfgErr> { if let IResult::Done(rest, vsn) = dys_prefix(buf) { if vsn != 1 { return Err(CfgErr { explain: String::from("You have a cfg from the future") }); } else { parse_newstyle(path, gen, id, rest) } } else { parse_oldstyle(path, gen, id, buf) } } pub fn new() -> SpriteCfg { SpriteCfg { genus: Genus::Std, id: 0, tweak_bytes: [0, 0, 0, 0, 0, 0], prop_bytes: [0, 0], clipping: [0, 0, 0, 0], dys_option_bytes: [0, 0], acts_like: 0, extra_bytes: 0, name: "".to_string(), desc: "".to_string(), name_set: None, desc_set: None, source_path: PathBuf::from(""), } } pub fn needs_init(&self) -> bool { match self.genus { Genus::Std => true, _ => false, } } pub fn needs_drop(&self) -> bool { match self.genus { Genus::Std => self.dys_option_bytes[1] & 0x80 != 0, _ => false, } } pub fn placeable(&self) -> bool { self.genus.placeable() } pub fn assemble( &self, rom: &mut RomBuf, prelude: &str, source: &Path, temp: &Path, iopts: ::insert_opts::InsertOpts ) -> InsertResult<InsertPoint> { let (mut main, mut init, mut drop) = (0usize, 0usize, 0usize);
{ let mut tempasm = OpenOptions::new() .write(true) .truncate(true) .create(true) .open(temp) .unwrap(); tempasm.write_all(prelude.as_bytes()).unwrap(); let mut source_buf = Vec::<u8>::with_capacity(8 * 1024); // A wild guess. let mut srcf = warnless_result( File::open(source), |e| format!("error opening \"{}\": {}", source.to_string_lossy(), e) )?; srcf.read_to_end(&mut source_buf).unwrap(); tempasm.write_all(&source_buf).unwrap(); } let warns = match asar::patch(temp, rom) { Ok((_, mut ws)) => ws.drain(..).map(|w| w.into()).collect(), Err((mut es, mut ws)) => { return Err( (es.drain(..).map(|e| e.into()).collect(), ws.drain(..).map(|w| w.into()).collect()) ) }, }; for print in asar::prints() { let mut chunks = print.split_whitespace(); let fst = chunks.next(); let snd = chunks.next(); match fst { Some("MAIN") => match snd { Some(ofs) => main = usize::from_str_radix(ofs, 16).unwrap(), _ => return single_error("No offset after \"MAIN\" declaration"), }, Some("INIT") => match snd { Some(ofs) => init = usize::from_str_radix(ofs, 16).unwrap(), _ => return single_error("No offset after \"INIT\" declaration"), }, Some("DROP") => match snd { Some(ofs) => drop = usize::from_str_radix(ofs, 16).unwrap(), _ => return single_error("No offset after \"DROP\" declaration"), }, None => (), _ => return single_error("The sprite printed something other than MAIN, INIT, or DROP"), } }; if main == 0 { return single_error("No main routine"); } if init == 0 && self.needs_init() { return single_error("No init routine"); } if drop == 0 && self.needs_drop() { return single_error("Drop routine required by dys_opts, but not provided"); } if drop != 0 && !self.needs_drop() { return single_error("Sprite has a drop routine, but dys_opts doesn't require one"); } if self.needs_drop() && !iopts.use_drops { return single_error("Sprite needs a drop routine, but drop routines aren't enabled"); } Ok((InsertPoint { main, init, drop }, warns)) } pub fn apply_cfg(&self, rom: &mut RomBuf, tables: &DysTables) { match self.genus { Genus::Std | Genus::Gen | Genus::Sht | Genus::R1s => { if self.id < 0x200 { let size_ofs = if self.id < 0x100 { self.id as usize } else { self.id as usize + 0x100 }; let size = self.extra_bytes + 3; rom.set_byte(tables.sprite_sizes + size_ofs, size).unwrap(); rom.set_byte(tables.sprite_sizes + size_ofs + 0x100, size).unwrap(); let optbase = tables.option_bytes + (self.id as usize * 0x10); rom.set_byte(optbase, self.genus.to_byte()).unwrap(); rom.set_byte(optbase + 1, self.acts_like).unwrap(); rom.set_bytes(optbase + 2, &self.tweak_bytes).unwrap(); rom.set_bytes(optbase + 8, &self.dys_option_bytes).unwrap(); rom.set_bytes(optbase + 14, &self.prop_bytes).unwrap(); rom.set_bytes(optbase + 10, &self.clipping).unwrap(); }; }, Genus::Cls => {}, _ => unimplemented!(), }; } pub fn apply_offsets(&self, rom: &mut RomBuf, tables: &DysTables, ip: InsertPoint) { let ofs = self.id as usize * 3; match self.genus { g if g.placeable() => { rom.set_long(tables.main_ptrs + ofs, ip.main as u32).unwrap(); rom.set_long(tables.init_ptrs + ofs, ip.init as u32).unwrap(); if tables.drop_ptrs != 0 { rom.set_long(tables.drop_ptrs + ofs, ip.drop as u32).unwrap(); } }, Genus::Cls => rom.set_long(tables.cls_ptrs + ofs, ip.main as u32).unwrap(), _ => unimplemented!(), }; } pub fn name(&self, ebit: bool) -> &String { if ebit && self.name_set.is_some() { self.name_set.as_ref().unwrap() } else { &self.name } } pub fn desc(&self, ebit: bool) -> &String { if ebit && self.desc_set.is_some() { self.desc_set.as_ref().unwrap() } else { &self.desc }
} pub fn uses_ebit(&self) -> bool { self.name_set.is_some() } pub fn place_mw2(&self, target: &mut Vec<u8>, ebit: bool) { if !self.placeable() { panic!("Attempted to place unplaceable sprite") }; let b0 = 0x89; let b1 = 0x80; let num_extra_bit: u8 = if self.id & 0x100 == 0 { 0 } else { 8 }; let ebit_val: u8 = if !ebit { 0 } else { 4 }; let b0 = b0 | num_extra_bit | ebit_val; target.push(b0); target.push(b1); if self.id >= 0x200 { target.push(0xf8 + self.extra_bytes); } target.push((self.id & 0xff) as u8); for _ in 0..self.extra_bytes { target.push(0); }; } pub fn dys_option_bytes(&self) -> &[u8] { &self.dys_option_bytes } pub fn source_path(&self) -> &PathBuf { &self.source_path } } fn default_name(path: &Path, gen: Genus, id: u16) -> (String, String) { let root = match path.file_stem() { Some(s) => s.to_string_lossy().into_owned(), None => format!("Custom {} #{:03x}", gen.shortname(), id), }; (root.clone(), root + " (extra bit set)") } fn parse_newstyle(path: &Path, gen: Genus, id: u16, buf: &str) -> Result<SpriteCfg, CfgErr> { let (mut got_name, mut got_desc): (Option<String>, Option<String>) = (None, None); let mut cfg = SpriteCfg { genus: gen, id: id, .. SpriteCfg::new() }; let mut buf = buf; while let IResult::Done(rest, (name, value)) = cfg_line(buf) { buf = rest; match name { "acts-like" => cfg.acts_like = try!(read_byte(value)), "source" => cfg.source_path = path.with_file_name(value), "props" => try!(read_bytes(value, &mut cfg.tweak_bytes)), "xbytes" => cfg.extra_bytes = try!(read_byte(value)), "ext-props" => try!(read_bytes(value, &mut cfg.prop_bytes)), "dys-opts" => try!(read_bytes(value, &mut cfg.dys_option_bytes)), "ext-clip" => try!(read_bytes(value, &mut cfg.clipping)), "name" => got_name = Some(String::from(value)), "description" => got_desc = Some(String::from(value)), "desc-set" => cfg.desc_set = Some(String::from(value)), "name-set" => cfg.name_set = Some(String::from(value)), "ext-prop-def" | "m16d" | "tilemap" => (), _ => return Err(CfgErr { explain: format!("bad field name: \"{}\"", name) }), }; }; if let Some(s) = got_name { cfg.name = s; } else { let t = default_name(path, gen, id); cfg.name = t.0; cfg.name_set = Some(t.1); }; if let Some(s) = got_desc { cfg.desc = s; } else { cfg.desc = cfg.name.clone(); cfg.desc_set = cfg.name_set.clone(); }; if cfg.source_path.file_name() == None { Err(CfgErr { explain: String::from("Sprite needs a source file") }) } else { Ok(cfg) } } fn parse_oldstyle(path: &Path, gen: Genus, id: u16, buf: &str) -> Result<SpriteCfg, CfgErr> { let mut it = buf.split_whitespace().skip(1); let mut d = [0u8; 9]; for output_byte in &mut d { if let Some(s) = it.next() { *output_byte = try!(read_byte(s)); } else { return Err(CfgErr{ explain: String::from("Old-style CFG too short") }); } }; let (name, name_set) = default_name(path, gen, id); let (desc, desc_set) = (name.clone(), name_set.clone()); if let Some(s) = it.next() { Ok(SpriteCfg { genus: gen, id: id, acts_like: d[0], tweak_bytes: [d[1], d[2], d[3], d[4], d[5], d[6]], prop_bytes: [d[7], d[8]], source_path: path.with_file_name(s), name: name, name_set: Some(name_set), desc: desc, desc_set: Some(desc_set), ..
SpriteCfg::new() }) } else { Err(CfgErr { explain: String::from("Old-style CFG too short") }) } } fn read_byte(s: &str) -> Result<u8, CfgErr> { let iter = s.trim().chars(); let mut n = 0u32; let mut read = false; for ch in iter { if let Some(v) = ch.to_digit(0x10) { n *= 0x10; n += v; read = true; } else { return Err(CfgErr { explain: String::from("Non-byte data in byte field") }) } } if !read { Err(CfgErr { explain: String::from("Expected a byte, found nothing") }) } else { Ok(n as u8) } } fn read_bytes(s: &str, buf: &mut [u8]) -> Result<(), CfgErr> { let mut bytes = Vec::<u8>::with_capacity(buf.len()); for b in s.split_whitespace() { bytes.push(try!(read_byte(b))); }; if bytes.len() != buf.len() { Err(CfgErr { explain: format!("Wrong length byte sequence: expected {} bytes, got {}", buf.len(), bytes.len()) }) } else { for (i, b) in bytes.iter().enumerate() { buf[i] = *b; } Ok(()) } } fn tag_ending_s(ch: char) -> bool { ch == ' ' || ch == ':' } fn line_ending_s(ch: char) -> bool { ch == '\r' || ch == '\n' } named!(cfg_line(&str) -> (&str, &str), chain!( multispace? ~ name: take_till_s!(tag_ending_s) ~ space? ~ tag_s!(":") ~ space? ~ valu: take_till_s!(line_ending_s) ~ multispace? , || (name, valu) ) );
random_line_split
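`apply_cfg` above packs each sprite's configuration into a 16-byte record inside the option-bytes table. The sketch below builds that record standalone; the offsets (+0 genus, +1 acts-like, +2 tweak bytes, +8 dys options, +10 clipping, +14 extra property bytes) are read off the dumped code, while treating it as an isolated record rather than a ROM write is this sketch's simplification.

```rust
// Assemble one 16-byte option-table record from its pieces.
fn option_record(
    genus: u8, acts_like: u8, tweak: [u8; 6], dys: [u8; 2], clip: [u8; 4], prop: [u8; 2],
) -> [u8; 16] {
    let mut rec = [0u8; 16];
    rec[0] = genus;              // genus byte, as written to `optbase`
    rec[1] = acts_like;          // "acts like" sprite number
    rec[2..8].copy_from_slice(&tweak);  // six tweak bytes
    rec[8..10].copy_from_slice(&dys);   // dys option bytes
    rec[10..14].copy_from_slice(&clip); // extended clipping
    rec[14..16].copy_from_slice(&prop); // extra property bytes
    rec
}

fn main() {
    let rec = option_record(0x01, 0x36, [1, 2, 3, 4, 5, 6], [0, 0x80], [9, 9, 9, 9], [7, 7]);
    assert_eq!(rec[1], 0x36);
    assert_eq!(&rec[10..14], &[9, 9, 9, 9]);
}
```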
jwt.rs
//! # A Firestore Auth Session token is a JSON Web Token (JWT). This module contains JWT helper functions. use crate::credentials::Credentials; use crate::errors::FirebaseError; use biscuit::jwa::SignatureAlgorithm; use biscuit::{ClaimPresenceOptions, SingleOrMultiple, StringOrUri, ValidationOptions}; use chrono::{Duration, Utc}; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use serde_json::Value; use std::collections::{HashMap, HashSet}; use std::ops::Add; use std::ops::Deref; use std::slice::Iter; use std::str::FromStr; type Error = super::errors::FirebaseError; pub static JWT_AUDIENCE_FIRESTORE: &str = "https://firestore.googleapis.com/google.firestore.v1.Firestore"; pub static JWT_AUDIENCE_IDENTITY: &str = "https://identitytoolkit.googleapis.com/google.identity.identitytoolkit.v1.IdentityToolkit"; pub trait PrivateClaims where Self: Serialize + DeserializeOwned + Clone + Default, { fn get_scopes(&self) -> HashSet<String>; fn get_client_id(&self) -> Option<String>; fn get_uid(&self) -> Option<String>; } #[derive(Debug, Serialize, Deserialize, Clone, Default)] pub struct JwtOAuthPrivateClaims { #[serde(skip_serializing_if = "Option::is_none")] pub scope: Option<String>, #[serde(skip_serializing_if = "Option::is_none")] pub client_id: Option<String>, #[serde(skip_serializing_if = "Option::is_none")] pub uid: Option<String>, // Probably the firebase User ID if set } impl JwtOAuthPrivateClaims { pub fn new<S: AsRef<str>>( scope: Option<Iter<S>>, client_id: Option<String>, user_id: Option<String>, ) -> Self { JwtOAuthPrivateClaims { scope: scope.and_then(|f| { Some(f.fold(String::new(), |acc, x| { let x: &str = x.as_ref(); return acc + x + " "; })) }), client_id, uid: user_id, } } } impl PrivateClaims for JwtOAuthPrivateClaims { fn get_scopes(&self) -> HashSet<String> { match self.scope { Some(ref v) => v.split(" ").map(|f| f.to_owned()).collect(), None => HashSet::new(), } } fn get_client_id(&self) -> Option<String> { self.client_id.clone() } fn get_uid(&self) -> Option<String> { self.uid.clone() } } #[derive(Debug, Serialize, Deserialize, Clone, Default)] pub struct JwtCustomClaims { pub uid: String, pub claims: HashMap<String, Value>, } impl JwtCustomClaims { pub fn new<T: Serialize>(uid: &str, claims: T) -> Self { let dev_claims = { let val = serde_json::to_string(&claims).unwrap_or("".to_string()); serde_json::from_str::<HashMap<String, Value>>(&val).unwrap_or_default() }; JwtCustomClaims { claims: dev_claims, uid: uid.to_string(), } } } impl PrivateClaims for JwtCustomClaims { fn get_scopes(&self) -> HashSet<String> { HashSet::new() } fn get_client_id(&self) -> Option<String> { None } fn get_uid(&self) -> Option<String> { Some(self.uid.clone()) } } pub(crate) type AuthClaimsJWT = biscuit::JWT<JwtOAuthPrivateClaims, biscuit::Empty>; #[derive(Serialize, Deserialize, Default, Clone)] pub struct JWSEntry { #[serde(flatten)] pub(crate) headers: biscuit::jws::RegisteredHeader, #[serde(flatten)] pub(crate) ne: biscuit::jwk::RSAKeyParameters, } #[derive(Serialize, Deserialize)] pub struct JWKSetDTO { pub keys: Vec<JWSEntry>, } /// Download the Google JWK Set for a given service account. /// The resulting set of JWKs need to be added to a credentials object /// for jwk verifications.
pub fn download_google_jwks(account_mail: &str) -> Result<JWKSetDTO, Error> { let resp = reqwest::blocking::Client::new() .get(&format!( "https://www.googleapis.com/service_accounts/v1/jwk/{}", account_mail )) .send()?; let jwk_set: JWKSetDTO = resp.json()?; Ok(jwk_set) } /// Download the Google JWK Set for a given service account. /// The resulting set of JWKs need to be added to a credentials object /// for jwk verifications.
.get(&format!( "https://www.googleapis.com/service_accounts/v1/jwk/{}", account_mail )) .send() .await?; let jwk_set: JWKSetDTO = resp.json().await?; Ok(jwk_set) } /// Returns true if the access token (assumed to be a jwt) has expired /// /// An error is returned if the given access token string is not a jwt pub(crate) fn is_expired( access_token: &str, tolerance_in_minutes: i64, ) -> Result<bool, FirebaseError> { let token = AuthClaimsJWT::new_encoded(&access_token); let claims = token.unverified_payload()?; if let Some(expiry) = claims.registered.expiry.as_ref() { let diff: Duration = Utc::now().signed_duration_since(expiry.deref().clone()); return Ok(diff.num_minutes() - tolerance_in_minutes > 0); } Ok(true) } /// Returns true if the jwt was updated and needs signing pub(crate) fn jwt_update_expiry_if(jwt: &mut AuthClaimsJWT, expire_in_minutes: i64) -> bool { let ref mut claims = jwt.payload_mut().unwrap().registered; let now = biscuit::Timestamp::from(Utc::now()); if let Some(issued_at) = claims.issued_at.as_ref() { let diff: Duration = Utc::now().signed_duration_since(issued_at.deref().clone()); if diff.num_minutes() > expire_in_minutes { claims.issued_at = Some(now); } else { return false; } } else { claims.issued_at = Some(now); } true } pub(crate) fn create_jwt<S>( credentials: &Credentials, scope: Option<Iter<S>>, duration: chrono::Duration, client_id: Option<String>, user_id: Option<String>, audience: &str, ) -> Result<AuthClaimsJWT, Error> where S: AsRef<str>, { let claims = JwtOAuthPrivateClaims::new(scope, client_id, user_id); create_jwt_with_claims(credentials, duration, audience, claims) } pub(crate) fn create_jwt_encoded<S: AsRef<str>>( credentials: &Credentials, scope: Option<Iter<S>>, duration: chrono::Duration, client_id: Option<String>, user_id: Option<String>, audience: &str, ) -> Result<String, Error> { let jwt = create_jwt(credentials, scope, duration, client_id, user_id, audience)?; let secret = credentials .keys .secret .as_ref() .ok_or(Error::Generic("No private key added via add_keypair_key!"))?; Ok(jwt.encode(&secret.deref())?.encoded()?.encode()) } fn create_jwt_with_claims<T>( credentials: &Credentials, duration: chrono::Duration, audience: &str, claims: T, ) -> Result<biscuit::JWT<T, biscuit::Empty>, Error> where T: Serialize + DeserializeOwned, { use biscuit::{ jws::{Header, RegisteredHeader}, ClaimsSet, Empty, RegisteredClaims, }; let header: Header<Empty> = Header::from(RegisteredHeader { algorithm: SignatureAlgorithm::RS256, key_id: Some(credentials.private_key_id.to_owned()), ..Default::default() }); let expected_claims = ClaimsSet::<T> { registered: RegisteredClaims { issuer: Some(FromStr::from_str(&credentials.client_email)?), audience: Some(SingleOrMultiple::Single(StringOrUri::from_str(audience)?)), expiry: Some(biscuit::Timestamp::from(Utc::now().add(duration))), subject: Some(StringOrUri::from_str(&credentials.client_email)?), issued_at: Some(biscuit::Timestamp::from(Utc::now())), ..Default::default() }, private: claims, }; Ok(biscuit::JWT::new_decoded(header, expected_claims)) } pub fn create_custom_jwt_encoded<T: PrivateClaims>( credentials: &Credentials, claims: T, ) -> Result<String, Error> { let jwt = create_jwt_with_claims( &credentials, Duration::hours(1), JWT_AUDIENCE_IDENTITY, claims, )?; let secret = credentials .keys .secret .as_ref() .ok_or(FirebaseError::Generic( "No private key added via add_keypair_key!", ))?; Ok(jwt.encode(&secret.deref())?.encoded()?.encode()) } pub struct TokenValidationResult<T: PrivateClaims = 
JwtOAuthPrivateClaims> { pub claims: T, pub audience: String, pub subject: String, } impl TokenValidationResult { pub fn get_scopes(&self) -> HashSet<String> { self.claims.get_scopes() } } pub(crate) fn verify_access_token( credentials: &Credentials, access_token: &str, ) -> Result<TokenValidationResult, Error> { verify_access_token_with_claims(credentials, access_token) } pub fn verify_access_token_with_claims<T: PrivateClaims>( credentials: &Credentials, access_token: &str, ) -> Result<TokenValidationResult<T>, Error> { let token = biscuit::JWT::<T, biscuit::Empty>::new_encoded(&access_token); let header = token.unverified_header()?; let kid = header .registered .key_id .as_ref() .ok_or(FirebaseError::Generic("No jwt kid"))?; let secret = credentials .decode_secret(kid) .ok_or(FirebaseError::Generic("No secret for kid"))?; let token = token.into_decoded(&secret.deref(), SignatureAlgorithm::RS256)?; use biscuit::Presence::*; let o = ValidationOptions { claim_presence_options: ClaimPresenceOptions { issued_at: Required, not_before: Optional, expiry: Required, issuer: Required, audience: Required, subject: Required, id: Optional, }, // audience: Validation::Validate(StringOrUri::from_str(JWT_SUBJECT)?), ..Default::default() }; let claims = token.payload()?; claims.registered.validate(o)?; let audience = match claims.registered.audience.as_ref().unwrap() { SingleOrMultiple::Single(v) => v.to_string(), SingleOrMultiple::Multiple(v) => v.get(0).unwrap().to_string(), }; Ok(TokenValidationResult { claims: claims.private.clone(), subject: claims.registered.subject.as_ref().unwrap().to_string(), audience, }) }
pub async fn download_google_jwks_async(account_mail: &str) -> Result<JWKSetDTO, Error> { let resp = reqwest::Client::new()
random_line_split
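`is_expired` above compares the token's expiry claim against the current time, with a tolerance measured in minutes. A chrono-only sketch of that comparison follows, with the JWT decoding elided; it assumes `chrono = "0.4"` as in the module's own imports.

```rust
use chrono::{DateTime, Duration, Utc};

// Expired means "now is more than `tolerance_in_minutes` past the expiry".
fn expired(expiry: DateTime<Utc>, tolerance_in_minutes: i64) -> bool {
    let diff: Duration = Utc::now().signed_duration_since(expiry);
    diff.num_minutes() - tolerance_in_minutes > 0
}

fn main() {
    let long_past = Utc::now() - Duration::minutes(90);
    let near_future = Utc::now() + Duration::minutes(30);
    assert!(expired(long_past, 5));
    assert!(!expired(near_future, 5));
    // A token only 3 minutes past expiry still passes with a 5-minute tolerance.
    assert!(!expired(Utc::now() - Duration::minutes(3), 5));
}
```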
jwt.rs
//! # A Firestore Auth Session token is a JSON Web Token (JWT). This module contains JWT helper functions. use crate::credentials::Credentials; use crate::errors::FirebaseError; use biscuit::jwa::SignatureAlgorithm; use biscuit::{ClaimPresenceOptions, SingleOrMultiple, StringOrUri, ValidationOptions}; use chrono::{Duration, Utc}; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use serde_json::Value; use std::collections::{HashMap, HashSet}; use std::ops::Add; use std::ops::Deref; use std::slice::Iter; use std::str::FromStr; type Error = super::errors::FirebaseError; pub static JWT_AUDIENCE_FIRESTORE: &str = "https://firestore.googleapis.com/google.firestore.v1.Firestore"; pub static JWT_AUDIENCE_IDENTITY: &str = "https://identitytoolkit.googleapis.com/google.identity.identitytoolkit.v1.IdentityToolkit"; pub trait PrivateClaims where Self: Serialize + DeserializeOwned + Clone + Default, { fn get_scopes(&self) -> HashSet<String>; fn get_client_id(&self) -> Option<String>; fn get_uid(&self) -> Option<String>; } #[derive(Debug, Serialize, Deserialize, Clone, Default)] pub struct JwtOAuthPrivateClaims { #[serde(skip_serializing_if = "Option::is_none")] pub scope: Option<String>, #[serde(skip_serializing_if = "Option::is_none")] pub client_id: Option<String>, #[serde(skip_serializing_if = "Option::is_none")] pub uid: Option<String>, // Probably the firebase User ID if set } impl JwtOAuthPrivateClaims { pub fn new<S: AsRef<str>>( scope: Option<Iter<S>>, client_id: Option<String>, user_id: Option<String>, ) -> Self { JwtOAuthPrivateClaims { scope: scope.and_then(|f| { Some(f.fold(String::new(), |acc, x| { let x: &str = x.as_ref(); return acc + x + " "; })) }), client_id, uid: user_id, } } } impl PrivateClaims for JwtOAuthPrivateClaims { fn get_scopes(&self) -> HashSet<String> { match self.scope { Some(ref v) => v.split(" ").map(|f| f.to_owned()).collect(), None => HashSet::new(), } } fn get_client_id(&self) -> Option<String> { self.client_id.clone() } fn get_uid(&self) -> Option<String> { self.uid.clone() } } #[derive(Debug, Serialize, Deserialize, Clone, Default)] pub struct JwtCustomClaims { pub uid: String, pub claims: HashMap<String, Value>, } impl JwtCustomClaims { pub fn new<T: Serialize>(uid: &str, claims: T) -> Self { let dev_claims = { let val = serde_json::to_string(&claims).unwrap_or("".to_string()); serde_json::from_str::<HashMap<String, Value>>(&val).unwrap_or_default() }; JwtCustomClaims { claims: dev_claims, uid: uid.to_string(), } } } impl PrivateClaims for JwtCustomClaims { fn get_scopes(&self) -> HashSet<String> { HashSet::new() } fn get_client_id(&self) -> Option<String> { None } fn get_uid(&self) -> Option<String> { Some(self.uid.clone()) } } pub(crate) type AuthClaimsJWT = biscuit::JWT<JwtOAuthPrivateClaims, biscuit::Empty>; #[derive(Serialize, Deserialize, Default, Clone)] pub struct
{ #[serde(flatten)] pub(crate) headers: biscuit::jws::RegisteredHeader, #[serde(flatten)] pub(crate) ne: biscuit::jwk::RSAKeyParameters, } #[derive(Serialize, Deserialize)] pub struct JWKSetDTO { pub keys: Vec<JWSEntry>, } /// Download the Google JWK Set for a given service account. /// The resulting set of JWKs need to be added to a credentials object /// for jwk verifications. pub fn download_google_jwks(account_mail: &str) -> Result<JWKSetDTO, Error> { let resp = reqwest::blocking::Client::new() .get(&format!( "https://www.googleapis.com/service_accounts/v1/jwk/{}", account_mail )) .send()?; let jwk_set: JWKSetDTO = resp.json()?; Ok(jwk_set) } /// Download the Google JWK Set for a given service account. /// The resulting set of JWKs need to be added to a credentials object /// for jwk verifications. pub async fn download_google_jwks_async(account_mail: &str) -> Result<JWKSetDTO, Error> { let resp = reqwest::Client::new() .get(&format!( "https://www.googleapis.com/service_accounts/v1/jwk/{}", account_mail )) .send() .await?; let jwk_set: JWKSetDTO = resp.json().await?; Ok(jwk_set) } /// Returns true if the access token (assumed to be a jwt) has expired /// /// An error is returned if the given access token string is not a jwt pub(crate) fn is_expired( access_token: &str, tolerance_in_minutes: i64, ) -> Result<bool, FirebaseError> { let token = AuthClaimsJWT::new_encoded(&access_token); let claims = token.unverified_payload()?; if let Some(expiry) = claims.registered.expiry.as_ref() { let diff: Duration = Utc::now().signed_duration_since(expiry.deref().clone()); return Ok(diff.num_minutes() - tolerance_in_minutes > 0); } Ok(true) } /// Returns true if the jwt was updated and needs signing pub(crate) fn jwt_update_expiry_if(jwt: &mut AuthClaimsJWT, expire_in_minutes: i64) -> bool { let ref mut claims = jwt.payload_mut().unwrap().registered; let now = biscuit::Timestamp::from(Utc::now()); if let Some(issued_at) = claims.issued_at.as_ref() { let diff: Duration = Utc::now().signed_duration_since(issued_at.deref().clone()); if diff.num_minutes() > expire_in_minutes { claims.issued_at = Some(now); } else { return false; } } else { claims.issued_at = Some(now); } true } pub(crate) fn create_jwt<S>( credentials: &Credentials, scope: Option<Iter<S>>, duration: chrono::Duration, client_id: Option<String>, user_id: Option<String>, audience: &str, ) -> Result<AuthClaimsJWT, Error> where S: AsRef<str>, { let claims = JwtOAuthPrivateClaims::new(scope, client_id, user_id); create_jwt_with_claims(credentials, duration, audience, claims) } pub(crate) fn create_jwt_encoded<S: AsRef<str>>( credentials: &Credentials, scope: Option<Iter<S>>, duration: chrono::Duration, client_id: Option<String>, user_id: Option<String>, audience: &str, ) -> Result<String, Error> { let jwt = create_jwt(credentials, scope, duration, client_id, user_id, audience)?; let secret = credentials .keys .secret .as_ref() .ok_or(Error::Generic("No private key added via add_keypair_key!"))?; Ok(jwt.encode(&secret.deref())?.encoded()?.encode()) } fn create_jwt_with_claims<T>( credentials: &Credentials, duration: chrono::Duration, audience: &str, claims: T, ) -> Result<biscuit::JWT<T, biscuit::Empty>, Error> where T: Serialize + DeserializeOwned, { use biscuit::{ jws::{Header, RegisteredHeader}, ClaimsSet, Empty, RegisteredClaims, }; let header: Header<Empty> = Header::from(RegisteredHeader { algorithm: SignatureAlgorithm::RS256, key_id: Some(credentials.private_key_id.to_owned()), ..Default::default() }); let expected_claims = 
ClaimsSet::<T> { registered: RegisteredClaims { issuer: Some(FromStr::from_str(&credentials.client_email)?), audience: Some(SingleOrMultiple::Single(StringOrUri::from_str(audience)?)), expiry: Some(biscuit::Timestamp::from(Utc::now().add(duration))), subject: Some(StringOrUri::from_str(&credentials.client_email)?), issued_at: Some(biscuit::Timestamp::from(Utc::now())), ..Default::default() }, private: claims, }; Ok(biscuit::JWT::new_decoded(header, expected_claims)) } pub fn create_custom_jwt_encoded<T: PrivateClaims>( credentials: &Credentials, claims: T, ) -> Result<String, Error> { let jwt = create_jwt_with_claims( &credentials, Duration::hours(1), JWT_AUDIENCE_IDENTITY, claims, )?; let secret = credentials .keys .secret .as_ref() .ok_or(FirebaseError::Generic( "No private key added via add_keypair_key!", ))?; Ok(jwt.encode(&secret.deref())?.encoded()?.encode()) } pub struct TokenValidationResult<T: PrivateClaims = JwtOAuthPrivateClaims> { pub claims: T, pub audience: String, pub subject: String, } impl TokenValidationResult { pub fn get_scopes(&self) -> HashSet<String> { self.claims.get_scopes() } } pub(crate) fn verify_access_token( credentials: &Credentials, access_token: &str, ) -> Result<TokenValidationResult, Error> { verify_access_token_with_claims(credentials, access_token) } pub fn verify_access_token_with_claims<T: PrivateClaims>( credentials: &Credentials, access_token: &str, ) -> Result<TokenValidationResult<T>, Error> { let token = biscuit::JWT::<T, biscuit::Empty>::new_encoded(&access_token); let header = token.unverified_header()?; let kid = header .registered .key_id .as_ref() .ok_or(FirebaseError::Generic("No jwt kid"))?; let secret = credentials .decode_secret(kid) .ok_or(FirebaseError::Generic("No secret for kid"))?; let token = token.into_decoded(&secret.deref(), SignatureAlgorithm::RS256)?; use biscuit::Presence::*; let o = ValidationOptions { claim_presence_options: ClaimPresenceOptions { issued_at: Required, not_before: Optional, expiry: Required, issuer: Required, audience: Required, subject: Required, id: Optional, }, // audience: Validation::Validate(StringOrUri::from_str(JWT_SUBJECT)?), ..Default::default() }; let claims = token.payload()?; claims.registered.validate(o)?; let audience = match claims.registered.audience.as_ref().unwrap() { SingleOrMultiple::Single(v) => v.to_string(), SingleOrMultiple::Multiple(v) => v.get(0).unwrap().to_string(), }; Ok(TokenValidationResult { claims: claims.private.clone(), subject: claims.registered.subject.as_ref().unwrap().to_string(), audience, }) }
JWSEntry
identifier_name
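The JWSEntry/JWKSetDTO pair in this record deserializes Google's JWK documents, where each key mixes JWS header fields and RSA parameters in one flat JSON object (hence the two #[serde(flatten)] fields). A minimal standalone sketch of the same fetch-and-parse step, assuming reqwest (with the blocking and json features) and serde_json; the service account address is hypothetical:

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Hypothetical service account; the endpoint is the one
    // download_google_jwks uses.
    let account_mail = "my-service-account@example.iam.gserviceaccount.com";
    let url = format!(
        "https://www.googleapis.com/service_accounts/v1/jwk/{}",
        account_mail
    );
    let body: serde_json::Value = reqwest::blocking::get(&url)?.json()?;
    // Each entry carries header fields (kid, alg) flattened together with
    // the RSA parameters (n, e), mirroring the JWSEntry layout.
    if let Some(keys) = body["keys"].as_array() {
        for key in keys {
            println!("kid={} alg={}", key["kid"], key["alg"]);
        }
    }
    Ok(())
}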
jwt.rs
//! # A Firestore Auth Session token is a Javascript Web Token (JWT). This module contains JWT helper functions. use crate::credentials::Credentials; use crate::errors::FirebaseError; use biscuit::jwa::SignatureAlgorithm; use biscuit::{ClaimPresenceOptions, SingleOrMultiple, StringOrUri, ValidationOptions}; use chrono::{Duration, Utc}; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use serde_json::Value; use std::collections::{HashMap, HashSet}; use std::ops::Add; use std::ops::Deref; use std::slice::Iter; use std::str::FromStr; type Error = super::errors::FirebaseError; pub static JWT_AUDIENCE_FIRESTORE: &str = "https://firestore.googleapis.com/google.firestore.v1.Firestore"; pub static JWT_AUDIENCE_IDENTITY: &str = "https://identitytoolkit.googleapis.com/google.identity.identitytoolkit.v1.IdentityToolkit"; pub trait PrivateClaims where Self: Serialize + DeserializeOwned + Clone + Default, { fn get_scopes(&self) -> HashSet<String>; fn get_client_id(&self) -> Option<String>; fn get_uid(&self) -> Option<String>; } #[derive(Debug, Serialize, Deserialize, Clone, Default)] pub struct JwtOAuthPrivateClaims { #[serde(skip_serializing_if = "Option::is_none")] pub scope: Option<String>, #[serde(skip_serializing_if = "Option::is_none")] pub client_id: Option<String>, #[serde(skip_serializing_if = "Option::is_none")] pub uid: Option<String>, // Probably the firebase User ID if set } impl JwtOAuthPrivateClaims { pub fn new<S: AsRef<str>>( scope: Option<Iter<S>>, client_id: Option<String>, user_id: Option<String>, ) -> Self { JwtOAuthPrivateClaims { scope: scope.and_then(|f| { Some(f.fold(String::new(), |acc, x| { let x: &str = x.as_ref(); return acc + x + " "; })) }), client_id, uid: user_id, } } } impl PrivateClaims for JwtOAuthPrivateClaims { fn get_scopes(&self) -> HashSet<String> { match self.scope { Some(ref v) => v.split(" ").map(|f| f.to_owned()).collect(), None => HashSet::new(), } } fn get_client_id(&self) -> Option<String> { self.client_id.clone() } fn get_uid(&self) -> Option<String> { self.uid.clone() } } #[derive(Debug, Serialize, Deserialize, Clone, Default)] pub struct JwtCustomClaims { pub uid: String, pub claims: HashMap<String, Value>, } impl JwtCustomClaims { pub fn new<T: Serialize>(uid: &str, claims: T) -> Self { let dev_claims = { let val = serde_json::to_string(&claims).unwrap_or("".to_string()); serde_json::from_str::<HashMap<String, Value>>(&val).unwrap_or_default() }; JwtCustomClaims { claims: dev_claims, uid: uid.to_string(), } } } impl PrivateClaims for JwtCustomClaims { fn get_scopes(&self) -> HashSet<String> { HashSet::new() } fn get_client_id(&self) -> Option<String> { None } fn get_uid(&self) -> Option<String> { Some(self.uid.clone()) } } pub(crate) type AuthClaimsJWT = biscuit::JWT<JwtOAuthPrivateClaims, biscuit::Empty>; #[derive(Serialize, Deserialize, Default, Clone)] pub struct JWSEntry { #[serde(flatten)] pub(crate) headers: biscuit::jws::RegisteredHeader, #[serde(flatten)] pub(crate) ne: biscuit::jwk::RSAKeyParameters, } #[derive(Serialize, Deserialize)] pub struct JWKSetDTO { pub keys: Vec<JWSEntry>, } /// Download the Google JWK Set for a given service account. /// The resulting set of JWKs need to be added to a credentials object /// for jwk verifications. 
pub fn download_google_jwks(account_mail: &str) -> Result<JWKSetDTO, Error> { let resp = reqwest::blocking::Client::new() .get(&format!( "https://www.googleapis.com/service_accounts/v1/jwk/{}", account_mail )) .send()?; let jwk_set: JWKSetDTO = resp.json()?; Ok(jwk_set) } /// Download the Google JWK Set for a given service account. /// The resulting set of JWKs need to be added to a credentials object /// for jwk verifications. pub async fn download_google_jwks_async(account_mail: &str) -> Result<JWKSetDTO, Error> { let resp = reqwest::Client::new() .get(&format!( "https://www.googleapis.com/service_accounts/v1/jwk/{}", account_mail )) .send() .await?; let jwk_set: JWKSetDTO = resp.json().await?; Ok(jwk_set) } /// Returns true if the access token (assumed to be a jwt) has expired /// /// An error is returned if the given access token string is not a jwt pub(crate) fn is_expired( access_token: &str, tolerance_in_minutes: i64, ) -> Result<bool, FirebaseError> { let token = AuthClaimsJWT::new_encoded(&access_token); let claims = token.unverified_payload()?; if let Some(expiry) = claims.registered.expiry.as_ref() { let diff: Duration = Utc::now().signed_duration_since(expiry.deref().clone()); return Ok(diff.num_minutes() - tolerance_in_minutes > 0); } Ok(true) } /// Returns true if the jwt was updated and needs signing pub(crate) fn jwt_update_expiry_if(jwt: &mut AuthClaimsJWT, expire_in_minutes: i64) -> bool { let ref mut claims = jwt.payload_mut().unwrap().registered; let now = biscuit::Timestamp::from(Utc::now()); if let Some(issued_at) = claims.issued_at.as_ref() { let diff: Duration = Utc::now().signed_duration_since(issued_at.deref().clone()); if diff.num_minutes() > expire_in_minutes { claims.issued_at = Some(now); } else { return false; } } else { claims.issued_at = Some(now); } true } pub(crate) fn create_jwt<S>( credentials: &Credentials, scope: Option<Iter<S>>, duration: chrono::Duration, client_id: Option<String>, user_id: Option<String>, audience: &str, ) -> Result<AuthClaimsJWT, Error> where S: AsRef<str>, { let claims = JwtOAuthPrivateClaims::new(scope, client_id, user_id); create_jwt_with_claims(credentials, duration, audience, claims) } pub(crate) fn create_jwt_encoded<S: AsRef<str>>( credentials: &Credentials, scope: Option<Iter<S>>, duration: chrono::Duration, client_id: Option<String>, user_id: Option<String>, audience: &str, ) -> Result<String, Error> { let jwt = create_jwt(credentials, scope, duration, client_id, user_id, audience)?; let secret = credentials .keys .secret .as_ref() .ok_or(Error::Generic("No private key added via add_keypair_key!"))?; Ok(jwt.encode(&secret.deref())?.encoded()?.encode()) } fn create_jwt_with_claims<T>( credentials: &Credentials, duration: chrono::Duration, audience: &str, claims: T, ) -> Result<biscuit::JWT<T, biscuit::Empty>, Error> where T: Serialize + DeserializeOwned, { use biscuit::{ jws::{Header, RegisteredHeader}, ClaimsSet, Empty, RegisteredClaims, }; let header: Header<Empty> = Header::from(RegisteredHeader { algorithm: SignatureAlgorithm::RS256, key_id: Some(credentials.private_key_id.to_owned()), ..Default::default() }); let expected_claims = ClaimsSet::<T> { registered: RegisteredClaims { issuer: Some(FromStr::from_str(&credentials.client_email)?), audience: Some(SingleOrMultiple::Single(StringOrUri::from_str(audience)?)), expiry: Some(biscuit::Timestamp::from(Utc::now().add(duration))), subject: Some(StringOrUri::from_str(&credentials.client_email)?), issued_at: Some(biscuit::Timestamp::from(Utc::now())), ..Default::default() 
}, private: claims, }; Ok(biscuit::JWT::new_decoded(header, expected_claims)) } pub fn create_custom_jwt_encoded<T: PrivateClaims>( credentials: &Credentials, claims: T, ) -> Result<String, Error> { let jwt = create_jwt_with_claims( &credentials, Duration::hours(1), JWT_AUDIENCE_IDENTITY, claims, )?; let secret = credentials .keys .secret .as_ref() .ok_or(FirebaseError::Generic( "No private key added via add_keypair_key!", ))?; Ok(jwt.encode(&secret.deref())?.encoded()?.encode()) } pub struct TokenValidationResult<T: PrivateClaims = JwtOAuthPrivateClaims> { pub claims: T, pub audience: String, pub subject: String, } impl TokenValidationResult { pub fn get_scopes(&self) -> HashSet<String>
} pub(crate) fn verify_access_token( credentials: &Credentials, access_token: &str, ) -> Result<TokenValidationResult, Error> { verify_access_token_with_claims(credentials, access_token) } pub fn verify_access_token_with_claims<T: PrivateClaims>( credentials: &Credentials, access_token: &str, ) -> Result<TokenValidationResult<T>, Error> { let token = biscuit::JWT::<T, biscuit::Empty>::new_encoded(&access_token); let header = token.unverified_header()?; let kid = header .registered .key_id .as_ref() .ok_or(FirebaseError::Generic("No jwt kid"))?; let secret = credentials .decode_secret(kid) .ok_or(FirebaseError::Generic("No secret for kid"))?; let token = token.into_decoded(&secret.deref(), SignatureAlgorithm::RS256)?; use biscuit::Presence::*; let o = ValidationOptions { claim_presence_options: ClaimPresenceOptions { issued_at: Required, not_before: Optional, expiry: Required, issuer: Required, audience: Required, subject: Required, id: Optional, }, // audience: Validation::Validate(StringOrUri::from_str(JWT_SUBJECT)?), ..Default::default() }; let claims = token.payload()?; claims.registered.validate(o)?; let audience = match claims.registered.audience.as_ref().unwrap() { SingleOrMultiple::Single(v) => v.to_string(), SingleOrMultiple::Multiple(v) => v.get(0).unwrap().to_string(), }; Ok(TokenValidationResult { claims: claims.private.clone(), subject: claims.registered.subject.as_ref().unwrap().to_string(), audience, }) }
{ self.claims.get_scopes() }
identifier_body
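The get_scopes body above round-trips the space-joined scope string built by JwtOAuthPrivateClaims::new. A std-only sketch of that round trip; split_whitespace is used here so the trailing space left by the join does not yield an empty scope entry, which v.split(" ") would produce:

use std::collections::HashSet;

// Join scopes the way JwtOAuthPrivateClaims::new does (trailing space
// included), then split them back into a set.
fn join_scopes<'a>(scopes: impl Iterator<Item = &'a str>) -> String {
    scopes.fold(String::new(), |acc, s| acc + s + " ")
}

fn split_scopes(joined: &str) -> HashSet<String> {
    joined.split_whitespace().map(|s| s.to_owned()).collect()
}

fn main() {
    let joined = join_scopes(["email", "profile"].into_iter());
    assert_eq!(split_scopes(&joined).len(), 2);
}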
jwt.rs
//! # A Firestore Auth Session token is a Javascript Web Token (JWT). This module contains JWT helper functions. use crate::credentials::Credentials; use crate::errors::FirebaseError; use biscuit::jwa::SignatureAlgorithm; use biscuit::{ClaimPresenceOptions, SingleOrMultiple, StringOrUri, ValidationOptions}; use chrono::{Duration, Utc}; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use serde_json::Value; use std::collections::{HashMap, HashSet}; use std::ops::Add; use std::ops::Deref; use std::slice::Iter; use std::str::FromStr; type Error = super::errors::FirebaseError; pub static JWT_AUDIENCE_FIRESTORE: &str = "https://firestore.googleapis.com/google.firestore.v1.Firestore"; pub static JWT_AUDIENCE_IDENTITY: &str = "https://identitytoolkit.googleapis.com/google.identity.identitytoolkit.v1.IdentityToolkit"; pub trait PrivateClaims where Self: Serialize + DeserializeOwned + Clone + Default, { fn get_scopes(&self) -> HashSet<String>; fn get_client_id(&self) -> Option<String>; fn get_uid(&self) -> Option<String>; } #[derive(Debug, Serialize, Deserialize, Clone, Default)] pub struct JwtOAuthPrivateClaims { #[serde(skip_serializing_if = "Option::is_none")] pub scope: Option<String>, #[serde(skip_serializing_if = "Option::is_none")] pub client_id: Option<String>, #[serde(skip_serializing_if = "Option::is_none")] pub uid: Option<String>, // Probably the firebase User ID if set } impl JwtOAuthPrivateClaims { pub fn new<S: AsRef<str>>( scope: Option<Iter<S>>, client_id: Option<String>, user_id: Option<String>, ) -> Self { JwtOAuthPrivateClaims { scope: scope.and_then(|f| { Some(f.fold(String::new(), |acc, x| { let x: &str = x.as_ref(); return acc + x + " "; })) }), client_id, uid: user_id, } } } impl PrivateClaims for JwtOAuthPrivateClaims { fn get_scopes(&self) -> HashSet<String> { match self.scope { Some(ref v) => v.split(" ").map(|f| f.to_owned()).collect(), None => HashSet::new(), } } fn get_client_id(&self) -> Option<String> { self.client_id.clone() } fn get_uid(&self) -> Option<String> { self.uid.clone() } } #[derive(Debug, Serialize, Deserialize, Clone, Default)] pub struct JwtCustomClaims { pub uid: String, pub claims: HashMap<String, Value>, } impl JwtCustomClaims { pub fn new<T: Serialize>(uid: &str, claims: T) -> Self { let dev_claims = { let val = serde_json::to_string(&claims).unwrap_or("".to_string()); serde_json::from_str::<HashMap<String, Value>>(&val).unwrap_or_default() }; JwtCustomClaims { claims: dev_claims, uid: uid.to_string(), } } } impl PrivateClaims for JwtCustomClaims { fn get_scopes(&self) -> HashSet<String> { HashSet::new() } fn get_client_id(&self) -> Option<String> { None } fn get_uid(&self) -> Option<String> { Some(self.uid.clone()) } } pub(crate) type AuthClaimsJWT = biscuit::JWT<JwtOAuthPrivateClaims, biscuit::Empty>; #[derive(Serialize, Deserialize, Default, Clone)] pub struct JWSEntry { #[serde(flatten)] pub(crate) headers: biscuit::jws::RegisteredHeader, #[serde(flatten)] pub(crate) ne: biscuit::jwk::RSAKeyParameters, } #[derive(Serialize, Deserialize)] pub struct JWKSetDTO { pub keys: Vec<JWSEntry>, } /// Download the Google JWK Set for a given service account. /// The resulting set of JWKs need to be added to a credentials object /// for jwk verifications. 
pub fn download_google_jwks(account_mail: &str) -> Result<JWKSetDTO, Error> { let resp = reqwest::blocking::Client::new() .get(&format!( "https://www.googleapis.com/service_accounts/v1/jwk/{}", account_mail )) .send()?; let jwk_set: JWKSetDTO = resp.json()?; Ok(jwk_set) } /// Download the Google JWK Set for a given service account. /// The resulting set of JWKs need to be added to a credentials object /// for jwk verifications. pub async fn download_google_jwks_async(account_mail: &str) -> Result<JWKSetDTO, Error> { let resp = reqwest::Client::new() .get(&format!( "https://www.googleapis.com/service_accounts/v1/jwk/{}", account_mail )) .send() .await?; let jwk_set: JWKSetDTO = resp.json().await?; Ok(jwk_set) } /// Returns true if the access token (assumed to be a jwt) has expired /// /// An error is returned if the given access token string is not a jwt pub(crate) fn is_expired( access_token: &str, tolerance_in_minutes: i64, ) -> Result<bool, FirebaseError> { let token = AuthClaimsJWT::new_encoded(&access_token); let claims = token.unverified_payload()?; if let Some(expiry) = claims.registered.expiry.as_ref()
Ok(true) } /// Returns true if the jwt was updated and needs signing pub(crate) fn jwt_update_expiry_if(jwt: &mut AuthClaimsJWT, expire_in_minutes: i64) -> bool { let ref mut claims = jwt.payload_mut().unwrap().registered; let now = biscuit::Timestamp::from(Utc::now()); if let Some(issued_at) = claims.issued_at.as_ref() { let diff: Duration = Utc::now().signed_duration_since(issued_at.deref().clone()); if diff.num_minutes() > expire_in_minutes { claims.issued_at = Some(now); } else { return false; } } else { claims.issued_at = Some(now); } true } pub(crate) fn create_jwt<S>( credentials: &Credentials, scope: Option<Iter<S>>, duration: chrono::Duration, client_id: Option<String>, user_id: Option<String>, audience: &str, ) -> Result<AuthClaimsJWT, Error> where S: AsRef<str>, { let claims = JwtOAuthPrivateClaims::new(scope, client_id, user_id); create_jwt_with_claims(credentials, duration, audience, claims) } pub(crate) fn create_jwt_encoded<S: AsRef<str>>( credentials: &Credentials, scope: Option<Iter<S>>, duration: chrono::Duration, client_id: Option<String>, user_id: Option<String>, audience: &str, ) -> Result<String, Error> { let jwt = create_jwt(credentials, scope, duration, client_id, user_id, audience)?; let secret = credentials .keys .secret .as_ref() .ok_or(Error::Generic("No private key added via add_keypair_key!"))?; Ok(jwt.encode(&secret.deref())?.encoded()?.encode()) } fn create_jwt_with_claims<T>( credentials: &Credentials, duration: chrono::Duration, audience: &str, claims: T, ) -> Result<biscuit::JWT<T, biscuit::Empty>, Error> where T: Serialize + DeserializeOwned, { use biscuit::{ jws::{Header, RegisteredHeader}, ClaimsSet, Empty, RegisteredClaims, }; let header: Header<Empty> = Header::from(RegisteredHeader { algorithm: SignatureAlgorithm::RS256, key_id: Some(credentials.private_key_id.to_owned()), ..Default::default() }); let expected_claims = ClaimsSet::<T> { registered: RegisteredClaims { issuer: Some(FromStr::from_str(&credentials.client_email)?), audience: Some(SingleOrMultiple::Single(StringOrUri::from_str(audience)?)), expiry: Some(biscuit::Timestamp::from(Utc::now().add(duration))), subject: Some(StringOrUri::from_str(&credentials.client_email)?), issued_at: Some(biscuit::Timestamp::from(Utc::now())), ..Default::default() }, private: claims, }; Ok(biscuit::JWT::new_decoded(header, expected_claims)) } pub fn create_custom_jwt_encoded<T: PrivateClaims>( credentials: &Credentials, claims: T, ) -> Result<String, Error> { let jwt = create_jwt_with_claims( &credentials, Duration::hours(1), JWT_AUDIENCE_IDENTITY, claims, )?; let secret = credentials .keys .secret .as_ref() .ok_or(FirebaseError::Generic( "No private key added via add_keypair_key!", ))?; Ok(jwt.encode(&secret.deref())?.encoded()?.encode()) } pub struct TokenValidationResult<T: PrivateClaims = JwtOAuthPrivateClaims> { pub claims: T, pub audience: String, pub subject: String, } impl TokenValidationResult { pub fn get_scopes(&self) -> HashSet<String> { self.claims.get_scopes() } } pub(crate) fn verify_access_token( credentials: &Credentials, access_token: &str, ) -> Result<TokenValidationResult, Error> { verify_access_token_with_claims(credentials, access_token) } pub fn verify_access_token_with_claims<T: PrivateClaims>( credentials: &Credentials, access_token: &str, ) -> Result<TokenValidationResult<T>, Error> { let token = biscuit::JWT::<T, biscuit::Empty>::new_encoded(&access_token); let header = token.unverified_header()?; let kid = header .registered .key_id .as_ref() .ok_or(FirebaseError::Generic("No jwt 
kid"))?; let secret = credentials .decode_secret(kid) .ok_or(FirebaseError::Generic("No secret for kid"))?; let token = token.into_decoded(&secret.deref(), SignatureAlgorithm::RS256)?; use biscuit::Presence::*; let o = ValidationOptions { claim_presence_options: ClaimPresenceOptions { issued_at: Required, not_before: Optional, expiry: Required, issuer: Required, audience: Required, subject: Required, id: Optional, }, // audience: Validation::Validate(StringOrUri::from_str(JWT_SUBJECT)?), ..Default::default() }; let claims = token.payload()?; claims.registered.validate(o)?; let audience = match claims.registered.audience.as_ref().unwrap() { SingleOrMultiple::Single(v) => v.to_string(), SingleOrMultiple::Multiple(v) => v.get(0).unwrap().to_string(), }; Ok(TokenValidationResult { claims: claims.private.clone(), subject: claims.registered.subject.as_ref().unwrap().to_string(), audience, }) }
{
    let diff: Duration = Utc::now().signed_duration_since(expiry.deref().clone());
    return Ok(diff.num_minutes() - tolerance_in_minutes > 0);
}
conditional_block
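The conditional block above is the heart of is_expired: a token counts as expired once the time elapsed past its exp claim, in whole minutes, exceeds the tolerance. A standalone sketch of the same arithmetic, assuming only the chrono crate; expiry stands in for the JWT's exp claim:

use chrono::{DateTime, Duration, Utc};

fn expired_with_tolerance(expiry: DateTime<Utc>, tolerance_in_minutes: i64) -> bool {
    // now - expiry is positive once the token is past its expiry.
    let diff: Duration = Utc::now().signed_duration_since(expiry);
    diff.num_minutes() - tolerance_in_minutes > 0
}

fn main() {
    let expiry = Utc::now() - Duration::minutes(10);
    // Expired 10 minutes ago: a 5 minute tolerance rejects it,
    // a 15 minute tolerance still accepts it.
    assert!(expired_with_tolerance(expiry, 5));
    assert!(!expired_with_tolerance(expiry, 15));
}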
spy.rs
use crate::{backend::Backend, error::error}; use cloudevents::{ event::{Data, ExtensionValue}, AttributesReader, Event, }; use drogue_cloud_service_api::{EXT_APPLICATION, EXT_DEVICE}; use itertools::Itertools; use patternfly_yew::*; use unicode_segmentation::UnicodeSegmentation; use wasm_bindgen::{closure::Closure, JsValue}; use web_sys::{EventSource, EventSourceInit}; use yew::prelude::*; pub struct Spy { link: ComponentLink<Self>, source: Option<EventSource>, events: SharedTableModel<Entry>, application: String, running: bool, total_received: usize, } pub enum Msg { Start(Option<String>), StartPressed, Stop, Event(Box<Event>), /// Failed when processing an event Error(String), /// Source failed Failed, SetApplication(String), } const DEFAULT_MAX_SIZE: usize = 200; #[derive(Clone, Debug, PartialEq)] pub struct Entry(pub Event); impl TableRenderer for Entry { fn render(&self, col: ColumnIndex) -> Html { match col.index { // timestamp 0 => render_timestamp(&self.0), // device id 1 => self.device().into(), // payload 2 => render_data_short(&self.0), // ignore _ => html! {}, } } fn render_details(&self) -> Vec<Span> { vec![Span::max(render_details(&self.0)).truncate()] } } impl Entry { fn device(&self) -> String { let app_id = self.extension_as_string(EXT_APPLICATION); let device_id = self.extension_as_string(EXT_DEVICE); format!("{} / {}", app_id, device_id) } fn extension_as_string(&self, name: &str) -> String { self.0 .extension(name) .map(|s| match s { ExtensionValue::String(s) => s.clone(), ExtensionValue::Integer(i) => i.to_string(), ExtensionValue::Boolean(true) => "true".into(), ExtensionValue::Boolean(false) => "false".into(), }) .unwrap_or_default() } } impl Component for Spy { type Message = Msg; type Properties = (); fn create(_props: Self::Properties, link: ComponentLink<Self>) -> Self { Self { events: Default::default(), link, source: None, running: false, total_received: 0, application: String::new(), } } fn update(&mut self, msg: Self::Message) -> ShouldRender { match msg { Msg::Start(app_id) => { log::info!("Starting: {:?}", app_id); self.start(app_id); } Msg::StartPressed => { self.link.send_message(Msg::Start(self.app_id_filter())); } Msg::Stop => { self.stop(); } Msg::Event(event) => { // log::debug!("Pushing event: {:?}", event); self.total_received += 1; self.events.insert(0, Entry(*event)); while self.events.len() > DEFAULT_MAX_SIZE { self.events.pop(); } } Msg::Error(err) => { error("Failed to process event", err); } Msg::Failed => { error("Source error", "Failed to connect to the event source"); self.running = false; } Msg::SetApplication(application) => { self.application = application; } } true } fn change(&mut self, _props: Self::Properties) -> ShouldRender { false } fn view(&self) -> Html { let is_valid = self.app_id_filter().is_some(); let is_running = self.running; let v = |value: &str| match value { "" => InputState::Error, _ => InputState::Default, }; return html! 
{ <> <PageSection variant=PageSectionVariant::Light limit_width=true> <Content> <Title>{"Device Message Spy"}</Title> </Content> </PageSection> <PageSection> <Toolbar> <ToolbarGroup> <ToolbarItem> <TextInput disabled=self.running onchange=self.link.callback(|app|Msg::SetApplication(app)) validator=Validator::from(v) placeholder="Application ID to spy on"/> </ToolbarItem> <ToolbarItem> {if is_running { html!{<Button disabled=!is_valid label="Stop" icon=Icon::Pause variant=Variant::Secondary onclick=self.link.callback(|_|Msg::Stop) />} } else { html!{<Button disabled=!is_valid label="Start" icon=Icon::Play variant=Variant::Primary onclick=self.link.callback(|_|Msg::StartPressed) />} }} </ToolbarItem> </ToolbarGroup> <ToolbarItem modifiers=vec![ToolbarElementModifier::Right.all()]> { if self.running { html!{ <strong>{"events received: "}{self.total_received}</strong> } } else { html!{} } } </ToolbarItem> </Toolbar> <Table<SharedTableModel<Entry>> entries=self.events.clone() mode=TableMode::CompactExpandable header={html_nested!{ <TableHeader> <TableColumn label="Timestamp (UTC)"/> <TableColumn label="Device ID"/> <TableColumn label="Payload"/> </TableHeader> }} > </Table<SharedTableModel<Entry>>> { if self.events.is_empty() { self.render_empty() } else { html!{} }} </PageSection> </> }; } fn destroy(&mut self) { if let Some(source) = self.source.take() { source.close(); } } } impl Spy { fn app_id_filter(&self) -> Option<String> { let value = self.application.clone(); match value.is_empty() { true => None, false => Some(value), } } fn start(&mut self, app_id: Option<String>) { let mut url = Backend::url("/api/console/v1alpha1/spy").unwrap(); // add optional filter if let Some(app_id) = &app_id { url.query_pairs_mut().append_pair("app", app_id); } // EventSource doesn't support passing headers, so we cannot send // the bearer token the normal way url.query_pairs_mut() .append_pair("token", &Backend::access_token().unwrap_or_default()); // create source let source = EventSource::new_with_event_source_init_dict(&url.to_string(), &EventSourceInit::new()) .unwrap(); // setup onmessage let link = self.link.clone(); let on_message = Closure::wrap(Box::new(move |msg: &JsValue| { let msg = extract_event(msg); link.send_message(msg); }) as Box<dyn FnMut(&JsValue)>); source.set_onmessage(Some(&on_message.into_js_value().into()));
let on_error = Closure::wrap(Box::new(move || { link.send_message(Msg::Failed); }) as Box<dyn FnMut()>); source.set_onerror(Some(&on_error.into_js_value().into())); // store result self.running = true; self.source = Some(source); } fn stop(&mut self) { if let Some(source) = self.source.take() { source.close(); } self.running = false } fn render_empty(&self) -> Html { return html! { <div style="padding-bottom: 10rem; height: 100%;"> <Bullseye> <EmptyState title="No new messages" icon=Icon::Pending size=Size::XLarge > { "The " } <q> {"message spy"} </q> { " will only show "} <strong> {"new"} </strong> {" messages received by the system. When the next message arrives, you will see it right here." } </EmptyState> </Bullseye> </div> }; } } fn extract_event(msg: &JsValue) -> Msg { // web_sys::console::debug_2(&JsValue::from("event: "), msg); let data: String = js_sys::Reflect::get(msg, &JsValue::from("data")) .unwrap() .as_string() .unwrap(); match serde_json::from_str(&data) { Ok(event) => Msg::Event(event), Err(e) => Msg::Error(e.to_string()), } } fn render_data(event: &Event) -> Html { // let data: Option<Data> = event.get_data(); match event.data() { None => html! {}, Some(Data::String(text)) => html! { <pre> {text} </pre> }, Some(Data::Binary(blob)) => html! { <> <pre> { pretty_hex::pretty_hex(&blob) } </pre> <pre> { base64_block(&blob) } </pre> </> }, Some(Data::Json(value)) => { let value = serde_json::to_string_pretty(&value).unwrap(); return html! { <pre> {value} </pre> }; } } } fn base64_block(input: &[u8]) -> String { base64::encode(input) .chars() .collect::<Vec<_>>() .chunks(120) .map(|chunk| chunk.iter().collect::<String>()) .join("\n") } fn render_blob(blob: &[u8]) -> String { let max = blob.len().min(25); let ellipsis = if blob.len() > max { ", …" } else { "" }; format!("[{}; {:02x?}{}]", blob.len(), &blob[0..max], ellipsis) } fn truncate_str(len: usize, string: &str) -> String { let mut r = String::new(); for c in string.graphemes(true) { if r.len() > len || r.contains('\n') || r.contains('\r') { r.push('…'); break; } r.push_str(c); } r } fn render_data_short(event: &Event) -> Html { match event.data() { None => html! {}, Some(Data::String(text)) => html! { <pre> <Label label="String" color=Color::Purple/>{" "}{truncate_str(100, text)} </pre> }, Some(Data::Binary(blob)) => html! { <pre> <Label label="BLOB" color=Color::Blue/>{" "}{render_blob(&blob)} </pre> }, Some(Data::Json(value)) => html! { <pre> <Label label="JSON" color=Color::Cyan/>{" "}{truncate_str(100, &value.to_string())} </pre> }, } } fn render_timestamp(event: &Event) -> Html { event .time() .map(|ts| { return html! { <span> <pre>{ts.format("%H:%M:%S%.3f %Y-%m-%d")}</pre> </span> }; }) .unwrap_or_default() } #[derive(Clone, Debug, PartialEq)] struct AttributeEntry(pub String, pub Html); impl TableRenderer for AttributeEntry { fn render(&self, index: ColumnIndex) -> Html { match index.index { 0 => html! {&self.0}, 1 => self.1.clone(), _ => html! {}, } } } fn render_details(event: &Event) -> Html { let mut attrs: Vec<AttributeEntry> = event .iter() .map(|(key, value)| { ( key.to_string(), html! { <pre class="pf-c-table__text">{ value.to_string() }</pre> }, ) }) .map(|(key, value)| AttributeEntry(key, value)) .collect(); attrs.sort_by(|a, b| a.0.cmp(&b.0)); return html! 
{ <> <h3>{"Attributes"}</h3> <Table<SimpleTableModel<AttributeEntry>> entries=SimpleTableModel::from(attrs) mode=TableMode::CompactNoBorders header=html_nested!{ <TableHeader> <TableColumn label="Key"/> <TableColumn label="Value"/> </TableHeader> } > </Table<SimpleTableModel<AttributeEntry>>> <h3>{"Payload"}</h3> { render_data(event) } </> }; }
// setup onerror
let link = self.link.clone();
random_line_split
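spy.rs truncates payload previews with truncate_str, which cuts on grapheme clusters rather than bytes so multi-byte characters are never split mid-sequence. The same function extracted into a runnable sketch, assuming the unicode-segmentation crate the component already imports:

use unicode_segmentation::UnicodeSegmentation;

fn truncate_str(len: usize, string: &str) -> String {
    let mut r = String::new();
    for g in string.graphemes(true) {
        // Stop at the byte budget or at the first line break.
        if r.len() > len || r.contains('\n') || r.contains('\r') {
            r.push('…');
            break;
        }
        r.push_str(g);
    }
    r
}

fn main() {
    // Accented characters survive the cut instead of being split in half.
    println!("{}", truncate_str(8, "héllo wörld, this is long"));
}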
spy.rs
use crate::{backend::Backend, error::error}; use cloudevents::{ event::{Data, ExtensionValue}, AttributesReader, Event, }; use drogue_cloud_service_api::{EXT_APPLICATION, EXT_DEVICE}; use itertools::Itertools; use patternfly_yew::*; use unicode_segmentation::UnicodeSegmentation; use wasm_bindgen::{closure::Closure, JsValue}; use web_sys::{EventSource, EventSourceInit}; use yew::prelude::*; pub struct Spy { link: ComponentLink<Self>, source: Option<EventSource>, events: SharedTableModel<Entry>, application: String, running: bool, total_received: usize, } pub enum Msg { Start(Option<String>), StartPressed, Stop, Event(Box<Event>), /// Failed when processing an event Error(String), /// Source failed Failed, SetApplication(String), } const DEFAULT_MAX_SIZE: usize = 200; #[derive(Clone, Debug, PartialEq)] pub struct Entry(pub Event); impl TableRenderer for Entry { fn render(&self, col: ColumnIndex) -> Html { match col.index { // timestamp 0 => render_timestamp(&self.0), // device id 1 => self.device().into(), // payload 2 => render_data_short(&self.0), // ignore _ => html! {}, } } fn render_details(&self) -> Vec<Span> { vec![Span::max(render_details(&self.0)).truncate()] } } impl Entry { fn device(&self) -> String { let app_id = self.extension_as_string(EXT_APPLICATION); let device_id = self.extension_as_string(EXT_DEVICE); format!("{} / {}", app_id, device_id) } fn extension_as_string(&self, name: &str) -> String { self.0 .extension(name) .map(|s| match s { ExtensionValue::String(s) => s.clone(), ExtensionValue::Integer(i) => i.to_string(), ExtensionValue::Boolean(true) => "true".into(), ExtensionValue::Boolean(false) => "false".into(), }) .unwrap_or_default() } } impl Component for Spy { type Message = Msg; type Properties = (); fn create(_props: Self::Properties, link: ComponentLink<Self>) -> Self { Self { events: Default::default(), link, source: None, running: false, total_received: 0, application: String::new(), } } fn update(&mut self, msg: Self::Message) -> ShouldRender { match msg { Msg::Start(app_id) => { log::info!("Starting: {:?}", app_id); self.start(app_id); } Msg::StartPressed => { self.link.send_message(Msg::Start(self.app_id_filter())); } Msg::Stop => { self.stop(); } Msg::Event(event) => { // log::debug!("Pushing event: {:?}", event); self.total_received += 1; self.events.insert(0, Entry(*event)); while self.events.len() > DEFAULT_MAX_SIZE { self.events.pop(); } } Msg::Error(err) => { error("Failed to process event", err); } Msg::Failed => { error("Source error", "Failed to connect to the event source"); self.running = false; } Msg::SetApplication(application) => { self.application = application; } } true } fn change(&mut self, _props: Self::Properties) -> ShouldRender { false } fn view(&self) -> Html { let is_valid = self.app_id_filter().is_some(); let is_running = self.running; let v = |value: &str| match value { "" => InputState::Error, _ => InputState::Default, }; return html! 
{ <> <PageSection variant=PageSectionVariant::Light limit_width=true> <Content> <Title>{"Device Message Spy"}</Title> </Content> </PageSection> <PageSection> <Toolbar> <ToolbarGroup> <ToolbarItem> <TextInput disabled=self.running onchange=self.link.callback(|app|Msg::SetApplication(app)) validator=Validator::from(v) placeholder="Application ID to spy on"/> </ToolbarItem> <ToolbarItem> {if is_running { html!{<Button disabled=!is_valid label="Stop" icon=Icon::Pause variant=Variant::Secondary onclick=self.link.callback(|_|Msg::Stop) />} } else { html!{<Button disabled=!is_valid label="Start" icon=Icon::Play variant=Variant::Primary onclick=self.link.callback(|_|Msg::StartPressed) />} }} </ToolbarItem> </ToolbarGroup> <ToolbarItem modifiers=vec![ToolbarElementModifier::Right.all()]> { if self.running { html!{ <strong>{"events received: "}{self.total_received}</strong> } } else { html!{} } } </ToolbarItem> </Toolbar> <Table<SharedTableModel<Entry>> entries=self.events.clone() mode=TableMode::CompactExpandable header={html_nested!{ <TableHeader> <TableColumn label="Timestamp (UTC)"/> <TableColumn label="Device ID"/> <TableColumn label="Payload"/> </TableHeader> }} > </Table<SharedTableModel<Entry>>> { if self.events.is_empty() { self.render_empty() } else { html!{} }} </PageSection> </> }; } fn destroy(&mut self) { if let Some(source) = self.source.take() { source.close(); } } } impl Spy { fn app_id_filter(&self) -> Option<String> { let value = self.application.clone(); match value.is_empty() { true => None, false => Some(value), } } fn start(&mut self, app_id: Option<String>) { let mut url = Backend::url("/api/console/v1alpha1/spy").unwrap(); // add optional filter if let Some(app_id) = &app_id { url.query_pairs_mut().append_pair("app", app_id); } // EventSource doesn't support passing headers, so we cannot send // the bearer token the normal way url.query_pairs_mut() .append_pair("token", &Backend::access_token().unwrap_or_default()); // create source let source = EventSource::new_with_event_source_init_dict(&url.to_string(), &EventSourceInit::new()) .unwrap(); // setup onmessage let link = self.link.clone(); let on_message = Closure::wrap(Box::new(move |msg: &JsValue| { let msg = extract_event(msg); link.send_message(msg); }) as Box<dyn FnMut(&JsValue)>); source.set_onmessage(Some(&on_message.into_js_value().into())); // setup onerror let link = self.link.clone(); let on_error = Closure::wrap(Box::new(move || { link.send_message(Msg::Failed); }) as Box<dyn FnMut()>); source.set_onerror(Some(&on_error.into_js_value().into())); // store result self.running = true; self.source = Some(source); } fn stop(&mut self) { if let Some(source) = self.source.take() { source.close(); } self.running = false } fn render_empty(&self) -> Html { return html! { <div style="padding-bottom: 10rem; height: 100%;"> <Bullseye> <EmptyState title="No new messages" icon=Icon::Pending size=Size::XLarge > { "The " } <q> {"message spy"} </q> { " will only show "} <strong> {"new"} </strong> {" messages received by the system. When the next message arrives, you will see it right here." 
} </EmptyState> </Bullseye> </div> }; } } fn extract_event(msg: &JsValue) -> Msg { // web_sys::console::debug_2(&JsValue::from("event: "), msg); let data: String = js_sys::Reflect::get(msg, &JsValue::from("data")) .unwrap() .as_string() .unwrap(); match serde_json::from_str(&data) { Ok(event) => Msg::Event(event), Err(e) => Msg::Error(e.to_string()), } } fn render_data(event: &Event) -> Html { // let data: Option<Data> = event.get_data(); match event.data() { None => html! {}, Some(Data::String(text)) => html! { <pre> {text} </pre> }, Some(Data::Binary(blob)) => html! { <> <pre> { pretty_hex::pretty_hex(&blob) } </pre> <pre> { base64_block(&blob) } </pre> </> }, Some(Data::Json(value)) => { let value = serde_json::to_string_pretty(&value).unwrap(); return html! { <pre> {value} </pre> }; } } } fn base64_block(input: &[u8]) -> String { base64::encode(input) .chars() .collect::<Vec<_>>() .chunks(120) .map(|chunk| chunk.iter().collect::<String>()) .join("\n") } fn render_blob(blob: &[u8]) -> String { let max = blob.len().min(25); let ellipsis = if blob.len() > max { ", …" } else { "" }; format!("[{}; {:02x?}{}]", blob.len(), &blob[0..max], ellipsis) } fn truncate_str(len: usize, string: &str) -> String { let mut r = String::new(); for c in string.graphemes(true) { if r.len() > len || r.contains('\n') || r.contains('\r') { r.push('…'); break; } r.push_str(c); } r } fn render_data_short(event: &Event) -> Html { match event.data() { None => html! {}, Some(Data::String(text)) => html! { <pre> <Label label="String" color=Color::Purple/>{" "}{truncate_str(100, text)} </pre> }, Some(Data::Binary(blob)) => html! { <pre> <Label label="BLOB" color=Color::Blue/>{" "}{render_blob(&blob)} </pre> }, Some(Data::Json(value)) => html! { <pre> <Label label="JSON" color=Color::Cyan/>{" "}{truncate_str(100, &value.to_string())} </pre> }, } } fn render_timestamp(event: &Event) -> Html { event .time() .map(|ts| { return html! { <span> <pre>{ts.format("%H:%M:%S%.3f %Y-%m-%d")}</pre> </span> }; }) .unwrap_or_default() } #[derive(Clone, Debug, PartialEq)] struct Attr
String, pub Html); impl TableRenderer for AttributeEntry { fn render(&self, index: ColumnIndex) -> Html { match index.index { 0 => html! {&self.0}, 1 => self.1.clone(), _ => html! {}, } } } fn render_details(event: &Event) -> Html { let mut attrs: Vec<AttributeEntry> = event .iter() .map(|(key, value)| { ( key.to_string(), html! { <pre class="pf-c-table__text">{ value.to_string() }</pre> }, ) }) .map(|(key, value)| AttributeEntry(key, value)) .collect(); attrs.sort_by(|a, b| a.0.cmp(&b.0)); return html! { <> <h3>{"Attributes"}</h3> <Table<SimpleTableModel<AttributeEntry>> entries=SimpleTableModel::from(attrs) mode=TableMode::CompactNoBorders header=html_nested!{ <TableHeader> <TableColumn label="Key"/> <TableColumn label="Value"/> </TableHeader> } > </Table<SimpleTableModel<AttributeEntry>>> <h3>{"Payload"}</h3> { render_data(event) } </> }; }
ibuteEntry(pub
identifier_name
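render_details collects an event's attributes and sorts them by key so the expanded row renders in a stable order across events. A sketch of that collect-and-sort step with plain std types, leaving the yew rendering out:

fn sorted_attributes(pairs: Vec<(String, String)>) -> Vec<(String, String)> {
    let mut attrs = pairs;
    // Sort by attribute key, exactly as render_details does before
    // handing the rows to the table model.
    attrs.sort_by(|a, b| a.0.cmp(&b.0));
    attrs
}

fn main() {
    let attrs = sorted_attributes(vec![
        ("type".into(), "telemetry".into()),
        ("id".into(), "1234".into()),
    ]);
    assert_eq!(attrs[0].0, "id");
}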
service.rs
use std::io::Read; use std::sync::{Arc, Mutex}; use std::collections::HashMap; use futures::{self, Future, BoxFuture}; use curl::easy::{Easy, List}; use tokio_core::reactor::Handle; use tokio_curl::{Session, PerformError}; use serde_json::{from_value, from_str, Value}; pub type Fut<T> = BoxFuture<T, PerformError>; #[derive(Debug)] pub struct App { pub name: String, pub max_mem_usage: f64, pub max_cpu_usage: f64, pub max_instances: i64, pub instances: i64, pub tasks: HashMap<String, String>, } #[derive(Debug)] pub struct Statistic { pub timestamp: f64, pub cpu_time: f64, pub cpu_usage: f64, pub mem_usage: f64, } #[derive(Debug, Deserialize)] struct TaskStatistic { cpus_limit: f64, cpus_system_time_secs: f64, cpus_user_time_secs: f64, mem_limit_bytes: i64, mem_rss_bytes: i64, timestamp: f64, } pub struct Service { handle: Handle, marathon_url: String, mesos_url: String, max_mem_usage: f64, max_cpu_usage: f64, multiplier: f64, max_instances: i64, } impl Service { pub fn new(handle: Handle, marathon_url: String, mesos_url: String, max_mem_usage: f64, max_cpu_usage: f64, multiplier: f64, max_instances: i64) -> Service { Service { handle: handle, marathon_url: marathon_url, mesos_url: mesos_url, max_mem_usage: max_mem_usage, max_cpu_usage: max_cpu_usage, multiplier: multiplier, max_instances: max_instances, } } pub fn get_apps(&mut self) -> Fut<Vec<String>> { let url = format!("{}/v2/apps", &self.marathon_url); self.send_get(&url).map(|body| { let data = from_str::<Value>(&body).unwrap(); let data = data["apps"].as_array().unwrap(); let mut apps = Vec::new(); for x in data.iter() { let id = x["id"].as_str().unwrap(); apps.push(id[1..].to_string()); } apps }).boxed() } pub fn get_app(&mut self, app: &str) -> Fut<Option<App>> { let url = format!("{}/v2/apps/{}", &self.marathon_url, &app); let app = app.to_string(); let mut max_instances = self.max_instances.clone(); let mut max_mem_usage = self.max_mem_usage.clone(); let mut max_cpu_usage = self.max_cpu_usage.clone(); self.send_get(&url).map(move |body| { let data = from_str::<Value>(&body).unwrap(); let instances = data.pointer("/app/instances").unwrap(); let instances = instances.as_i64().unwrap(); let labels = data.pointer("/app/labels").unwrap(); let labels = labels.as_object().unwrap(); for (label, value) in labels { match (label.as_ref(), value) { ("AUTOSCALE_MAX_INSTANCES", v) => { max_instances = from_value(v.clone()).unwrap(); } ("AUTOSCALE_MEM_PERCENT", v) => { max_mem_usage = from_value(v.clone()).unwrap(); } ("AUTOSCALE_CPU_PERCENT", v) => { max_cpu_usage = from_value(v.clone()).unwrap(); } _ => {} } } let xs = data.pointer("/app/tasks").unwrap(); let xs = xs.as_array().unwrap(); let mut tasks = HashMap::new(); for x in xs.iter() { let id = x["id"].as_str().unwrap(); let slave_id = x["slaveId"].as_str().unwrap(); tasks.insert(id.clone().to_string(), slave_id.clone().to_string()); } Some(App { name: app, max_instances: max_instances, max_mem_usage: max_mem_usage, max_cpu_usage: max_cpu_usage, instances: instances, tasks: tasks, }) }).boxed() } pub fn get_slaves(&mut self) -> Fut<HashMap<String, String>> { let url = format!("{}/master/slaves", &self.mesos_url); self.send_get(&url).map(|body| { let data = from_str::<Value>(&body).unwrap(); let data = data["slaves"].as_array().unwrap(); let mut slaves = HashMap::new(); for slave in data.iter() { let id = slave["id"].as_str().unwrap(); let hostname = slave["hostname"].as_str().unwrap(); let port = slave["port"].as_i64().unwrap(); let addr = format!("{}:{}", hostname, port); 
slaves.insert(id.clone().to_string(), addr.to_string()); } slaves }).boxed() } pub fn get_statistic(&mut self, app: &App, slaves: &HashMap<String, String>, prev: Option<&Statistic>) -> Fut<Statistic> { let mut futs = Vec::new(); for (id, slave_id) in &app.tasks { let url = slaves.get::<String>(&slave_id).unwrap().to_string(); futs.push(self.get_task_statistic(url, id)); } let mut prev_timestamp = 0.0; let mut prev_cpu_time = 0.0; if let Some(p) = prev { prev_timestamp = p.timestamp; prev_cpu_time = p.cpu_time; } futures::collect(futs).map(move |tasks| { let mut mems: Vec<f64> = Vec::new(); let mut cpus: Vec<f64> = Vec::new();
if task.is_none() { continue; } let task = task.unwrap(); timestamp = task.timestamp; cpus.push(task.cpus_user_time_secs + task.cpus_system_time_secs); mems.push(100.0 * task.mem_rss_bytes as f64 / task.mem_limit_bytes as f64); } let mem_usage = mems.iter() .fold(0.0, |a, &b| a + b) / mems.len() as f64; let cpu_time = cpus.iter() .fold(0.0, |a, &b| a + b) / cpus.len() as f64; let sampling_duration = timestamp - prev_timestamp; let cpu_time_usage = cpu_time - prev_cpu_time; let cpu_usage = cpu_time_usage / sampling_duration * 100.0; Statistic { timestamp: timestamp, cpu_time: cpu_time, mem_usage: mem_usage, cpu_usage: cpu_usage, } }).boxed() } pub fn scale(&mut self, app: &App) -> Fut<()> { let instances = (app.instances as f64 * self.multiplier).ceil() as i64; if instances > app.max_instances { info!("Cannot scale {}, reached maximum instances of: {}", app.name, app.max_instances); return futures::done(Ok(())).boxed(); } let url = format!("{}/v2/apps/{}", &self.marathon_url, &app.name); let body = format!(r#"{{"instances": {}}}"#, instances); let session = Session::new(self.handle.clone()); let mut req = Easy::new(); req.url(&url).unwrap(); req.put(true).unwrap(); let mut list = List::new(); list.append("Content-Type: application/json").unwrap(); req.http_headers(list).unwrap(); req.post_field_size(body.as_bytes().len() as u64).unwrap(); req.read_function(move |buf| { let mut data = body.as_bytes(); Ok(data.read(buf).unwrap_or(0)) }).unwrap(); session.perform(req).map(|mut r| { info!("Scaling response code: {}", r.response_code().unwrap()); }).boxed() } fn get_task_statistic(&mut self, slave: String, id: &str) -> Fut<Option<TaskStatistic>> { let url = format!("http://{}/monitor/statistics", &slave); let id = id.to_string(); self.send_get(&url).map(move |body| { let data = from_str::<Value>(&body).unwrap(); let data = data.as_array().unwrap(); data.iter().find(|x| { x["executor_id"].as_str().unwrap() == id }).map(|x| { from_value(x["statistics"].clone()).unwrap() }) }).boxed() } fn send_get(&mut self, url: &str) -> Fut<String> { let session = Session::new(self.handle.clone()); let response = Arc::new(Mutex::new(Vec::new())); let headers = Arc::new(Mutex::new(Vec::new())); let mut req = Easy::new(); req.get(true).unwrap(); req.url(url).unwrap(); let response2 = response.clone(); req.write_function(move |data| { response2.lock().unwrap().extend_from_slice(data); Ok(data.len()) }).unwrap(); let headers2 = headers.clone(); req.header_function(move |header| { headers2.lock().unwrap().push(header.to_vec()); true }).unwrap(); session.perform(req).map(move |_| { let response = response.lock().unwrap(); let response = String::from_utf8_lossy(&response); response.into_owned() }).boxed() } } #[cfg(test)] mod tests { use tokio_core::reactor::Core; #[test] #[ignore] fn test() { let marathon_url = "http://localhost:8080"; let mesos_url = "http://localhost:5050"; let mut evloop = Core::new().unwrap(); let mut service = ::Service::new(evloop.handle(), marathon_url.to_string(), mesos_url.to_string(), 80.0, 80.0, 1.5, 10); let fut = service.get_slaves(); let slaves = evloop.run(fut).unwrap(); let fut = service.get_apps(); let apps = evloop.run(fut).unwrap(); for id in apps { let fut = service.get_app(&id); let app = evloop.run(fut).unwrap().unwrap(); let fut = service.get_statistic(&app, &slaves, None); let stat = evloop.run(fut).unwrap(); if app.name == "api" { let fut = service.scale(&app); evloop.run(fut).unwrap(); } } } }
let mut timestamp: f64 = 0.0;
for task in tasks {
random_line_split
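Mesos reports cumulative CPU time per task, so get_statistic derives usage from the delta between two samples divided by the sampling window. A standalone sketch of that rate calculation; the zero-duration guard is an addition here, the original divides directly:

fn cpu_usage_percent(
    prev_timestamp: f64,
    prev_cpu_time: f64,
    timestamp: f64,
    cpu_time: f64,
) -> f64 {
    let sampling_duration = timestamp - prev_timestamp;
    if sampling_duration <= 0.0 {
        return 0.0; // avoid dividing by zero on identical samples
    }
    // Fraction of one CPU consumed over the window, as a percentage.
    (cpu_time - prev_cpu_time) / sampling_duration * 100.0
}

fn main() {
    // 2 CPU-seconds consumed over a 10 second window -> 20% usage.
    assert!((cpu_usage_percent(0.0, 0.0, 10.0, 2.0) - 20.0).abs() < 1e-9);
}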
service.rs
use std::io::Read; use std::sync::{Arc, Mutex}; use std::collections::HashMap; use futures::{self, Future, BoxFuture}; use curl::easy::{Easy, List}; use tokio_core::reactor::Handle; use tokio_curl::{Session, PerformError}; use serde_json::{from_value, from_str, Value}; pub type Fut<T> = BoxFuture<T, PerformError>; #[derive(Debug)] pub struct App { pub name: String, pub max_mem_usage: f64, pub max_cpu_usage: f64, pub max_instances: i64, pub instances: i64, pub tasks: HashMap<String, String>, } #[derive(Debug)] pub struct Statistic { pub timestamp: f64, pub cpu_time: f64, pub cpu_usage: f64, pub mem_usage: f64, } #[derive(Debug, Deserialize)] struct TaskStatistic { cpus_limit: f64, cpus_system_time_secs: f64, cpus_user_time_secs: f64, mem_limit_bytes: i64, mem_rss_bytes: i64, timestamp: f64, } pub struct Service { handle: Handle, marathon_url: String, mesos_url: String, max_mem_usage: f64, max_cpu_usage: f64, multiplier: f64, max_instances: i64, } impl Service { pub fn new(handle: Handle, marathon_url: String, mesos_url: String, max_mem_usage: f64, max_cpu_usage: f64, multiplier: f64, max_instances: i64) -> Service { Service { handle: handle, marathon_url: marathon_url, mesos_url: mesos_url, max_mem_usage: max_mem_usage, max_cpu_usage: max_cpu_usage, multiplier: multiplier, max_instances: max_instances, } } pub fn get_apps(&mut self) -> Fut<Vec<String>> { let url = format!("{}/v2/apps", &self.marathon_url); self.send_get(&url).map(|body| { let data = from_str::<Value>(&body).unwrap(); let data = data["apps"].as_array().unwrap(); let mut apps = Vec::new(); for x in data.iter() { let id = x["id"].as_str().unwrap(); apps.push(id[1..].to_string()); } apps }).boxed() } pub fn get_app(&mut self, app: &str) -> Fut<Option<App>> { let url = format!("{}/v2/apps/{}", &self.marathon_url, &app); let app = app.to_string(); let mut max_instances = self.max_instances.clone(); let mut max_mem_usage = self.max_mem_usage.clone(); let mut max_cpu_usage = self.max_cpu_usage.clone(); self.send_get(&url).map(move |body| { let data = from_str::<Value>(&body).unwrap(); let instances = data.pointer("/app/instances").unwrap(); let instances = instances.as_i64().unwrap(); let labels = data.pointer("/app/labels").unwrap(); let labels = labels.as_object().unwrap(); for (label, value) in labels { match (label.as_ref(), value) { ("AUTOSCALE_MAX_INSTANCES", v) => { max_instances = from_value(v.clone()).unwrap(); } ("AUTOSCALE_MEM_PERCENT", v) => { max_mem_usage = from_value(v.clone()).unwrap(); } ("AUTOSCALE_CPU_PERCENT", v) => { max_cpu_usage = from_value(v.clone()).unwrap(); } _ => {} } } let xs = data.pointer("/app/tasks").unwrap(); let xs = xs.as_array().unwrap(); let mut tasks = HashMap::new(); for x in xs.iter() { let id = x["id"].as_str().unwrap(); let slave_id = x["slaveId"].as_str().unwrap(); tasks.insert(id.clone().to_string(), slave_id.clone().to_string()); } Some(App { name: app, max_instances: max_instances, max_mem_usage: max_mem_usage, max_cpu_usage: max_cpu_usage, instances: instances, tasks: tasks, }) }).boxed() } pub fn get_slaves(&mut self) -> Fut<HashMap<String, String>> { let url = format!("{}/master/slaves", &self.mesos_url); self.send_get(&url).map(|body| { let data = from_str::<Value>(&body).unwrap(); let data = data["slaves"].as_array().unwrap(); let mut slaves = HashMap::new(); for slave in data.iter() { let id = slave["id"].as_str().unwrap(); let hostname = slave["hostname"].as_str().unwrap(); let port = slave["port"].as_i64().unwrap(); let addr = format!("{}:{}", hostname, port); 
slaves.insert(id.clone().to_string(), addr.to_string()); } slaves }).boxed() } pub fn get_statistic(&mut self, app: &App, slaves: &HashMap<String, String>, prev: Option<&Statistic>) -> Fut<Statistic>
for task in tasks { if task.is_none() { continue; } let task = task.unwrap(); timestamp = task.timestamp; cpus.push(task.cpus_user_time_secs + task.cpus_system_time_secs); mems.push(100.0 * task.mem_rss_bytes as f64 / task.mem_limit_bytes as f64); } let mem_usage = mems.iter() .fold(0.0, |a, &b| a + b) / mems.len() as f64; let cpu_time = cpus.iter() .fold(0.0, |a, &b| a + b) / cpus.len() as f64; let sampling_duration = timestamp - prev_timestamp; let cpu_time_usage = cpu_time - prev_cpu_time; let cpu_usage = cpu_time_usage / sampling_duration * 100.0; Statistic { timestamp: timestamp, cpu_time: cpu_time, mem_usage: mem_usage, cpu_usage: cpu_usage, } }).boxed() } pub fn scale(&mut self, app: &App) -> Fut<()> { let instances = (app.instances as f64 * self.multiplier).ceil() as i64; if instances > app.max_instances { info!("Cannot scale {}, reached maximum instances of: {}", app.name, app.max_instances); return futures::done(Ok(())).boxed(); } let url = format!("{}/v2/apps/{}", &self.marathon_url, &app.name); let body = format!(r#"{{"instances": {}}}"#, instances); let session = Session::new(self.handle.clone()); let mut req = Easy::new(); req.url(&url).unwrap(); req.put(true).unwrap(); let mut list = List::new(); list.append("Content-Type: application/json").unwrap(); req.http_headers(list).unwrap(); req.post_field_size(body.as_bytes().len() as u64).unwrap(); req.read_function(move |buf| { let mut data = body.as_bytes(); Ok(data.read(buf).unwrap_or(0)) }).unwrap(); session.perform(req).map(|mut r| { info!("Scaling response code: {}", r.response_code().unwrap()); }).boxed() } fn get_task_statistic(&mut self, slave: String, id: &str) -> Fut<Option<TaskStatistic>> { let url = format!("http://{}/monitor/statistics", &slave); let id = id.to_string(); self.send_get(&url).map(move |body| { let data = from_str::<Value>(&body).unwrap(); let data = data.as_array().unwrap(); data.iter().find(|x| { x["executor_id"].as_str().unwrap() == id }).map(|x| { from_value(x["statistics"].clone()).unwrap() }) }).boxed() } fn send_get(&mut self, url: &str) -> Fut<String> { let session = Session::new(self.handle.clone()); let response = Arc::new(Mutex::new(Vec::new())); let headers = Arc::new(Mutex::new(Vec::new())); let mut req = Easy::new(); req.get(true).unwrap(); req.url(url).unwrap(); let response2 = response.clone(); req.write_function(move |data| { response2.lock().unwrap().extend_from_slice(data); Ok(data.len()) }).unwrap(); let headers2 = headers.clone(); req.header_function(move |header| { headers2.lock().unwrap().push(header.to_vec()); true }).unwrap(); session.perform(req).map(move |_| { let response = response.lock().unwrap(); let response = String::from_utf8_lossy(&response); response.into_owned() }).boxed() } } #[cfg(test)] mod tests { use tokio_core::reactor::Core; #[test] #[ignore] fn test() { let marathon_url = "http://localhost:8080"; let mesos_url = "http://localhost:5050"; let mut evloop = Core::new().unwrap(); let mut service = ::Service::new(evloop.handle(), marathon_url.to_string(), mesos_url.to_string(), 80.0, 80.0, 1.5, 10); let fut = service.get_slaves(); let slaves = evloop.run(fut).unwrap(); let fut = service.get_apps(); let apps = evloop.run(fut).unwrap(); for id in apps { let fut = service.get_app(&id); let app = evloop.run(fut).unwrap().unwrap(); let fut = service.get_statistic(&app, &slaves, None); let stat = evloop.run(fut).unwrap(); if app.name == "api" { let fut = service.scale(&app); evloop.run(fut).unwrap(); } } } }
{
    let mut futs = Vec::new();
    for (id, slave_id) in &app.tasks {
        let url = slaves.get::<String>(&slave_id).unwrap().to_string();
        futs.push(self.get_task_statistic(url, id));
    }
    let mut prev_timestamp = 0.0;
    let mut prev_cpu_time = 0.0;
    if let Some(p) = prev {
        prev_timestamp = p.timestamp;
        prev_cpu_time = p.cpu_time;
    }
    futures::collect(futs).map(move |tasks| {
        let mut mems: Vec<f64> = Vec::new();
        let mut cpus: Vec<f64> = Vec::new();
        let mut timestamp: f64 = 0.0;
identifier_body
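Service::scale multiplies the current instance count by the configured multiplier, rounds up, and refuses to scale past the per-app maximum. The same decision isolated into a pure function, mirroring the early return in the original:

fn next_instances(current: i64, multiplier: f64, max_instances: i64) -> Option<i64> {
    let wanted = (current as f64 * multiplier).ceil() as i64;
    if wanted > max_instances {
        None // caller logs "Cannot scale ..." and leaves the app alone
    } else {
        Some(wanted)
    }
}

fn main() {
    assert_eq!(next_instances(3, 1.5, 10), Some(5)); // ceil(4.5) = 5
    assert_eq!(next_instances(8, 1.5, 10), None);    // ceil(12.0) > 10
}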
service.rs
use std::io::Read; use std::sync::{Arc, Mutex}; use std::collections::HashMap; use futures::{self, Future, BoxFuture}; use curl::easy::{Easy, List}; use tokio_core::reactor::Handle; use tokio_curl::{Session, PerformError}; use serde_json::{from_value, from_str, Value}; pub type Fut<T> = BoxFuture<T, PerformError>; #[derive(Debug)] pub struct App { pub name: String, pub max_mem_usage: f64, pub max_cpu_usage: f64, pub max_instances: i64, pub instances: i64, pub tasks: HashMap<String, String>, } #[derive(Debug)] pub struct Statistic { pub timestamp: f64, pub cpu_time: f64, pub cpu_usage: f64, pub mem_usage: f64, } #[derive(Debug, Deserialize)] struct TaskStatistic { cpus_limit: f64, cpus_system_time_secs: f64, cpus_user_time_secs: f64, mem_limit_bytes: i64, mem_rss_bytes: i64, timestamp: f64, } pub struct
{ handle: Handle, marathon_url: String, mesos_url: String, max_mem_usage: f64, max_cpu_usage: f64, multiplier: f64, max_instances: i64, } impl Service { pub fn new(handle: Handle, marathon_url: String, mesos_url: String, max_mem_usage: f64, max_cpu_usage: f64, multiplier: f64, max_instances: i64) -> Service { Service { handle: handle, marathon_url: marathon_url, mesos_url: mesos_url, max_mem_usage: max_mem_usage, max_cpu_usage: max_cpu_usage, multiplier: multiplier, max_instances: max_instances, } } pub fn get_apps(&mut self) -> Fut<Vec<String>> { let url = format!("{}/v2/apps", &self.marathon_url); self.send_get(&url).map(|body| { let data = from_str::<Value>(&body).unwrap(); let data = data["apps"].as_array().unwrap(); let mut apps = Vec::new(); for x in data.iter() { let id = x["id"].as_str().unwrap(); apps.push(id[1..].to_string()); } apps }).boxed() } pub fn get_app(&mut self, app: &str) -> Fut<Option<App>> { let url = format!("{}/v2/apps/{}", &self.marathon_url, &app); let app = app.to_string(); let mut max_instances = self.max_instances.clone(); let mut max_mem_usage = self.max_mem_usage.clone(); let mut max_cpu_usage = self.max_cpu_usage.clone(); self.send_get(&url).map(move |body| { let data = from_str::<Value>(&body).unwrap(); let instances = data.pointer("/app/instances").unwrap(); let instances = instances.as_i64().unwrap(); let labels = data.pointer("/app/labels").unwrap(); let labels = labels.as_object().unwrap(); for (label, value) in labels { match (label.as_ref(), value) { ("AUTOSCALE_MAX_INSTANCES", v) => { max_instances = from_value(v.clone()).unwrap(); } ("AUTOSCALE_MEM_PERCENT", v) => { max_mem_usage = from_value(v.clone()).unwrap(); } ("AUTOSCALE_CPU_PERCENT", v) => { max_cpu_usage = from_value(v.clone()).unwrap(); } _ => {} } } let xs = data.pointer("/app/tasks").unwrap(); let xs = xs.as_array().unwrap(); let mut tasks = HashMap::new(); for x in xs.iter() { let id = x["id"].as_str().unwrap(); let slave_id = x["slaveId"].as_str().unwrap(); tasks.insert(id.clone().to_string(), slave_id.clone().to_string()); } Some(App { name: app, max_instances: max_instances, max_mem_usage: max_mem_usage, max_cpu_usage: max_cpu_usage, instances: instances, tasks: tasks, }) }).boxed() } pub fn get_slaves(&mut self) -> Fut<HashMap<String, String>> { let url = format!("{}/master/slaves", &self.mesos_url); self.send_get(&url).map(|body| { let data = from_str::<Value>(&body).unwrap(); let data = data["slaves"].as_array().unwrap(); let mut slaves = HashMap::new(); for slave in data.iter() { let id = slave["id"].as_str().unwrap(); let hostname = slave["hostname"].as_str().unwrap(); let port = slave["port"].as_i64().unwrap(); let addr = format!("{}:{}", hostname, port); slaves.insert(id.clone().to_string(), addr.to_string()); } slaves }).boxed() } pub fn get_statistic(&mut self, app: &App, slaves: &HashMap<String, String>, prev: Option<&Statistic>) -> Fut<Statistic> { let mut futs = Vec::new(); for (id, slave_id) in &app.tasks { let url = slaves.get::<String>(&slave_id).unwrap().to_string(); futs.push(self.get_task_statistic(url, id)); } let mut prev_timestamp = 0.0; let mut prev_cpu_time = 0.0; if let Some(p) = prev { prev_timestamp = p.timestamp; prev_cpu_time = p.cpu_time; } futures::collect(futs).map(move |tasks| { let mut mems: Vec<f64> = Vec::new(); let mut cpus: Vec<f64> = Vec::new(); let mut timestamp: f64 = 0.0; for task in tasks { if task.is_none() { continue; } let task = task.unwrap(); timestamp = task.timestamp; cpus.push(task.cpus_user_time_secs + task.cpus_system_time_secs); 
mems.push(100.0 * task.mem_rss_bytes as f64 / task.mem_limit_bytes as f64); } let mem_usage = mems.iter() .fold(0.0, |a, &b| a + b) / mems.len() as f64; let cpu_time = cpus.iter() .fold(0.0, |a, &b| a + b) / cpus.len() as f64; let sampling_duration = timestamp - prev_timestamp; let cpu_time_usage = cpu_time - prev_cpu_time; let cpu_usage = cpu_time_usage / sampling_duration * 100.0; Statistic { timestamp: timestamp, cpu_time: cpu_time, mem_usage: mem_usage, cpu_usage: cpu_usage, } }).boxed() } pub fn scale(&mut self, app: &App) -> Fut<()> { let instances = (app.instances as f64 * self.multiplier).ceil() as i64; if instances > app.max_instances { info!("Cannot scale {}, reached maximum instances of: {}", app.name, app.max_instances); return futures::done(Ok(())).boxed(); } let url = format!("{}/v2/apps/{}", &self.marathon_url, &app.name); let body = format!(r#"{{"instances": {}}}"#, instances); let session = Session::new(self.handle.clone()); let mut req = Easy::new(); req.url(&url).unwrap(); req.put(true).unwrap(); let mut list = List::new(); list.append("Content-Type: application/json").unwrap(); req.http_headers(list).unwrap(); req.post_field_size(body.as_bytes().len() as u64).unwrap(); req.read_function(move |buf| { let mut data = body.as_bytes(); Ok(data.read(buf).unwrap_or(0)) }).unwrap(); session.perform(req).map(|mut r| { info!("Scaling response code: {}", r.response_code().unwrap()); }).boxed() } fn get_task_statistic(&mut self, slave: String, id: &str) -> Fut<Option<TaskStatistic>> { let url = format!("http://{}/monitor/statistics", &slave); let id = id.to_string(); self.send_get(&url).map(move |body| { let data = from_str::<Value>(&body).unwrap(); let data = data.as_array().unwrap(); data.iter().find(|x| { x["executor_id"].as_str().unwrap() == id }).map(|x| { from_value(x["statistics"].clone()).unwrap() }) }).boxed() } fn send_get(&mut self, url: &str) -> Fut<String> { let session = Session::new(self.handle.clone()); let response = Arc::new(Mutex::new(Vec::new())); let headers = Arc::new(Mutex::new(Vec::new())); let mut req = Easy::new(); req.get(true).unwrap(); req.url(url).unwrap(); let response2 = response.clone(); req.write_function(move |data| { response2.lock().unwrap().extend_from_slice(data); Ok(data.len()) }).unwrap(); let headers2 = headers.clone(); req.header_function(move |header| { headers2.lock().unwrap().push(header.to_vec()); true }).unwrap(); session.perform(req).map(move |_| { let response = response.lock().unwrap(); let response = String::from_utf8_lossy(&response); response.into_owned() }).boxed() } } #[cfg(test)] mod tests { use tokio_core::reactor::Core; #[test] #[ignore] fn test() { let marathon_url = "http://localhost:8080"; let mesos_url = "http://localhost:5050"; let mut evloop = Core::new().unwrap(); let mut service = ::Service::new(evloop.handle(), marathon_url.to_string(), mesos_url.to_string(), 80.0, 80.0, 1.5, 10); let fut = service.get_slaves(); let slaves = evloop.run(fut).unwrap(); let fut = service.get_apps(); let apps = evloop.run(fut).unwrap(); for id in apps { let fut = service.get_app(&id); let app = evloop.run(fut).unwrap().unwrap(); let fut = service.get_statistic(&app, &slaves, None); let stat = evloop.run(fut).unwrap(); if app.name == "api" { let fut = service.scale(&app); evloop.run(fut).unwrap(); } } } }
Service
identifier_name
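The statistics gathering above turns two cumulative samples into a usage percentage: Mesos exposes total CPU seconds consumed per task, so the service divides the growth in CPU time by the elapsed wall-clock time between samples. A minimal sketch of that calculation, with illustrative names that are not part of the service above:

/// Cumulative CPU seconds consumed and the wall-clock time of the sample.
struct Sample {
    timestamp: f64, // seconds since epoch
    cpu_time: f64,  // cpus_user_time_secs + cpus_system_time_secs, summed over tasks
}

/// Percentage of one CPU used between two samples.
fn cpu_usage_percent(prev: &Sample, curr: &Sample) -> f64 {
    let sampling_duration = curr.timestamp - prev.timestamp;
    let cpu_time_used = curr.cpu_time - prev.cpu_time;
    cpu_time_used / sampling_duration * 100.0
}

fn main() {
    let prev = Sample { timestamp: 100.0, cpu_time: 40.0 };
    let curr = Sample { timestamp: 110.0, cpu_time: 45.0 };
    // 5 CPU-seconds over 10 wall-clock seconds => 50% of one core.
    assert_eq!(cpu_usage_percent(&prev, &curr), 50.0);
}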
service.rs
use std::io::Read; use std::sync::{Arc, Mutex}; use std::collections::HashMap; use futures::{self, Future, BoxFuture}; use curl::easy::{Easy, List}; use tokio_core::reactor::Handle; use tokio_curl::{Session, PerformError}; use serde_json::{from_value, from_str, Value}; pub type Fut<T> = BoxFuture<T, PerformError>; #[derive(Debug)] pub struct App { pub name: String, pub max_mem_usage: f64, pub max_cpu_usage: f64, pub max_instances: i64, pub instances: i64, pub tasks: HashMap<String, String>, } #[derive(Debug)] pub struct Statistic { pub timestamp: f64, pub cpu_time: f64, pub cpu_usage: f64, pub mem_usage: f64, } #[derive(Debug, Deserialize)] struct TaskStatistic { cpus_limit: f64, cpus_system_time_secs: f64, cpus_user_time_secs: f64, mem_limit_bytes: i64, mem_rss_bytes: i64, timestamp: f64, } pub struct Service { handle: Handle, marathon_url: String, mesos_url: String, max_mem_usage: f64, max_cpu_usage: f64, multiplier: f64, max_instances: i64, } impl Service { pub fn new(handle: Handle, marathon_url: String, mesos_url: String, max_mem_usage: f64, max_cpu_usage: f64, multiplier: f64, max_instances: i64) -> Service { Service { handle: handle, marathon_url: marathon_url, mesos_url: mesos_url, max_mem_usage: max_mem_usage, max_cpu_usage: max_cpu_usage, multiplier: multiplier, max_instances: max_instances, } } pub fn get_apps(&mut self) -> Fut<Vec<String>> { let url = format!("{}/v2/apps", &self.marathon_url); self.send_get(&url).map(|body| { let data = from_str::<Value>(&body).unwrap(); let data = data["apps"].as_array().unwrap(); let mut apps = Vec::new(); for x in data.iter() { let id = x["id"].as_str().unwrap(); apps.push(id[1..].to_string()); } apps }).boxed() } pub fn get_app(&mut self, app: &str) -> Fut<Option<App>> { let url = format!("{}/v2/apps/{}", &self.marathon_url, &app); let app = app.to_string(); let mut max_instances = self.max_instances.clone(); let mut max_mem_usage = self.max_mem_usage.clone(); let mut max_cpu_usage = self.max_cpu_usage.clone(); self.send_get(&url).map(move |body| { let data = from_str::<Value>(&body).unwrap(); let instances = data.pointer("/app/instances").unwrap(); let instances = instances.as_i64().unwrap(); let labels = data.pointer("/app/labels").unwrap(); let labels = labels.as_object().unwrap(); for (label, value) in labels { match (label.as_ref(), value) { ("AUTOSCALE_MAX_INSTANCES", v) =>
("AUTOSCALE_MEM_PERCENT", v) => { max_mem_usage = from_value(v.clone()).unwrap(); } ("AUTOSCALE_CPU_PERCENT", v) => { max_cpu_usage = from_value(v.clone()).unwrap(); } _ => {} } } let xs = data.pointer("/app/tasks").unwrap(); let xs = xs.as_array().unwrap(); let mut tasks = HashMap::new(); for x in xs.iter() { let id = x["id"].as_str().unwrap(); let slave_id = x["slaveId"].as_str().unwrap(); tasks.insert(id.clone().to_string(), slave_id.clone().to_string()); } Some(App { name: app, max_instances: max_instances, max_mem_usage: max_mem_usage, max_cpu_usage: max_cpu_usage, instances: instances, tasks: tasks, }) }).boxed() } pub fn get_slaves(&mut self) -> Fut<HashMap<String, String>> { let url = format!("{}/master/slaves", &self.mesos_url); self.send_get(&url).map(|body| { let data = from_str::<Value>(&body).unwrap(); let data = data["slaves"].as_array().unwrap(); let mut slaves = HashMap::new(); for slave in data.iter() { let id = slave["id"].as_str().unwrap(); let hostname = slave["hostname"].as_str().unwrap(); let port = slave["port"].as_i64().unwrap(); let addr = format!("{}:{}", hostname, port); slaves.insert(id.clone().to_string(), addr.to_string()); } slaves }).boxed() } pub fn get_statistic(&mut self, app: &App, slaves: &HashMap<String, String>, prev: Option<&Statistic>) -> Fut<Statistic> { let mut futs = Vec::new(); for (id, slave_id) in &app.tasks { let url = slaves.get::<String>(&slave_id).unwrap().to_string(); futs.push(self.get_task_statistic(url, id)); } let mut prev_timestamp = 0.0; let mut prev_cpu_time = 0.0; if let Some(p) = prev { prev_timestamp = p.timestamp; prev_cpu_time = p.cpu_time; } futures::collect(futs).map(move |tasks| { let mut mems: Vec<f64> = Vec::new(); let mut cpus: Vec<f64> = Vec::new(); let mut timestamp: f64 = 0.0; for task in tasks { if task.is_none() { continue; } let task = task.unwrap(); timestamp = task.timestamp; cpus.push(task.cpus_user_time_secs + task.cpus_system_time_secs); mems.push(100.0 * task.mem_rss_bytes as f64 / task.mem_limit_bytes as f64); } let mem_usage = mems.iter() .fold(0.0, |a, &b| a + b) / mems.len() as f64; let cpu_time = cpus.iter() .fold(0.0, |a, &b| a + b) / cpus.len() as f64; let sampling_duration = timestamp - prev_timestamp; let cpu_time_usage = cpu_time - prev_cpu_time; let cpu_usage = cpu_time_usage / sampling_duration * 100.0; Statistic { timestamp: timestamp, cpu_time: cpu_time, mem_usage: mem_usage, cpu_usage: cpu_usage, } }).boxed() } pub fn scale(&mut self, app: &App) -> Fut<()> { let instances = (app.instances as f64 * self.multiplier).ceil() as i64; if instances > app.max_instances { info!("Cannot scale {}, reached maximum instances of: {}", app.name, app.max_instances); return futures::done(Ok(())).boxed(); } let url = format!("{}/v2/apps/{}", &self.marathon_url, &app.name); let body = format!(r#"{{"instances": {}}}"#, instances); let session = Session::new(self.handle.clone()); let mut req = Easy::new(); req.url(&url).unwrap(); req.put(true).unwrap(); let mut list = List::new(); list.append("Content-Type: application/json").unwrap(); req.http_headers(list).unwrap(); req.post_field_size(body.as_bytes().len() as u64).unwrap(); req.read_function(move |buf| { let mut data = body.as_bytes(); Ok(data.read(buf).unwrap_or(0)) }).unwrap(); session.perform(req).map(|mut r| { info!("Scaling response code: {}", r.response_code().unwrap()); }).boxed() } fn get_task_statistic(&mut self, slave: String, id: &str) -> Fut<Option<TaskStatistic>> { let url = format!("http://{}/monitor/statistics", &slave); let id = id.to_string(); 
self.send_get(&url).map(move |body| { let data = from_str::<Value>(&body).unwrap(); let data = data.as_array().unwrap(); data.iter().find(|x| { x["executor_id"].as_str().unwrap() == id }).map(|x| { from_value(x["statistics"].clone()).unwrap() }) }).boxed() } fn send_get(&mut self, url: &str) -> Fut<String> { let session = Session::new(self.handle.clone()); let response = Arc::new(Mutex::new(Vec::new())); let headers = Arc::new(Mutex::new(Vec::new())); let mut req = Easy::new(); req.get(true).unwrap(); req.url(url).unwrap(); let response2 = response.clone(); req.write_function(move |data| { response2.lock().unwrap().extend_from_slice(data); Ok(data.len()) }).unwrap(); let headers2 = headers.clone(); req.header_function(move |header| { headers2.lock().unwrap().push(header.to_vec()); true }).unwrap(); session.perform(req).map(move |_| { let response = response.lock().unwrap(); let response = String::from_utf8_lossy(&response); response.into_owned() }).boxed() } } #[cfg(test)] mod tests { use tokio_core::reactor::Core; #[test] #[ignore] fn test() { let marathon_url = "http://localhost:8080"; let mesos_url = "http://localhost:5050"; let mut evloop = Core::new().unwrap(); let mut service = ::Service::new(evloop.handle(), marathon_url.to_string(), mesos_url.to_string(), 80.0, 80.0, 1.5, 10); let fut = service.get_slaves(); let slaves = evloop.run(fut).unwrap(); let fut = service.get_apps(); let apps = evloop.run(fut).unwrap(); for id in apps { let fut = service.get_app(&id); let app = evloop.run(fut).unwrap().unwrap(); let fut = service.get_statistic(&app, &slaves, None); let stat = evloop.run(fut).unwrap(); if app.name == "api" { let fut = service.scale(&app); evloop.run(fut).unwrap(); } } } }
{ max_instances = from_value(v.clone()).unwrap(); }
conditional_block
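The `scale` method's policy is multiplicative with a hard cap: take the current instance count, scale it by the configured multiplier, round up, and refuse (rather than clamp) when the result would exceed the AUTOSCALE_MAX_INSTANCES label. A small stand-alone sketch of that decision, with a hypothetical function name:

/// Returns the next instance count, or None when the cap would be exceeded.
fn next_instances(current: i64, multiplier: f64, max_instances: i64) -> Option<i64> {
    let target = (current as f64 * multiplier).ceil() as i64;
    if target > max_instances {
        None // caller logs and leaves the app untouched
    } else {
        Some(target)
    }
}

fn main() {
    assert_eq!(next_instances(4, 1.5, 10), Some(6));
    // ceil(8 * 1.5) = 12 exceeds the cap of 10, so no scaling happens.
    assert_eq!(next_instances(8, 1.5, 10), None);
}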
ctx.rs
//! The ØMQ context type. use crate::{auth::server::AuthServer, error::*}; use libzmq_sys as sys; use sys::errno; use lazy_static::lazy_static; use serde::{Deserialize, Serialize}; use std::{ os::raw::{c_int, c_void}, ptr, str, sync::Arc, thread, }; lazy_static! { static ref GLOBAL_CONTEXT: Ctx = Ctx::new(); } #[derive(Copy, Clone, Debug)] enum RawCtxOption { IOThreads, MaxSockets, MaxMsgSize, SocketLimit, IPV6, Blocky, } impl From<RawCtxOption> for c_int { fn from(r: RawCtxOption) -> c_int { match r { RawCtxOption::IOThreads => sys::ZMQ_IO_THREADS as c_int, RawCtxOption::MaxSockets => sys::ZMQ_MAX_SOCKETS as c_int, RawCtxOption::MaxMsgSize => sys::ZMQ_MAX_MSGSZ as c_int, RawCtxOption::SocketLimit => sys::ZMQ_SOCKET_LIMIT as c_int, RawCtxOption::IPV6 => sys::ZMQ_IPV6 as c_int, RawCtxOption::Blocky => sys::ZMQ_BLOCKY as c_int, } } } #[derive(Debug)] struct RawCtx { ctx: *mut c_void, } impl RawCtx { fn get(&self, option: RawCtxOption) -> i32 { unsafe { sys::zmq_ctx_get(self.ctx, option.into()) } } fn set(&self, option: RawCtxOption, value: i32) -> Result<(), Error> { let rc = unsafe { sys::zmq_ctx_set(self.ctx, option.into(), value) }; if rc == -1 { let errno = unsafe { sys::zmq_errno() }; match errno { errno::EINVAL => Err(Error::new(ErrorKind::InvalidInput { msg: "invalid value", })), _ => panic!(msg_from_errno(errno)), } } else { Ok(()) } } fn set_bool(&self, opt: RawCtxOption, flag: bool) -> Result<(), Error> { self.set(opt, flag as i32) } fn get_bool(&self, opt: RawCtxOption) -> bool { let flag = self.get(opt); flag != 0 } fn terminate(&self) { // We loop in case `zmq_ctx_term` gets interrupted by a signal. loop { let rc = unsafe { sys::zmq_ctx_term(self.ctx) }; if rc == 0 { break; } else { let errno = unsafe { sys::zmq_errno() }; match errno { errno::EINTR => (), _ => unreachable!(), } } } } fn shutdown(&self) { let rc = unsafe { sys::zmq_ctx_shutdown(self.ctx) }; // Should never fail. assert_eq!(rc, 0); } } // The `zmq_ctx` is internally thread-safe. unsafe impl Send for RawCtx {} unsafe impl Sync for RawCtx {} impl Drop for RawCtx { fn drop(&mut self) { self.terminate() } } impl PartialEq for RawCtx { /// Compares the two underlying raw C pointers. fn eq(&self, other: &Self) -> bool { ptr::eq(self.ctx, other.ctx) } } impl Eq for RawCtx {} impl Default for RawCtx { fn default() -> Self { let ctx = unsafe { sys::zmq_ctx_new() }; if ctx.is_null() { panic!(msg_from_errno(unsafe { sys::zmq_errno() })); } Self { ctx } } } /// A config for a [`Ctx`]. /// /// Useful in configuration files. /// /// [`Ctx`]: struct.Ctx.html #[derive(Default, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] pub struct CtxConfig { io_threads: Option<i32>, max_msg_size: Option<i32>, max_sockets: Option<i32>, no_linger: Option<bool>, } impl CtxConfig { pub fn new() -> Self { Self::default() } pub fn build(&self) -> Result<Ctx, Error> { let ctx = Ctx::new(); self.apply(&ctx)?; Ok(ctx) } pub fn apply(&self, ctx: &Ctx) -> Result<(), Error> { if let Some(value) = self.io_threads { ctx.set_io_threads(value)?; } if let Some(value) = self.max_sockets { ctx.set_max_sockets(value)?; } if let Some(value) = self.max_msg_size { ctx.set_max_msg_size(value)?; } if let Some(value) = self.no_linger { ctx.set_no_linger(value)?; } Ok(()) } pub fn io_threads(&self) -> Option<i32> { self.io_threads } pub fn set_io_threads(&mut self, value: Option<i32>) { self.io_threads = value; } pub fn max_msg_size(&self) -> Option<i32> { self.max_msg_size } pub fn set_max_msg_size(&mut self, value: Option<i32>) { self.max_msg_size = value; } pub fn max_sockets(&self) -> Option<i32> { self.max_sockets } pub fn set_max_sockets(&mut self, value: Option<i32>) { self.max_sockets = value; } pub fn no_linger(&self) -> Option<bool> { self.no_linger } pub fn set_no_linger(&mut self, value: Option<bool>) { self.no_linger = value; } } /// A convenience builder for a [`Ctx`]. /// /// Makes complex context configuration more convenient. /// /// [`Ctx`]: struct.Ctx.html #[derive(Default, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] pub struct CtxBuilder { inner: CtxConfig, } impl CtxBuilder { pub fn new() -> Self { Self::default() } /// Builds a `Ctx` from a `CtxBuilder`. /// /// # Usage Example /// ``` /// # use failure::Error; /// # /// # fn main() -> Result<(), Error> { /// use libzmq::*; /// /// let ctx = CtxBuilder::new() /// .io_threads(2) /// .no_linger() /// .build()?; /// /// assert_eq!(ctx.io_threads(), 2); /// assert_eq!(ctx.no_linger(), true); /// # /// # Ok(()) /// # } /// ``` pub fn build(&self) -> Result<Ctx, Error> { let ctx = Ctx::new(); self.apply(&ctx)?; Ok(ctx) } /// Applies a `CtxBuilder` to an existing `Ctx`. /// /// # Usage Example /// ``` /// # use failure::Error; /// # /// # fn main() -> Result<(), Error> { /// use libzmq::*; /// /// let global = Ctx::global(); /// /// CtxBuilder::new() /// .io_threads(0) /// .max_msg_size(420) /// .max_sockets(69) /// .no_linger() /// .apply(global)?; /// /// assert_eq!(global.io_threads(), 0); /// assert_eq!(global.max_msg_size(), 420); /// assert_eq!(global.no_linger(), true); /// assert_eq!(global.max_sockets(), 69); /// # /// # Ok(()) /// # } /// ``` pub fn apply(&self, ctx: &Ctx) -> Result<(), Error> { self.inner.apply(ctx) } /// See [`set_io_threads`]. /// /// [`set_io_threads`]: struct.Ctx.html#method.set_io_threads pub fn io_threads(&mut self, value: i32) -> &mut Self { self.inner.set_io_threads(Some(value)); self } /// See [`set_max_msg_size`]. /// /// [`set_max_msg_size`]: struct.Ctx.html#method.set_max_msg_size pub fn max_msg_size(&mut self, value: i32) -> &mut Self { self.inner.set_max_msg_size(Some(value)); self } /// See [`set_max_sockets`]. /// /// [`set_max_sockets`]: struct.Ctx.html#method.set_max_sockets pub fn max_sockets(&mut self, value: i32) -> &mut Self { self.inner.set_max_sockets(Some(value)); self } /// See [`set_no_linger`]. /// /// [`set_no_linger`]: struct.Ctx.html#method.set_no_linger pub fn no_linger(&mut self) -> &mut Self { self.inner.set_no_linger(Some(true)); self } } /// Keeps the list of sockets and manages the async I/O thread and /// internal queries. /// /// Each context also has an associated `AuthServer` which handles socket /// authentication. /// /// # Drop /// The context will call terminate when dropped, which will cause all /// blocking calls to fail with `CtxTerminated`, then block until /// the following conditions are met: /// * All sockets open within context have been dropped. /// * All messages sent by the application will have either been physically /// transferred to a network peer, or the socket's linger period has expired. /// /// # Thread safety /// A ØMQ context is internally thread safe. /// /// # Multiple Contexts /// Multiple contexts are allowed but are considered exotic. #[derive(Clone, Eq, PartialEq, Debug)] pub struct Ctx { raw: Arc<RawCtx>, } impl Ctx { /// Create a new ØMQ context. /// /// For almost all use cases, using and configuring the [`global`] context /// will be enough. /// /// See [`zmq_ctx_new`]. /// /// [`zmq_ctx_new`]: http://api.zeromq.org/master:zmq-ctx-new /// /// # Usage Example /// ``` /// use libzmq::Ctx; /// /// let ctx = Ctx::new(); /// let cloned = ctx.clone(); /// /// assert_eq!(ctx, cloned); /// assert_ne!(ctx, Ctx::new()); /// ``` /// /// [`global`]: #method.global pub fn new() -> Self { let raw = Arc::new(RawCtx::default()); // Enable ipv6 by default. raw.set_bool(RawCtxOption::IPV6, true).unwrap(); let ctx = Self { raw }; // Start a `ZAP` handler for the context. let mut auth = AuthServer::with_ctx(&ctx).unwrap(); // This thread is guaranteed to terminate before the ctx // since it holds an `Arc` to it. No need to store & join the // thread handle. thread::spawn(move || auth.run()); ctx } /// Returns a reference to the global context. /// /// This is a singleton used by sockets created via their respective /// `::new()` method. It merely exists for convenience and is no different /// from a context obtained via `Ctx::new()`. /// /// # Usage Example /// ``` /// # use failure::Error; /// # /// # fn main() -> Result<(), Error> { /// use libzmq::{Ctx, Client}; /// /// // A socket created via `new` will use the global `Ctx`. /// let client = Client::new()?; /// assert_eq!(client.ctx(), Ctx::global()); /// # /// # Ok(()) /// # } /// ``` pub fn global() -> &'static Ctx { &GLOBAL_CONTEXT } /// Returns the size of the ØMQ thread pool for this context. pub fn io_threads(&self) -> i32 { self.raw.as_ref().get(RawCtxOption::IOThreads) } /// Set the size of the ØMQ thread pool to handle I/O operations. /// /// "The general rule of thumb is to allow one I/O thread per gigabyte of /// data in or out per second." - [`Pieter Hintjens`] /// /// [`Pieter Hintjens`]: http://zguide.zeromq.org/page:all#I-O-Threads /// /// # Default /// The default value is `1`. /// /// # Usage Example /// ``` /// # use failure::Error; /// # /// # fn main() -> Result<(), Error> { /// use libzmq::Ctx; /// /// let ctx = Ctx::new(); /// assert_eq!(ctx.io_threads(), 1); /// /// // Let's say our app exclusively uses the inproc transport /// // for messaging. Then we don't need any I/O threads. /// ctx.set_io_threads(0)?; /// assert_eq!(ctx.io_threads(), 0); /// # /// # Ok(()) /// # } /// ``` pub fn set_io_threads(&self, nb_threads: i32) -> Result<(), Error> { self.raw.as_ref().set(RawCtxOption::IOThreads, nb_threads) } /// Returns the maximum number of sockets allowed for this context.
pub fn max_sockets(&self) -> i32 { self.raw.as_ref().get(RawCtxOption::MaxSockets) } /// Sets the maximum number of sockets allowed on the context. /// /// # Default /// The default value is `1023`. /// /// # Usage Example /// ``` /// # use failure::Error; /// # /// # fn main() -> Result<(), Error> { /// use libzmq::Ctx; /// /// let ctx = Ctx::new(); /// assert_eq!(ctx.max_sockets(), 1023); /// /// ctx.set_max_sockets(420)?; /// assert_eq!(ctx.max_sockets(), 420); /// # /// # Ok(()) /// # } /// ``` pub fn set_max_sockets(&self, max: i32) -> Result<(), Error> { self.raw.as_ref().set(RawCtxOption::MaxSockets, max) } /// Returns the maximum size of a message allowed for this context. pub fn max_msg_size(&self) -> i32 { self.raw.as_ref().get(RawCtxOption::MaxMsgSize) } /// Sets the maximum allowed size of a message sent in the context. /// /// # Default /// The default value is `i32::max_value()`. /// /// # Usage Example /// ``` /// # use failure::Error; /// # /// # fn main() -> Result<(), Error> { /// use libzmq::Ctx; /// /// let ctx = Ctx::new(); /// assert_eq!(ctx.max_msg_size(), i32::max_value()); /// /// ctx.set_max_msg_size(i32::max_value() - 1)?; /// assert_eq!(ctx.max_msg_size(), i32::max_value() - 1); /// # /// # Ok(()) /// # } /// ``` pub fn set_max_msg_size(&self, size: i32) -> Result<(), Error> { self.raw.as_ref().set(RawCtxOption::MaxMsgSize, size) } /// Returns the largest number of sockets that the context will accept. pub fn socket_limit(&self) -> i32 {
/// A value of `true` indicates that all new sockets are given a /// linger timeout of zero. /// pub fn no_linger(&self) -> bool { !self.raw.as_ref().get_bool(RawCtxOption::Blocky) } /// When set to `true`, all new sockets are given a linger timeout /// of zero. /// /// # Default /// The default value is `false`. /// /// # Usage Example /// ``` /// # use failure::Error; /// # /// # fn main() -> Result<(), Error> { /// use libzmq::Ctx; /// /// let ctx = Ctx::new(); /// assert_eq!(ctx.no_linger(), false); /// /// ctx.set_no_linger(true)?; /// assert_eq!(ctx.no_linger(), true); /// # /// # Ok(()) /// # } /// ``` pub fn set_no_linger(&self, enabled: bool) -> Result<(), Error> { self.raw.as_ref().set_bool(RawCtxOption::Blocky, !enabled) } /// Shut down the ØMQ context. /// /// Context shutdown will cause any blocking operations currently in /// progress on sockets open within context to fail immediately with /// [`CtxTerminated`]. /// /// Any further operations on sockets open within context shall fail /// with [`CtxTerminated`]. /// /// [`CtxTerminated`]: ../error/enum.ErrorKind.html#variant.CtxTerminated pub fn shutdown(&self) { self.raw.shutdown() } pub(crate) fn as_ptr(&self) -> *mut c_void { self.raw.ctx } } impl Default for Ctx { fn default() -> Self { Self::new() } } impl<'a> From<&'a Ctx> for Ctx { fn from(c: &'a Ctx) -> Ctx { c.to_owned() } }
self.raw.as_ref().get(RawCtxOption::SocketLimit) }
identifier_body
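A detail that is easy to misread in ctx.rs: libzmq exposes no direct "no linger" option, so `no_linger` and `set_no_linger` are defined by negating the `ZMQ_BLOCKY` flag on both the read and the write path. A toy model of that double inversion, using assumed stand-in types rather than the real FFI:

/// Stand-in for a context's raw ZMQ_BLOCKY flag (hypothetical type).
struct FakeCtx {
    blocky: bool, // libzmq default: true (blocky)
}

impl FakeCtx {
    // no_linger is the logical negation of "blocky".
    fn no_linger(&self) -> bool {
        !self.blocky
    }
    // Setting no_linger stores the negation back into the raw flag.
    fn set_no_linger(&mut self, enabled: bool) {
        self.blocky = !enabled;
    }
}

fn main() {
    let mut ctx = FakeCtx { blocky: true };
    assert_eq!(ctx.no_linger(), false); // matches the documented default
    ctx.set_no_linger(true);
    assert!(ctx.no_linger() && !ctx.blocky);
}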
ctx.rs
//! The ØMQ context type. use crate::{auth::server::AuthServer, error::*}; use libzmq_sys as sys; use sys::errno; use lazy_static::lazy_static; use serde::{Deserialize, Serialize}; use std::{ os::raw::{c_int, c_void}, ptr, str, sync::Arc, thread, }; lazy_static! { static ref GLOBAL_CONTEXT: Ctx = Ctx::new(); } #[derive(Copy, Clone, Debug)] enum RawCtxOption { IOThreads, MaxSockets, MaxMsgSize, SocketLimit, IPV6, Blocky, } impl From<RawCtxOption> for c_int { fn from(r: RawCtxOption) -> c_int { match r { RawCtxOption::IOThreads => sys::ZMQ_IO_THREADS as c_int, RawCtxOption::MaxSockets => sys::ZMQ_MAX_SOCKETS as c_int, RawCtxOption::MaxMsgSize => sys::ZMQ_MAX_MSGSZ as c_int, RawCtxOption::SocketLimit => sys::ZMQ_SOCKET_LIMIT as c_int, RawCtxOption::IPV6 => sys::ZMQ_IPV6 as c_int, RawCtxOption::Blocky => sys::ZMQ_BLOCKY as c_int, } } } #[derive(Debug)] struct RawCtx { ctx: *mut c_void, } impl RawCtx { fn get(&self, option: RawCtxOption) -> i32 { unsafe { sys::zmq_ctx_get(self.ctx, option.into()) } } fn set(&self, option: RawCtxOption, value: i32) -> Result<(), Error> { let rc = unsafe { sys::zmq_ctx_set(self.ctx, option.into(), value) }; if rc == -1 { let errno = unsafe { sys::zmq_errno() }; match errno { errno::EINVAL => Err(Error::new(ErrorKind::InvalidInput { msg: "invalid value", })), _ => panic!(msg_from_errno(errno)), } } else { Ok(()) } } fn set_bool(&self, opt: RawCtxOption, flag: bool) -> Result<(), Error> { self.set(opt, flag as i32) } fn get_bool(&self, opt: RawCtxOption) -> bool { let flag = self.get(opt); flag != 0 } fn terminate(&self) { // We loop in case `zmq_ctx_term` gets interrupted by a signal. loop { let rc = unsafe { sys::zmq_ctx_term(self.ctx) }; if rc == 0 { break; } else { let errno = unsafe { sys::zmq_errno() }; match errno { errno::EINTR => (), _ => unreachable!(), } } } } fn shutdown(&self) { let rc = unsafe { sys::zmq_ctx_shutdown(self.ctx) }; // Should never fail. assert_eq!(rc, 0); } } // The `zmq_ctx` is internally thread-safe. unsafe impl Send for RawCtx {} unsafe impl Sync for RawCtx {} impl Drop for RawCtx { fn drop(&mut self) { self.terminate() } } impl PartialEq for RawCtx { /// Compares the two underlying raw C pointers. fn eq(&self, other: &Self) -> bool { ptr::eq(self.ctx, other.ctx) } } impl Eq for RawCtx {} impl Default for RawCtx { fn default() -> Self { let ctx = unsafe { sys::zmq_ctx_new() }; if ctx.is_null() {
Self { ctx } } } /// A config for a [`Ctx`]. /// /// Useful in configuration files. /// /// [`Ctx`]: struct.Ctx.html #[derive(Default, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] pub struct CtxConfig { io_threads: Option<i32>, max_msg_size: Option<i32>, max_sockets: Option<i32>, no_linger: Option<bool>, } impl CtxConfig { pub fn new() -> Self { Self::default() } pub fn build(&self) -> Result<Ctx, Error> { let ctx = Ctx::new(); self.apply(&ctx)?; Ok(ctx) } pub fn apply(&self, ctx: &Ctx) -> Result<(), Error> { if let Some(value) = self.io_threads { ctx.set_io_threads(value)?; } if let Some(value) = self.max_sockets { ctx.set_max_sockets(value)?; } if let Some(value) = self.max_msg_size { ctx.set_max_msg_size(value)?; } if let Some(value) = self.no_linger { ctx.set_no_linger(value)?; } Ok(()) } pub fn io_threads(&self) -> Option<i32> { self.io_threads } pub fn set_io_threads(&mut self, value: Option<i32>) { self.io_threads = value; } pub fn max_msg_size(&self) -> Option<i32> { self.max_msg_size } pub fn set_max_msg_size(&mut self, value: Option<i32>) { self.max_msg_size = value; } pub fn max_sockets(&self) -> Option<i32> { self.max_sockets } pub fn set_max_sockets(&mut self, value: Option<i32>) { self.max_sockets = value; } pub fn no_linger(&self) -> Option<bool> { self.no_linger } pub fn set_no_linger(&mut self, value: Option<bool>) { self.no_linger = value; } } /// A convenience builder for a [`Ctx`]. /// /// Makes complex context configuration more convenient. /// /// [`Ctx`]: struct.Ctx.html #[derive(Default, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] pub struct CtxBuilder { inner: CtxConfig, } impl CtxBuilder { pub fn new() -> Self { Self::default() } /// Builds a `Ctx` from a `CtxBuilder`. /// /// # Usage Example /// ``` /// # use failure::Error; /// # /// # fn main() -> Result<(), Error> { /// use libzmq::*; /// /// let ctx = CtxBuilder::new() /// .io_threads(2) /// .no_linger() /// .build()?; /// /// assert_eq!(ctx.io_threads(), 2); /// assert_eq!(ctx.no_linger(), true); /// # /// # Ok(()) /// # } /// ``` pub fn build(&self) -> Result<Ctx, Error> { let ctx = Ctx::new(); self.apply(&ctx)?; Ok(ctx) } /// Applies a `CtxBuilder` to an existing `Ctx`. /// /// # Usage Example /// ``` /// # use failure::Error; /// # /// # fn main() -> Result<(), Error> { /// use libzmq::*; /// /// let global = Ctx::global(); /// /// CtxBuilder::new() /// .io_threads(0) /// .max_msg_size(420) /// .max_sockets(69) /// .no_linger() /// .apply(global)?; /// /// assert_eq!(global.io_threads(), 0); /// assert_eq!(global.max_msg_size(), 420); /// assert_eq!(global.no_linger(), true); /// assert_eq!(global.max_sockets(), 69); /// # /// # Ok(()) /// # } /// ``` pub fn apply(&self, ctx: &Ctx) -> Result<(), Error> { self.inner.apply(ctx) } /// See [`set_io_threads`]. /// /// [`set_io_threads`]: struct.Ctx.html#method.set_io_threads pub fn io_threads(&mut self, value: i32) -> &mut Self { self.inner.set_io_threads(Some(value)); self } /// See [`set_max_msg_size`]. /// /// [`set_max_msg_size`]: struct.Ctx.html#method.set_max_msg_size pub fn max_msg_size(&mut self, value: i32) -> &mut Self { self.inner.set_max_msg_size(Some(value)); self } /// See [`set_max_sockets`]. /// /// [`set_max_sockets`]: struct.Ctx.html#method.set_max_sockets pub fn max_sockets(&mut self, value: i32) -> &mut Self { self.inner.set_max_sockets(Some(value)); self } /// See [`set_no_linger`]. /// /// [`set_no_linger`]: struct.Ctx.html#method.set_no_linger pub fn no_linger(&mut self) -> &mut Self { self.inner.set_no_linger(Some(true)); self } } /// Keeps the list of sockets and manages the async I/O thread and /// internal queries. /// /// Each context also has an associated `AuthServer` which handles socket /// authentication. /// /// # Drop /// The context will call terminate when dropped, which will cause all /// blocking calls to fail with `CtxTerminated`, then block until /// the following conditions are met: /// * All sockets open within context have been dropped. /// * All messages sent by the application will have either been physically /// transferred to a network peer, or the socket's linger period has expired. /// /// # Thread safety /// A ØMQ context is internally thread safe. /// /// # Multiple Contexts /// Multiple contexts are allowed but are considered exotic. #[derive(Clone, Eq, PartialEq, Debug)] pub struct Ctx { raw: Arc<RawCtx>, } impl Ctx { /// Create a new ØMQ context. /// /// For almost all use cases, using and configuring the [`global`] context /// will be enough. /// /// See [`zmq_ctx_new`]. /// /// [`zmq_ctx_new`]: http://api.zeromq.org/master:zmq-ctx-new /// /// # Usage Example /// ``` /// use libzmq::Ctx; /// /// let ctx = Ctx::new(); /// let cloned = ctx.clone(); /// /// assert_eq!(ctx, cloned); /// assert_ne!(ctx, Ctx::new()); /// ``` /// /// [`global`]: #method.global pub fn new() -> Self { let raw = Arc::new(RawCtx::default()); // Enable ipv6 by default. raw.set_bool(RawCtxOption::IPV6, true).unwrap(); let ctx = Self { raw }; // Start a `ZAP` handler for the context. let mut auth = AuthServer::with_ctx(&ctx).unwrap(); // This thread is guaranteed to terminate before the ctx // since it holds an `Arc` to it. No need to store & join the // thread handle. thread::spawn(move || auth.run()); ctx } /// Returns a reference to the global context. /// /// This is a singleton used by sockets created via their respective /// `::new()` method. It merely exists for convenience and is no different /// from a context obtained via `Ctx::new()`. /// /// # Usage Example /// ``` /// # use failure::Error; /// # /// # fn main() -> Result<(), Error> { /// use libzmq::{Ctx, Client}; /// /// // A socket created via `new` will use the global `Ctx`. /// let client = Client::new()?; /// assert_eq!(client.ctx(), Ctx::global()); /// # /// # Ok(()) /// # } /// ``` pub fn global() -> &'static Ctx { &GLOBAL_CONTEXT } /// Returns the size of the ØMQ thread pool for this context. pub fn io_threads(&self) -> i32 { self.raw.as_ref().get(RawCtxOption::IOThreads) } /// Set the size of the ØMQ thread pool to handle I/O operations. /// /// "The general rule of thumb is to allow one I/O thread per gigabyte of /// data in or out per second." - [`Pieter Hintjens`] /// /// [`Pieter Hintjens`]: http://zguide.zeromq.org/page:all#I-O-Threads /// /// # Default /// The default value is `1`. /// /// # Usage Example /// ``` /// # use failure::Error; /// # /// # fn main() -> Result<(), Error> { /// use libzmq::Ctx; /// /// let ctx = Ctx::new(); /// assert_eq!(ctx.io_threads(), 1); /// /// // Let's say our app exclusively uses the inproc transport /// // for messaging. Then we don't need any I/O threads. /// ctx.set_io_threads(0)?; /// assert_eq!(ctx.io_threads(), 0); /// # /// # Ok(()) /// # } /// ``` pub fn set_io_threads(&self, nb_threads: i32) -> Result<(), Error> { self.raw.as_ref().set(RawCtxOption::IOThreads, nb_threads) } /// Returns the maximum number of sockets allowed for this context. pub fn max_sockets(&self) -> i32 { self.raw.as_ref().get(RawCtxOption::MaxSockets) } /// Sets the maximum number of sockets allowed on the context. /// /// # Default /// The default value is `1023`. /// /// # Usage Example /// ``` /// # use failure::Error; /// # /// # fn main() -> Result<(), Error> { /// use libzmq::Ctx; /// /// let ctx = Ctx::new(); /// assert_eq!(ctx.max_sockets(), 1023); /// /// ctx.set_max_sockets(420)?; /// assert_eq!(ctx.max_sockets(), 420); /// # /// # Ok(()) /// # } /// ``` pub fn set_max_sockets(&self, max: i32) -> Result<(), Error> { self.raw.as_ref().set(RawCtxOption::MaxSockets, max) } /// Returns the maximum size of a message allowed for this context. pub fn max_msg_size(&self) -> i32 { self.raw.as_ref().get(RawCtxOption::MaxMsgSize) } /// Sets the maximum allowed size of a message sent in the context. /// /// # Default /// The default value is `i32::max_value()`. /// /// # Usage Example /// ``` /// # use failure::Error; /// # /// # fn main() -> Result<(), Error> { /// use libzmq::Ctx; /// /// let ctx = Ctx::new(); /// assert_eq!(ctx.max_msg_size(), i32::max_value()); /// /// ctx.set_max_msg_size(i32::max_value() - 1)?; /// assert_eq!(ctx.max_msg_size(), i32::max_value() - 1); /// # /// # Ok(()) /// # } /// ``` pub fn set_max_msg_size(&self, size: i32) -> Result<(), Error> { self.raw.as_ref().set(RawCtxOption::MaxMsgSize, size) } /// Returns the largest number of sockets that the context will accept. pub fn socket_limit(&self) -> i32 { self.raw.as_ref().get(RawCtxOption::SocketLimit) } /// A value of `true` indicates that all new sockets are given a /// linger timeout of zero. /// pub fn no_linger(&self) -> bool { !self.raw.as_ref().get_bool(RawCtxOption::Blocky) } /// When set to `true`, all new sockets are given a linger timeout /// of zero. /// /// # Default /// The default value is `false`. /// /// # Usage Example /// ``` /// # use failure::Error; /// # /// # fn main() -> Result<(), Error> { /// use libzmq::Ctx; /// /// let ctx = Ctx::new(); /// assert_eq!(ctx.no_linger(), false); /// /// ctx.set_no_linger(true)?; /// assert_eq!(ctx.no_linger(), true); /// # /// # Ok(()) /// # } /// ``` pub fn set_no_linger(&self, enabled: bool) -> Result<(), Error> { self.raw.as_ref().set_bool(RawCtxOption::Blocky, !enabled) } /// Shut down the ØMQ context. /// /// Context shutdown will cause any blocking operations currently in /// progress on sockets open within context to fail immediately with /// [`CtxTerminated`]. /// /// Any further operations on sockets open within context shall fail /// with [`CtxTerminated`]. /// /// [`CtxTerminated`]: ../error/enum.ErrorKind.html#variant.CtxTerminated pub fn shutdown(&self) { self.raw.shutdown() } pub(crate) fn as_ptr(&self) -> *mut c_void { self.raw.ctx } } impl Default for Ctx { fn default() -> Self { Self::new() } } impl<'a> From<&'a Ctx> for Ctx { fn from(c: &'a Ctx) -> Ctx { c.to_owned() } }
panic!(msg_from_errno(unsafe { sys::zmq_errno() })); }
conditional_block
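The `terminate` method above shows a standard FFI pattern: `zmq_ctx_term` may be interrupted by a signal, in which case it reports an `EINTR`-style error and the call must simply be retried until it succeeds. A generic sketch of the retry loop with a simulated call in place of the real one (all names here are illustrative):

#[derive(Debug)]
enum CallError {
    Interrupted, // analogue of errno::EINTR
}

/// Retry a fallible call until it either succeeds or would fail for a
/// reason other than being interrupted by a signal.
fn retry_on_interrupt<F: FnMut() -> Result<(), CallError>>(mut call: F) {
    loop {
        match call() {
            Ok(()) => break,
            Err(CallError::Interrupted) => continue, // retry, like the EINTR arm
        }
    }
}

fn main() {
    let mut attempts = 0;
    retry_on_interrupt(|| {
        attempts += 1;
        // Fail twice with "interrupted", then succeed.
        if attempts < 3 { Err(CallError::Interrupted) } else { Ok(()) }
    });
    assert_eq!(attempts, 3);
}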
ctx.rs
//! The ØMQ context type. use crate::{auth::server::AuthServer, error::*}; use libzmq_sys as sys; use sys::errno; use lazy_static::lazy_static; use serde::{Deserialize, Serialize}; use std::{ os::raw::{c_int, c_void}, ptr, str, sync::Arc, thread, }; lazy_static! { static ref GLOBAL_CONTEXT: Ctx = Ctx::new();
IOThreads, MaxSockets, MaxMsgSize, SocketLimit, IPV6, Blocky, } impl From<RawCtxOption> for c_int { fn from(r: RawCtxOption) -> c_int { match r { RawCtxOption::IOThreads => sys::ZMQ_IO_THREADS as c_int, RawCtxOption::MaxSockets => sys::ZMQ_MAX_SOCKETS as c_int, RawCtxOption::MaxMsgSize => sys::ZMQ_MAX_MSGSZ as c_int, RawCtxOption::SocketLimit => sys::ZMQ_SOCKET_LIMIT as c_int, RawCtxOption::IPV6 => sys::ZMQ_IPV6 as c_int, RawCtxOption::Blocky => sys::ZMQ_BLOCKY as c_int, } } } #[derive(Debug)] struct RawCtx { ctx: *mut c_void, } impl RawCtx { fn get(&self, option: RawCtxOption) -> i32 { unsafe { sys::zmq_ctx_get(self.ctx, option.into()) } } fn set(&self, option: RawCtxOption, value: i32) -> Result<(), Error> { let rc = unsafe { sys::zmq_ctx_set(self.ctx, option.into(), value) }; if rc == -1 { let errno = unsafe { sys::zmq_errno() }; match errno { errno::EINVAL => Err(Error::new(ErrorKind::InvalidInput { msg: "invalid value", })), _ => panic!(msg_from_errno(errno)), } } else { Ok(()) } } fn set_bool(&self, opt: RawCtxOption, flag: bool) -> Result<(), Error> { self.set(opt, flag as i32) } fn get_bool(&self, opt: RawCtxOption) -> bool { let flag = self.get(opt); flag != 0 } fn terminate(&self) { // We loop in case `zmq_ctx_term` gets interrupted by a signal. loop { let rc = unsafe { sys::zmq_ctx_term(self.ctx) }; if rc == 0 { break; } else { let errno = unsafe { sys::zmq_errno() }; match errno { errno::EINTR => (), _ => unreachable!(), } } } } fn shutdown(&self) { let rc = unsafe { sys::zmq_ctx_shutdown(self.ctx) }; // Should never fail. assert_eq!(rc, 0); } } // The `zmq_ctx` is internally thread-safe. unsafe impl Send for RawCtx {} unsafe impl Sync for RawCtx {} impl Drop for RawCtx { fn drop(&mut self) { self.terminate() } } impl PartialEq for RawCtx { /// Compares the two underlying raw C pointers. fn eq(&self, other: &Self) -> bool { ptr::eq(self.ctx, other.ctx) } } impl Eq for RawCtx {} impl Default for RawCtx { fn default() -> Self { let ctx = unsafe { sys::zmq_ctx_new() }; if ctx.is_null() { panic!(msg_from_errno(unsafe { sys::zmq_errno() })); } Self { ctx } } } /// A config for a [`Ctx`]. /// /// Useful in configuration files. /// /// [`Ctx`]: struct.Ctx.html #[derive(Default, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] pub struct CtxConfig { io_threads: Option<i32>, max_msg_size: Option<i32>, max_sockets: Option<i32>, no_linger: Option<bool>, } impl CtxConfig { pub fn new() -> Self { Self::default() } pub fn build(&self) -> Result<Ctx, Error> { let ctx = Ctx::new(); self.apply(&ctx)?; Ok(ctx) } pub fn apply(&self, ctx: &Ctx) -> Result<(), Error> { if let Some(value) = self.io_threads { ctx.set_io_threads(value)?; } if let Some(value) = self.max_sockets { ctx.set_max_sockets(value)?; } if let Some(value) = self.max_msg_size { ctx.set_max_msg_size(value)?; } if let Some(value) = self.no_linger { ctx.set_no_linger(value)?; } Ok(()) } pub fn io_threads(&self) -> Option<i32> { self.io_threads } pub fn set_io_threads(&mut self, value: Option<i32>) { self.io_threads = value; } pub fn max_msg_size(&self) -> Option<i32> { self.max_msg_size } pub fn set_max_msg_size(&mut self, value: Option<i32>) { self.max_msg_size = value; } pub fn max_sockets(&self) -> Option<i32> { self.max_sockets } pub fn set_max_sockets(&mut self, value: Option<i32>) { self.max_sockets = value; } pub fn no_linger(&self) -> Option<bool> { self.no_linger } pub fn set_no_linger(&mut self, value: Option<bool>) { self.no_linger = value; } } /// A convenience builder for a [`Ctx`]. /// /// Makes complex context configuration more convenient. /// /// [`Ctx`]: struct.Ctx.html #[derive(Default, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] pub struct CtxBuilder { inner: CtxConfig, } impl CtxBuilder { pub fn new() -> Self { Self::default() } /// Builds a `Ctx` from a `CtxBuilder`. /// /// # Usage Example /// ``` /// # use failure::Error; /// # /// # fn main() -> Result<(), Error> { /// use libzmq::*; /// /// let ctx = CtxBuilder::new() /// .io_threads(2) /// .no_linger() /// .build()?; /// /// assert_eq!(ctx.io_threads(), 2); /// assert_eq!(ctx.no_linger(), true); /// # /// # Ok(()) /// # } /// ``` pub fn build(&self) -> Result<Ctx, Error> { let ctx = Ctx::new(); self.apply(&ctx)?; Ok(ctx) } /// Applies a `CtxBuilder` to an existing `Ctx`. /// /// # Usage Example /// ``` /// # use failure::Error; /// # /// # fn main() -> Result<(), Error> { /// use libzmq::*; /// /// let global = Ctx::global(); /// /// CtxBuilder::new() /// .io_threads(0) /// .max_msg_size(420) /// .max_sockets(69) /// .no_linger() /// .apply(global)?; /// /// assert_eq!(global.io_threads(), 0); /// assert_eq!(global.max_msg_size(), 420); /// assert_eq!(global.no_linger(), true); /// assert_eq!(global.max_sockets(), 69); /// # /// # Ok(()) /// # } /// ``` pub fn apply(&self, ctx: &Ctx) -> Result<(), Error> { self.inner.apply(ctx) } /// See [`set_io_threads`]. /// /// [`set_io_threads`]: struct.Ctx.html#method.set_io_threads pub fn io_threads(&mut self, value: i32) -> &mut Self { self.inner.set_io_threads(Some(value)); self } /// See [`set_max_msg_size`]. /// /// [`set_max_msg_size`]: struct.Ctx.html#method.set_max_msg_size pub fn max_msg_size(&mut self, value: i32) -> &mut Self { self.inner.set_max_msg_size(Some(value)); self } /// See [`set_max_sockets`]. /// /// [`set_max_sockets`]: struct.Ctx.html#method.set_max_sockets pub fn max_sockets(&mut self, value: i32) -> &mut Self { self.inner.set_max_sockets(Some(value)); self } /// See [`set_no_linger`]. /// /// [`set_no_linger`]: struct.Ctx.html#method.set_no_linger pub fn no_linger(&mut self) -> &mut Self { self.inner.set_no_linger(Some(true)); self } } /// Keeps the list of sockets and manages the async I/O thread and /// internal queries. /// /// Each context also has an associated `AuthServer` which handles socket /// authentication. /// /// # Drop /// The context will call terminate when dropped, which will cause all /// blocking calls to fail with `CtxTerminated`, then block until /// the following conditions are met: /// * All sockets open within context have been dropped. /// * All messages sent by the application will have either been physically /// transferred to a network peer, or the socket's linger period has expired. /// /// # Thread safety /// A ØMQ context is internally thread safe. /// /// # Multiple Contexts /// Multiple contexts are allowed but are considered exotic. #[derive(Clone, Eq, PartialEq, Debug)] pub struct Ctx { raw: Arc<RawCtx>, } impl Ctx { /// Create a new ØMQ context. /// /// For almost all use cases, using and configuring the [`global`] context /// will be enough. /// /// See [`zmq_ctx_new`]. /// /// [`zmq_ctx_new`]: http://api.zeromq.org/master:zmq-ctx-new /// /// # Usage Example /// ``` /// use libzmq::Ctx; /// /// let ctx = Ctx::new(); /// let cloned = ctx.clone(); /// /// assert_eq!(ctx, cloned); /// assert_ne!(ctx, Ctx::new()); /// ``` /// /// [`global`]: #method.global pub fn new() -> Self { let raw = Arc::new(RawCtx::default()); // Enable ipv6 by default. raw.set_bool(RawCtxOption::IPV6, true).unwrap(); let ctx = Self { raw }; // Start a `ZAP` handler for the context. let mut auth = AuthServer::with_ctx(&ctx).unwrap(); // This thread is guaranteed to terminate before the ctx // since it holds an `Arc` to it. No need to store & join the // thread handle. thread::spawn(move || auth.run()); ctx } /// Returns a reference to the global context. /// /// This is a singleton used by sockets created via their respective /// `::new()` method. It merely exists for convenience and is no different /// from a context obtained via `Ctx::new()`. /// /// # Usage Example /// ``` /// # use failure::Error; /// # /// # fn main() -> Result<(), Error> { /// use libzmq::{Ctx, Client}; /// /// // A socket created via `new` will use the global `Ctx`. /// let client = Client::new()?; /// assert_eq!(client.ctx(), Ctx::global()); /// # /// # Ok(()) /// # } /// ``` pub fn global() -> &'static Ctx { &GLOBAL_CONTEXT } /// Returns the size of the ØMQ thread pool for this context. pub fn io_threads(&self) -> i32 { self.raw.as_ref().get(RawCtxOption::IOThreads) } /// Set the size of the ØMQ thread pool to handle I/O operations. /// /// "The general rule of thumb is to allow one I/O thread per gigabyte of /// data in or out per second." - [`Pieter Hintjens`] /// /// [`Pieter Hintjens`]: http://zguide.zeromq.org/page:all#I-O-Threads /// /// # Default /// The default value is `1`. /// /// # Usage Example /// ``` /// # use failure::Error; /// # /// # fn main() -> Result<(), Error> { /// use libzmq::Ctx; /// /// let ctx = Ctx::new(); /// assert_eq!(ctx.io_threads(), 1); /// /// // Let's say our app exclusively uses the inproc transport /// // for messaging. Then we don't need any I/O threads. /// ctx.set_io_threads(0)?; /// assert_eq!(ctx.io_threads(), 0); /// # /// # Ok(()) /// # } /// ``` pub fn set_io_threads(&self, nb_threads: i32) -> Result<(), Error> { self.raw.as_ref().set(RawCtxOption::IOThreads, nb_threads) } /// Returns the maximum number of sockets allowed for this context. pub fn max_sockets(&self) -> i32 { self.raw.as_ref().get(RawCtxOption::MaxSockets) } /// Sets the maximum number of sockets allowed on the context. /// /// # Default /// The default value is `1023`. /// /// # Usage Example /// ``` /// # use failure::Error; /// # /// # fn main() -> Result<(), Error> { /// use libzmq::Ctx; /// /// let ctx = Ctx::new(); /// assert_eq!(ctx.max_sockets(), 1023); /// /// ctx.set_max_sockets(420)?; /// assert_eq!(ctx.max_sockets(), 420); /// # /// # Ok(()) /// # } /// ``` pub fn set_max_sockets(&self, max: i32) -> Result<(), Error> { self.raw.as_ref().set(RawCtxOption::MaxSockets, max) } /// Returns the maximum size of a message allowed for this context. pub fn max_msg_size(&self) -> i32 { self.raw.as_ref().get(RawCtxOption::MaxMsgSize) } /// Sets the maximum allowed size of a message sent in the context. /// /// # Default /// The default value is `i32::max_value()`. /// /// # Usage Example /// ``` /// # use failure::Error; /// # /// # fn main() -> Result<(), Error> { /// use libzmq::Ctx; /// /// let ctx = Ctx::new(); /// assert_eq!(ctx.max_msg_size(), i32::max_value()); /// /// ctx.set_max_msg_size(i32::max_value() - 1)?; /// assert_eq!(ctx.max_msg_size(), i32::max_value() - 1); /// # /// # Ok(()) /// # } /// ``` pub fn set_max_msg_size(&self, size: i32) -> Result<(), Error> { self.raw.as_ref().set(RawCtxOption::MaxMsgSize, size) } /// Returns the largest number of sockets that the context will accept. pub fn socket_limit(&self) -> i32 { self.raw.as_ref().get(RawCtxOption::SocketLimit) } /// A value of `true` indicates that all new sockets are given a /// linger timeout of zero. /// pub fn no_linger(&self) -> bool { !self.raw.as_ref().get_bool(RawCtxOption::Blocky) } /// When set to `true`, all new sockets are given a linger timeout /// of zero. /// /// # Default /// The default value is `false`. /// /// # Usage Example /// ``` /// # use failure::Error; /// # /// # fn main() -> Result<(), Error> { /// use libzmq::Ctx; /// /// let ctx = Ctx::new(); /// assert_eq!(ctx.no_linger(), false); /// /// ctx.set_no_linger(true)?; /// assert_eq!(ctx.no_linger(), true); /// # /// # Ok(()) /// # } /// ``` pub fn set_no_linger(&self, enabled: bool) -> Result<(), Error> { self.raw.as_ref().set_bool(RawCtxOption::Blocky, !enabled) } /// Shut down the ØMQ context. /// /// Context shutdown will cause any blocking operations currently in /// progress on sockets open within context to fail immediately with /// [`CtxTerminated`]. /// /// Any further operations on sockets open within context shall fail /// with [`CtxTerminated`]. /// /// [`CtxTerminated`]: ../error/enum.ErrorKind.html#variant.CtxTerminated pub fn shutdown(&self) { self.raw.shutdown() } pub(crate) fn as_ptr(&self) -> *mut c_void { self.raw.ctx } } impl Default for Ctx { fn default() -> Self { Self::new() } } impl<'a> From<&'a Ctx> for Ctx { fn from(c: &'a Ctx) -> Ctx { c.to_owned() } }
} #[derive(Copy, Clone, Debug)] enum RawCtxOption {
random_line_split
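`CtxConfig` and `CtxBuilder` above are two layers of one pattern: the config stores every knob as an `Option` so that `apply` only touches explicitly-set values, and the builder is a fluent facade over the config. A reduced model of that layering with illustrative types:

#[derive(Default)]
struct Config {
    io_threads: Option<i32>, // None means "leave the target's value alone"
}

impl Config {
    fn apply(&self, target: &mut Target) {
        if let Some(v) = self.io_threads {
            target.io_threads = v;
        }
    }
}

#[derive(Default)]
struct Builder {
    inner: Config,
}

impl Builder {
    // Fluent setter mirroring CtxBuilder::io_threads.
    fn io_threads(&mut self, v: i32) -> &mut Self {
        self.inner.io_threads = Some(v);
        self
    }
    fn apply(&self, target: &mut Target) {
        self.inner.apply(target)
    }
}

struct Target {
    io_threads: i32,
}

fn main() {
    let mut target = Target { io_threads: 1 }; // like the ZMQ default
    Builder::default().io_threads(4).apply(&mut target);
    assert_eq!(target.io_threads, 4);
}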
ctx.rs
//! The ØMQ context type. use crate::{auth::server::AuthServer, error::*}; use libzmq_sys as sys; use sys::errno; use lazy_static::lazy_static; use serde::{Deserialize, Serialize}; use std::{ os::raw::{c_int, c_void}, ptr, str, sync::Arc, thread, }; lazy_static! { static ref GLOBAL_CONTEXT: Ctx = Ctx::new(); } #[derive(Copy, Clone, Debug)] enum RawCtxOption { IOThreads, MaxSockets, MaxMsgSize, SocketLimit, IPV6, Blocky, } impl From<RawCtxOption> for c_int { fn from(r: RawCtxOption) -> c_int { match r { RawCtxOption::IOThreads => sys::ZMQ_IO_THREADS as c_int, RawCtxOption::MaxSockets => sys::ZMQ_MAX_SOCKETS as c_int, RawCtxOption::MaxMsgSize => sys::ZMQ_MAX_MSGSZ as c_int, RawCtxOption::SocketLimit => sys::ZMQ_SOCKET_LIMIT as c_int, RawCtxOption::IPV6 => sys::ZMQ_IPV6 as c_int, RawCtxOption::Blocky => sys::ZMQ_BLOCKY as c_int, } } } #[derive(Debug)] struct RawCtx { ctx: *mut c_void, } impl RawCtx { fn get(&self, option: RawCtxOption) -> i32 { unsafe { sys::zmq_ctx_get(self.ctx, option.into()) } } fn set(&self, option: RawCtxOption, value: i32) -> Result<(), Error> { let rc = unsafe { sys::zmq_ctx_set(self.ctx, option.into(), value) }; if rc == -1 { let errno = unsafe { sys::zmq_errno() }; match errno { errno::EINVAL => Err(Error::new(ErrorKind::InvalidInput { msg: "invalid value", })), _ => panic!(msg_from_errno(errno)), } } else { Ok(()) } } fn set_bool(&self, opt: RawCtxOption, flag: bool) -> Result<(), Error> { self.set(opt, flag as i32) } fn get_bool(&self, opt: RawCtxOption) -> bool { let flag = self.get(opt); flag != 0 } fn terminate(&self) { // We loop in case `zmq_ctx_term` gets interrupted by a signal. loop { let rc = unsafe { sys::zmq_ctx_term(self.ctx) }; if rc == 0 { break; } else { let errno = unsafe { sys::zmq_errno() }; match errno { errno::EINTR => (), _ => unreachable!(), } } } } fn shutdown(&self) { let rc = unsafe { sys::zmq_ctx_shutdown(self.ctx) }; // Should never fail. assert_eq!(rc, 0); } } // The `zmq_ctx` is internally thread-safe. unsafe impl Send for RawCtx {} unsafe impl Sync for RawCtx {} impl Drop for RawCtx { fn drop(&mut self) { self.terminate() } } impl PartialEq for RawCtx { /// Compares the two underlying raw C pointers. fn eq(&self, other: &Self) -> bool { ptr::eq(self.ctx, other.ctx) } } impl Eq for RawCtx {} impl Default for RawCtx { fn default() -> Self { let ctx = unsafe { sys::zmq_ctx_new() }; if ctx.is_null() { panic!(msg_from_errno(unsafe { sys::zmq_errno() })); } Self { ctx } } } /// A config for a [`Ctx`]. /// /// Useful in configuration files. /// /// [`Ctx`]: struct.Ctx.html #[derive(Default, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] pub struct CtxConfig { io_threads: Option<i32>, max_msg_size: Option<i32>, max_sockets: Option<i32>, no_linger: Option<bool>, } impl CtxConfig { pub fn new() -> Self { Self::default() } pub fn build(&self) -> Result<Ctx, Error> { let ctx = Ctx::new(); self.apply(&ctx)?; Ok(ctx) } pub fn apply(&self, ctx: &Ctx) -> Result<(), Error> { if let Some(value) = self.io_threads { ctx.set_io_threads(value)?; } if let Some(value) = self.max_sockets { ctx.set_max_sockets(value)?; } if let Some(value) = self.max_msg_size { ctx.set_max_msg_size(value)?; } if let Some(value) = self.no_linger { ctx.set_no_linger(value)?; } Ok(()) } pub fn io_threads(&self) -> Option<i32> { self.io_threads } pub fn set_io_threads(&mut self, value: Option<i32>) { self.io_threads = value; } pub fn max_msg_size(&self) -> Option<i32> { self.max_msg_size } pub fn set_max_msg_size(&mut self, value: Option<i32>) { self.max_msg_size = value; } pub fn max_sockets(&self) -> Option<i32> { self.max_sockets } pub fn set_max_sockets(&mut self, value: Option<i32>) { self.max_sockets = value; } pub fn no_linger(&self) -> Option<bool> { self.no_linger } pub fn set_no_linger(&mut self, value: Option<bool>) { self.no_linger = value; } } /// A convenience builder for a [`Ctx`]. /// /// Makes complex context configuration more convenient. /// /// [`Ctx`]: struct.Ctx.html #[derive(Default, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] pub struct CtxBuilder { inner: CtxConfig, } impl CtxBuilder { pub fn new() -> Self { Self::default() } /// Builds a `Ctx` from a `CtxBuilder`. /// /// # Usage Example /// ``` /// # use failure::Error; /// # /// # fn main() -> Result<(), Error> { /// use libzmq::*; /// /// let ctx = CtxBuilder::new() /// .io_threads(2) /// .no_linger() /// .build()?; /// /// assert_eq!(ctx.io_threads(), 2); /// assert_eq!(ctx.no_linger(), true); /// # /// # Ok(()) /// # } /// ``` pub fn build(&self) -> Result<Ctx, Error> { let ctx = Ctx::new(); self.apply(&ctx)?; Ok(ctx) } /// Applies a `CtxBuilder` to an existing `Ctx`. /// /// # Usage Example /// ``` /// # use failure::Error; /// # /// # fn main() -> Result<(), Error> { /// use libzmq::*; /// /// let global = Ctx::global(); /// /// CtxBuilder::new() /// .io_threads(0) /// .max_msg_size(420) /// .max_sockets(69) /// .no_linger() /// .apply(global)?; /// /// assert_eq!(global.io_threads(), 0); /// assert_eq!(global.max_msg_size(), 420); /// assert_eq!(global.no_linger(), true); /// assert_eq!(global.max_sockets(), 69); /// # /// # Ok(()) /// # } /// ``` pub fn apply(&self, ctx: &Ctx) -> Result<(), Error> { self.inner.apply(ctx) } /// See [`set_io_threads`]. /// /// [`set_io_threads`]: struct.Ctx.html#method.set_io_threads pub fn i
&mut self, value: i32) -> &mut Self { self.inner.set_io_threads(Some(value)); self } /// See [`set_max_msg_size`]. /// /// [`set_max_msg_size`]: struct.Ctx.html#method.set_max_msg_size pub fn max_msg_size(&mut self, value: i32) -> &mut Self { self.inner.set_max_msg_size(Some(value)); self } /// See [`set_max_sockets`]. /// /// [`set_max_sockets`]: struct.Ctx.html#method.set_max_sockets pub fn max_sockets(&mut self, value: i32) -> &mut Self { self.inner.set_max_sockets(Some(value)); self } /// See [`set_no_linger`]. /// /// [`set_no_linger`]: struct.Ctx.html#method.set_no_linger pub fn no_linger(&mut self) -> &mut Self { self.inner.set_no_linger(Some(true)); self } } /// Keeps the list of sockets and manages the async I/O thread and /// internal queries. /// /// Each context also has an associated `AuthServer` which handles socket /// authentication. /// /// # Drop /// The context will call terminate when dropped, which will cause all /// blocking calls to fail with `CtxTerminated`, then block until /// the following conditions are met: /// * All sockets open within context have been dropped. /// * All messages sent by the application will have either been physically /// transferred to a network peer, or the socket's linger period has expired. /// /// # Thread safety /// A ØMQ context is internally thread safe. /// /// # Multiple Contexts /// Multiple contexts are allowed but are considered exotic. #[derive(Clone, Eq, PartialEq, Debug)] pub struct Ctx { raw: Arc<RawCtx>, } impl Ctx { /// Create a new ØMQ context. /// /// For almost all use cases, using and configuring the [`global`] context /// will be enough. /// /// See [`zmq_ctx_new`]. /// /// [`zmq_ctx_new`]: http://api.zeromq.org/master:zmq-ctx-new /// /// # Usage Example /// ``` /// use libzmq::Ctx; /// /// let ctx = Ctx::new(); /// let cloned = ctx.clone(); /// /// assert_eq!(ctx, cloned); /// assert_ne!(ctx, Ctx::new()); /// ``` /// /// [`global`]: #method.global pub fn new() -> Self { let raw = Arc::new(RawCtx::default()); // Enable ipv6 by default. raw.set_bool(RawCtxOption::IPV6, true).unwrap(); let ctx = Self { raw }; // Start a `ZAP` handler for the context. let mut auth = AuthServer::with_ctx(&ctx).unwrap(); // This thread is guaranteed to terminate before the ctx // since it holds an `Arc` to it. No need to store & join the // thread handle. thread::spawn(move || auth.run()); ctx } /// Returns a reference to the global context. /// /// This is a singleton used by sockets created via their respective /// `::new()` method. It merely exists for convenience and is no different /// from a context obtained via `Ctx::new()`. /// /// # Usage Example /// ``` /// # use failure::Error; /// # /// # fn main() -> Result<(), Error> { /// use libzmq::{Ctx, Client}; /// /// // A socket created via `new` will use the global `Ctx`. /// let client = Client::new()?; /// assert_eq!(client.ctx(), Ctx::global()); /// # /// # Ok(()) /// # } /// ``` pub fn global() -> &'static Ctx { &GLOBAL_CONTEXT } /// Returns the size of the ØMQ thread pool for this context. pub fn io_threads(&self) -> i32 { self.raw.as_ref().get(RawCtxOption::IOThreads) } /// Set the size of the ØMQ thread pool to handle I/O operations. /// /// "The general rule of thumb is to allow one I/O thread per gigabyte of /// data in or out per second." - [`Pieter Hintjens`] /// /// [`Pieter Hintjens`]: http://zguide.zeromq.org/page:all#I-O-Threads /// /// # Default /// The default value is `1`. /// /// # Usage Example /// ``` /// # use failure::Error; /// # /// # fn main() -> Result<(), Error> { /// use libzmq::Ctx; /// /// let ctx = Ctx::new(); /// assert_eq!(ctx.io_threads(), 1); /// /// // Let's say our app exclusively uses the inproc transport /// // for messaging. Then we don't need any I/O threads. /// ctx.set_io_threads(0)?; /// assert_eq!(ctx.io_threads(), 0); /// # /// # Ok(()) /// # } /// ``` pub fn set_io_threads(&self, nb_threads: i32) -> Result<(), Error> { self.raw.as_ref().set(RawCtxOption::IOThreads, nb_threads) } /// Returns the maximum number of sockets allowed for this context. pub fn max_sockets(&self) -> i32 { self.raw.as_ref().get(RawCtxOption::MaxSockets) } /// Sets the maximum number of sockets allowed on the context. /// /// # Default /// The default value is `1023`. /// /// # Usage Example /// ``` /// # use failure::Error; /// # /// # fn main() -> Result<(), Error> { /// use libzmq::Ctx; /// /// let ctx = Ctx::new(); /// assert_eq!(ctx.max_sockets(), 1023); /// /// ctx.set_max_sockets(420)?; /// assert_eq!(ctx.max_sockets(), 420); /// # /// # Ok(()) /// # } /// ``` pub fn set_max_sockets(&self, max: i32) -> Result<(), Error> { self.raw.as_ref().set(RawCtxOption::MaxSockets, max) } /// Returns the maximum size of a message allowed for this context. pub fn max_msg_size(&self) -> i32 { self.raw.as_ref().get(RawCtxOption::MaxMsgSize) } /// Sets the maximum allowed size of a message sent in the context. /// /// # Default /// The default value is `i32::max_value()`. /// /// # Usage Example /// ``` /// # use failure::Error; /// # /// # fn main() -> Result<(), Error> { /// use libzmq::Ctx; /// /// let ctx = Ctx::new(); /// assert_eq!(ctx.max_msg_size(), i32::max_value()); /// /// ctx.set_max_msg_size(i32::max_value() - 1)?; /// assert_eq!(ctx.max_msg_size(), i32::max_value() - 1); /// # /// # Ok(()) /// # } /// ``` pub fn set_max_msg_size(&self, size: i32) -> Result<(), Error> { self.raw.as_ref().set(RawCtxOption::MaxMsgSize, size) } /// Returns the largest number of sockets that the context will accept. pub fn socket_limit(&self) -> i32 { self.raw.as_ref().get(RawCtxOption::SocketLimit) } /// A value of `true` indicates that all new sockets are given a /// linger timeout of zero. /// pub fn no_linger(&self) -> bool { !self.raw.as_ref().get_bool(RawCtxOption::Blocky) } /// When set to `true`, all new sockets are given a linger timeout /// of zero. /// /// # Default /// The default value is `false`. /// /// # Usage Example /// ``` /// # use failure::Error; /// # /// # fn main() -> Result<(), Error> { /// use libzmq::Ctx; /// /// let ctx = Ctx::new(); /// assert_eq!(ctx.no_linger(), false); /// /// ctx.set_no_linger(true)?; /// assert_eq!(ctx.no_linger(), true); /// # /// # Ok(()) /// # } /// ``` pub fn set_no_linger(&self, enabled: bool) -> Result<(), Error> { self.raw.as_ref().set_bool(RawCtxOption::Blocky, !enabled) } /// Shut down the ØMQ context. /// /// Context shutdown will cause any blocking operations currently in /// progress on sockets open within context to fail immediately with /// [`CtxTerminated`]. /// /// Any further operations on sockets open within context shall fail /// with [`CtxTerminated`]. /// /// [`CtxTerminated`]: ../error/enum.ErrorKind.html#variant.CtxTerminated pub fn shutdown(&self) { self.raw.shutdown() } pub(crate) fn as_ptr(&self) -> *mut c_void { self.raw.ctx } } impl Default for Ctx { fn default() -> Self { Self::new() } } impl<'a> From<&'a Ctx> for Ctx { fn from(c: &'a Ctx) -> Ctx { c.to_owned() } }
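A minimal usage sketch of the `Ctx` API above (the `main` function is hypothetical, not part of the original crate): configure a fresh context, then shut it down explicitly.

use libzmq::Ctx;
use failure::Error;

fn main() -> Result<(), Error> {
    let ctx = Ctx::new();
    // One I/O thread per gigabyte per second of traffic is the rule of thumb.
    ctx.set_io_threads(2)?;
    // Lower the socket cap from the default of 1023.
    ctx.set_max_sockets(512)?;
    // Give every new socket a zero linger timeout.
    ctx.set_no_linger(true)?;
    // After this, blocking calls on the context's sockets fail with `CtxTerminated`.
    ctx.shutdown();
    Ok(())
}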
main.rs
HashMap::new(); let mut offsets = Vec::new(); let mut num_bones = 0u32; for mesh in scene.get_meshes().iter() { for bone in mesh.get_bones().iter() { let name = bone.name.to_string(); match bone_map.get(&name) { Some(_) => continue, None => { bone_map.insert(name, num_bones); offsets.push(bone.offset_matrix); num_bones += 1; } } } } BoneMap { bone_map: bone_map, offsets: offsets, transforms: Vec::from_elem(MAX_BONES, vecmath::mat4_id()), } } #[inline(always)] fn get_id(&self, name: &String) -> Option<u32> { match self.bone_map.get(name) { None => None, Some(val) => Some(*val), } } } struct ModelComponent { pub batch: ModelBatch, pub shader_data: ShaderParam, } struct Model<'a> { pub vertices: Vec<Vertex>, pub indices: Vec<u32>, pub batches: Vec<ModelComponent>, pub scene: ai::Scene<'a>, pub bone_map: RefCell<BoneMap>, pub global_inverse: ai::Matrix4x4, pub bone_transform_buffer: gfx::BufferHandle<Mat4>, } #[inline(always)] fn lerp<S, T: Add<T,T> + Sub<T,T> + Mul<S,T>>(start: T, end: T, s: S) -> T { return start + (end - start) * s; } impl<'a> Model<'a> { fn from_file(ai_scene: ai::Scene<'a>, graphics: &mut gfx::Graphics<gfx::GlDevice, gfx::GlCommandBuffer>, program: &gfx::ProgramHandle, state: &gfx::DrawState, texture_store: &TextureStore, ) -> Model<'a> { // calculate the space we need to allocate let mut num_vertices = 0; let mut num_indices = 0; for mesh in ai_scene.get_meshes().iter() { num_vertices += mesh.num_vertices; num_indices += mesh.num_faces * 3; } // prepare the data structures used to store the scene let mut vertices = Vec::with_capacity(num_vertices as uint); let mut indices = Vec::with_capacity(num_indices as uint); // The bone weights and ids. Each vertex may be influenced by up to // 4 bones let mut bone_weights: Vec<Vec4> = Vec::from_elem(num_vertices as uint, [0.0, ..4]); let mut bone_ids: Vec<IVec4> = Vec::from_elem(num_vertices as uint, [0, ..4]); let bone_map = BoneMap::new(&ai_scene); // stores the first index of each mesh, used for creating batches let mut start_indices = Vec::with_capacity(ai_scene.num_meshes as uint + 1); let mut materials = Vec::with_capacity(ai_scene.num_materials as uint); let mut batches = Vec::with_capacity(ai_scene.num_meshes as uint); // Create the buffer for the bone transformations. We fill this // up each time we draw, so no need to do it here. let u_bone_transformations: gfx::BufferHandle<Mat4> = graphics.device.create_buffer(MAX_BONES, gfx::BufferUsage::Dynamic);
// find the textures used by this model from the list of materials for mat in ai_scene.get_materials().iter() { let texture_src = mat.get_texture(ai::material::TextureType::Diffuse, 0 ); match texture_src { Some(s) => { match texture_store.textures.get(&s) { Some(t) => materials.push(t), None => panic!("couldn't load texture: {}", s), } } None => { panic!("couldn't read texture name from material: {}", texture_src); } } } // prepare the data for a format that can be loaded to the gpu { start_indices.push(0); for mesh in ai_scene.get_meshes().iter() { let vert_id_offset = vertices.len() as u32; // get all the bone information for this mesh for bone in mesh.get_bones().iter() { let bone_id = bone_map.get_id(&bone.name.to_string()); // println!("{}: Bone id and name: {} ===> {}", // mesh_num, bone_id, bone.name); let bone_id = match bone_id { None => panic!("Invalid bone reference"), Some(id) => id, }; 'next_weight: for vert_weight in bone.get_weights().iter() { let vertex_id = (vert_id_offset + vert_weight.vertex_id) as uint; for i in range(0u, 4) { if bone_ids[vertex_id][i] == 0 { bone_weights[vertex_id][i] = vert_weight.weight; bone_ids[vertex_id][i] = bone_id; continue 'next_weight; } } // assimp should have limited bone weights to 4 unreachable!(); } } let verts = mesh.get_vertices(); let norms = mesh.get_normals(); let tex_coords = mesh.get_texture_coords(); // fill up the vertex buffer for i in range(0u, verts.len()) { vertices.push( Vertex { a_position: verts[i].to_array(), a_normal: norms[i].to_array(), a_tex_coord: if tex_coords.len() == 0 { [0.0, 0.0, 0.0] } else { // only support 1 texture coord tex_coords[0][i].to_array() }, a_bone_weights: bone_weights[i + vert_id_offset as uint], a_bone_ids: bone_ids[i + vert_id_offset as uint], }); } // fill up the index buffer for face in mesh.get_faces().iter() { let face_indices = face.get_indices(); assert!(face_indices.len() == 3); indices.push(face_indices[0] + vert_id_offset); indices.push(face_indices[1] + vert_id_offset); indices.push(face_indices[2] + vert_id_offset); } start_indices.push(indices.len() as u32); } } // create the vertex and index buffers // generate the batches used to draw the object { let vert_buf = graphics.device.create_mesh(vertices.as_slice()); let ind_buf = graphics.device.create_buffer_static(indices.as_slice()); let mut buf_slices = Vec::with_capacity(ai_scene.num_meshes as uint + 1); for ind in start_indices.windows(2) { buf_slices.push(gfx::Slice { start: ind[0], end: ind[1], prim_type: gfx::TriangleList, // prim_type: gfx::LineStrip, kind: gfx::SliceKind::Index32(ind_buf, 0 as u32), }); } for (slice, mesh) in buf_slices.iter() .zip(ai_scene.get_meshes().iter()) { let shader_data = ShaderParam { u_model_view_proj: vecmath::mat4_id(), t_color: (*materials[mesh.material_index as uint], None), u_bone_transformations: u_bone_transformations.raw(), }; batches.push(ModelComponent { batch: graphics.make_batch(program, &vert_buf, *slice, state).unwrap(), shader_data: shader_data, }); } } Model { vertices: vertices, indices: indices, batches: batches, bone_map: RefCell::new(bone_map), bone_transform_buffer: u_bone_transformations, global_inverse: ai_scene.get_root_node().transformation.inverse(), scene: ai_scene, } } fn interpolate_position(&self, time: f64, node: &ai::animation::NodeAnim ) -> ai::Vector3D { let keys = node.get_position_keys(); // only one key, so no need to interpolate if keys.len() == 1 { return keys[0].value } // otherwise, find out which keys the given time falls between // and interpolate for 
pos_keys in keys.windows(2) { // note: once we find a match, we return if time < pos_keys[1].time { let dt = pos_keys[1].time - pos_keys[0].time; // how far in between the frames we are on a scale from 0 to 1 let s = (time - pos_keys[0].time) / dt; return lerp(pos_keys[0].value, pos_keys[1].value, s as f32); } } // get the last frame, if we didn't find a match return keys[keys.len()-1].value } fn interpolate_scaling(&self, time: f64, node: &ai::animation::NodeAnim ) -> ai::Vector3D { let keys = node.get_scaling_keys(); // only one key, so no need to interpolate if keys.len() == 1 { return keys[0].value } // otherwise, find out which keys the given time falls between // and interpolate for scale_keys in keys.windows(2) { // note: once we find a match, we return if time < scale_keys[1].time { let dt = scale_keys[1].time - scale_keys[0].time; // how far in between the frames we are on a scale from 0 to 1 let s = (time - scale_keys[0].time) / dt; return lerp(scale_keys[0].value, scale_keys[1].value, s as f32); } } // get the last frame, if we didn't find a match return keys[keys.len()-1].value } fn interpolate_rotation(&self, time: f64, node: &ai::animation::NodeAnim ) -> ai::Quaternion { let keys = node.get_rotation_keys(); // only one key, so no need to interpolate if keys.len() == 1 { return keys[0].value } // otherwise, find out which keys the given time falls between // and interpolate for rot_keys in keys.windows(2) { // note: once we find a match, we return if time < rot_keys[1].time { let dt = rot_keys[1].time - rot_keys[0].time; // how far in between the frames we are on a scale from 0 to 1 let s = (time - rot_keys[0].time) / dt; // nlerp return lerp(rot_keys[0].value, rot_keys[1].value, s as f32).normalize(); } } // get the last frame, if we didn't find a match return keys[keys.len()-1].value } fn update_bone_transforms(&self, time: f64, anim_num: uint, scene_node: &ai::scene::Node, parent_transform: &ai::Matrix4x4, ) { // calculate the transformation matrix for this node let animation = self.scene.get_animations()[anim_num]; let node_transform = match animation.find_node_anim(&scene_node.name) { Some(node_anim) => { self.interpolate_position(time, node_anim).translation_matrix() * self.interpolate_rotation(time, node_anim).rotation_matrix() * self.interpolate_scaling(time, node_anim).scaling_matrix() }, None => { scene_node.transformation } }; let node_to_global = *parent_transform * node_transform; let opt_id = { self.bone_map.borrow().get_id(&scene_node.name.to_string()) }; match opt_id { None => { }, Some(id) => { let offset = { self.bone_map.borrow().offsets[id as uint] }; { self.bone_map.borrow_mut().transforms[id as uint] = (self.global_inverse * node_to_global * offset) .transpose().to_array(); } } } for child in scene_node.get_children().iter() { self.update_bone_transforms(time, anim_num, *child, &node_to_global, ); } } fn draw(&mut self, graphics: &mut gfx::Graphics<gfx::GlDevice, gfx::GlCommandBuffer>, frame: &gfx::Frame, time: f64, transform: Mat4, ) { self.update_bone_transforms(time, 0, self.scene.get_root_node(), &ai::Matrix4x4::identity(), ); graphics.device.update_buffer(self.bone_transform_buffer, self.bone_map.borrow().transforms.as_slice(), 0, ); for &mut component in self.batches.iter() { component.shader_data.u_model_view_proj = transform; graphics.draw(&component.batch, &component.shader_data, frame); } } } #[deriving(Show)] #[vertex_format] struct Vertex { #[as_float] a_position: [f32, ..3], #[as_float] a_normal: [f32, ..3], #[as_float] a_tex_coord: [f32, ..3], 
#[as_float]
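The keyframe interpolation above follows one pattern for positions, scales, and rotations alike: find the pair of keys bracketing the query time, compute the normalized offset between them, and lerp (with a final normalize for quaternions, i.e. nlerp). A minimal modern-Rust sketch of that pattern, with a hypothetical scalar `Keyframe` standing in for the assimp key types:

use std::ops::{Add, Mul, Sub};

fn lerp<S: Copy, T>(start: T, end: T, s: S) -> T
where
    T: Copy + Add<Output = T> + Sub<Output = T> + Mul<S, Output = T>,
{
    start + (end - start) * s
}

#[derive(Copy, Clone)]
struct Keyframe {
    time: f64,
    value: f32, // a scalar stands in for Vector3D/Quaternion here
}

fn sample(keys: &[Keyframe], time: f64) -> f32 {
    // only one key, so no need to interpolate
    if keys.len() == 1 {
        return keys[0].value;
    }
    // find the bracketing pair and interpolate between its endpoints
    for pair in keys.windows(2) {
        if time < pair[1].time {
            let s = (time - pair[0].time) / (pair[1].time - pair[0].time);
            return lerp(pair[0].value, pair[1].value, s as f32);
        }
    }
    // past the last key: clamp to the final frame
    keys[keys.len() - 1].value
}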
lib.rs
use bitflags::bitflags; use std::{ fmt, fs::{File, OpenOptions}, io::{self, prelude::*, Result, SeekFrom}, iter, mem::{self, MaybeUninit}, ops::{Deref, DerefMut}, os::unix::{ fs::OpenOptionsExt, io::AsRawFd, }, ptr, slice, }; mod arch; mod kernel; macro_rules! trace { ($($inner:expr),*) => {{ if cfg!(feature = "trace") { dbg!($($inner),*) } else { ($($inner),*) } }}; } fn e<T>(res: syscall::Result<T>) -> Result<T> { res.map_err(|err| io::Error::from_raw_os_error(err.errno)) } bitflags! { pub struct Flags: u64 { const STOP_PRE_SYSCALL = syscall::PTRACE_STOP_PRE_SYSCALL.bits(); const STOP_POST_SYSCALL = syscall::PTRACE_STOP_POST_SYSCALL.bits(); const STOP_SINGLESTEP = syscall::PTRACE_STOP_SINGLESTEP.bits(); const STOP_SIGNAL = syscall::PTRACE_STOP_SIGNAL.bits(); const STOP_BREAKPOINT = syscall::PTRACE_STOP_BREAKPOINT.bits(); const STOP_EXIT = syscall::PTRACE_STOP_EXIT.bits(); const STOP_ALL = Self::STOP_PRE_SYSCALL.bits | Self::STOP_POST_SYSCALL.bits | Self::STOP_SINGLESTEP.bits | Self::STOP_SIGNAL.bits | Self::STOP_BREAKPOINT.bits | Self::STOP_EXIT.bits; const EVENT_CLONE = syscall::PTRACE_EVENT_CLONE.bits(); const EVENT_ALL = Self::EVENT_CLONE.bits; const FLAG_IGNORE = syscall::PTRACE_FLAG_IGNORE.bits(); const FLAG_ALL = Self::FLAG_IGNORE.bits; } } pub type Pid = usize; #[derive(Clone, Copy, Debug)] pub struct IntRegisters(pub syscall::IntRegisters); impl IntRegisters { pub fn format_syscall_bare(&self) -> String { arch::format_syscall(None, &self) } pub fn format_syscall_full(&self, mem: &mut Memory) -> String { arch::format_syscall(Some(mem), &self) } pub fn return_value(&self) -> usize { arch::return_value(&self) } } impl Deref for IntRegisters { type Target = syscall::IntRegisters; fn deref(&self) -> &Self::Target { &self.0 } } impl DerefMut for IntRegisters { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } #[derive(Clone, Copy, Debug)] pub struct FloatRegisters(pub syscall::FloatRegisters); impl Deref for FloatRegisters { type Target = syscall::FloatRegisters; fn deref(&self) -> &Self::Target { &self.0 } } impl DerefMut for FloatRegisters { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub enum EventData { EventClone(usize), StopSignal(usize, usize), StopExit(usize), Unknown(usize, usize, usize, usize, usize, usize), } #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub struct Event { pub cause: Flags, pub data: EventData, } impl Event { pub fn new(inner: syscall::PtraceEvent) -> Self { Self { cause: Flags::from_bits_truncate(inner.cause.bits()), data: match inner.cause { syscall::PTRACE_EVENT_CLONE => EventData::EventClone(inner.a), syscall::PTRACE_STOP_SIGNAL => EventData::StopSignal(inner.a, inner.b), syscall::PTRACE_STOP_EXIT => EventData::StopExit(inner.a), _ => EventData::Unknown(inner.a, inner.b, inner.c, inner.d, inner.e, inner.f), }, } } } pub struct Registers { pub float: File, pub int: File, } impl Registers { pub fn attach(pid: Pid) -> Result<Self> { Ok(Self { float: File::open(format!("proc:{}/regs/float", pid))?, int: File::open(format!("proc:{}/regs/int", pid))?, }) } pub fn get_float(&mut self) -> Result<FloatRegisters> { let mut regs = syscall::FloatRegisters::default(); trace!(self.float.read(&mut regs)?, &regs); Ok(FloatRegisters(regs)) } pub fn set_float(&mut self, regs: &FloatRegisters) -> Result<()> { trace!(self.float.write(&regs)?, &regs); Ok(()) } pub fn get_int(&mut self) -> Result<IntRegisters> { let mut regs = syscall::IntRegisters::default(); trace!(self.int.read(&mut regs)?, &regs); 
Ok(IntRegisters(regs)) } pub fn set_int(&mut self, regs: &IntRegisters) -> Result<()> { trace!(self.int.write(&regs)?, &regs); Ok(()) } } impl fmt::Debug for Registers { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Registers(...)") } } pub struct Memory { pub file: File, } impl Memory { pub fn attach(pid: Pid) -> Result<Self> { Ok(Self { file: File::open(format!("proc:{}/mem", pid))?, }) } pub fn read(&mut self, address: *const u8, memory: &mut [u8]) -> Result<()> { self.file.seek(SeekFrom::Start(address as u64))?; self.file.read_exact(memory)?; trace!(memory); Ok(()) } pub fn write(&mut self, address: *const u8, memory: &[u8]) -> Result<()> { self.file.seek(SeekFrom::Start(address as u64))?; self.file.write_all(memory)?; trace!(memory); Ok(()) } /// Writes a software breakpoint to the specified memory address, and /// returns the byte that was previously there. pub fn set_breakpoint(&mut self, address: *const u8) -> Result<u8> { let mut previous = [0]; self.read(address, &mut previous)?; arch::set_breakpoint(self, address)?; Ok(previous[0]) } pub fn cursor(&mut self) -> Result<u64> { self.file.seek(SeekFrom::Current(0)) } } impl fmt::Debug for Memory { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Memory(...)") } } pub struct Tracer { pub file: File, pub regs: Registers, pub mem: Memory, } impl Tracer { /// Attach a tracer to the process with the specified PID. This will stop it. pub fn attach(pid: Pid) -> Result<Self> { Ok(Self { file: OpenOptions::new() .read(true) .write(true) .truncate(true) .open(format!("proc:{}/trace", pid))?, regs: Registers::attach(pid)?, mem: Memory::attach(pid)?, }) } /// Set a breakpoint on the next specified stop, and wait for the /// breakpoint to be reached. For convenience in the majority of /// use-cases, this panics on non-breakpoint events and returns /// the breaking event whenever the first matching breakpoint is /// hit. To handle non-breakpoint events, see the /// `next_event` function. pub fn next(&mut self, flags: Flags) -> Result<Event> { self.next_event(flags)?.from_callback(|event| { panic!( "`Tracer::next` should never be used to handle non-breakpoint events, see \ `Tracer::next_event` instead. Event: {:?}", event ) }) } /// Similar to `next`, but instead of conveniently returning a
/// breakpoint event, it returns an event handler that lets you /// handle events yourself. pub fn next_event(&mut self, flags: Flags) -> Result<EventHandler> { trace!(flags, self.file.write(&flags.bits().to_ne_bytes())?); Ok(EventHandler { inner: self }) } /// Convert this tracer to be nonblocking. Setting breakpoints /// will no longer wait by default, but you will gain access to a /// `wait` function which will do the same as in blocking /// mode. Useful for multiplexing tracers using the `event:` /// scheme. pub fn nonblocking(self) -> Result<NonblockTracer> { let old_flags = e(syscall::fcntl( self.file.as_raw_fd() as usize, syscall::F_GETFL, 0, ))?; let new_flags = old_flags | syscall::O_NONBLOCK; e(syscall::fcntl( self.file.as_raw_fd() as usize, syscall::F_SETFL, new_flags, ))?; Ok(NonblockTracer { old_flags: Some(old_flags), inner: self, }) } /// Same as `EventHandler::iter`, but does not rely on having an /// event handler. When only using a blocking tracer you shouldn't /// need to worry about this. pub fn events(&self) -> Result<impl Iterator<Item = Result<Event>>> { let mut buf = [MaybeUninit::<syscall::PtraceEvent>::uninit(); 4]; let mut i = 0; let mut len = 0; // I don't like this clone, but I don't want tracer.events() // to prevent tracer from being borrowed again. let mut file = self.file.try_clone()?; Ok(iter::from_fn(move || { if i >= len { len = match file.read(unsafe { slice::from_raw_parts_mut( buf.as_mut_ptr() as *mut u8, buf.len() * mem::size_of::<syscall::PtraceEvent>(), ) }) { Ok(n) => n / mem::size_of::<syscall::PtraceEvent>(), Err(err) => return Some(Err(err)), }; if len == 0 { return None; } i = 0; } let ret = Event::new(unsafe { ptr::read(buf[i].as_mut_ptr()) }); trace!(&ret); i += 1; Some(Ok(ret)) })) } } impl fmt::Debug for Tracer { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Tracer(...)") } } #[must_use = "The tracer won't block unless you wait for events"] pub struct EventHandler<'a> { inner: &'a mut Tracer, } impl<'a> EventHandler<'a> { /// Pop one event. Prefer the use of the `iter` function instead, /// as it batches reads. The only reason to use this is to have /// control over exactly what gets requested from the kernel. pub fn pop_one(&mut self) -> Result<Option<Event>> { let mut event = syscall::PtraceEvent::default(); match self.inner.file.read(&mut event)? { 0 => Ok(None), _ => Ok(Some(Event::new(event))), } } /// Returns an iterator over ptrace events. This is a blocking stream. pub fn iter(&self) -> Result<impl Iterator<Item = Result<Event>>> { self.inner.events() } /// Handle non-breakpoint events by calling a specified callback until /// a breakpoint is reached pub fn from_callback<F, E>(self, mut callback: F) -> std::result::Result<Event, E> where F: FnMut(Event) -> std::result::Result<(), E>, E: From<io::Error>, { let mut events = self.iter()?; loop { let event = events.next().expect("events should be an infinite stream")?; if event.cause & Flags::EVENT_ALL == event.cause { callback(event)?; } else { break Ok(event); } } } /// Ignore non-breakpoint events, just acknowledge them and move on pub fn ignore(self) -> Result<Event> { self.from_callback(|_| Ok(())) } } pub struct NonblockTracer { old_flags: Option<usize>, inner: Tracer, } impl NonblockTracer { /// Similar to `Tracer::attach`, but opens directly in nonblocking /// mode, which saves one system call.
pub fn attach(pid: Pid) -> Result<Self> { Ok(Self { old_flags: None, inner: Tracer { file: OpenOptions::new() .read(true) .write(true) .truncate(true) .custom_flags(syscall::O_NONBLOCK as i32) .open(format!("proc:{}/trace", pid))?, regs: Registers::attach(pid)?, mem: Memory::attach(pid)?, }, }) } /// Sets a breakpoint on the specified stop, without doing /// anything else: no handling of events, no getting what /// breakpoint actually caused this, no waiting for the /// breakpoint. pub fn next(&mut self, flags: Flags) -> Result<()> { trace!(flags, self.file.write(&flags.bits().to_ne_bytes())?); Ok(()) } /// Stub that prevents you from accidentally calling `next_event` /// on the tracer, do not use. #[deprecated( since = "forever", note = "Do not use next_event on a nonblocking tracer" )] pub fn next_event(&mut self, _flags: Flags) -> Result<EventHandler> { panic!("Tried to use next_event on a nonblocking tracer") } /// Convert this tracer back to a blocking version. Any events /// not yet read are discarded. pub fn blocking(self) -> Result<Tracer> { self.events()?.for_each(|_| ()); let old_flags = match self.old_flags { Some(flags) => flags, None => { let flags = e(syscall::fcntl( self.file.as_raw_fd() as usize, syscall::F_GETFL, 0, ))?; flags & !syscall::O_NONBLOCK }, }; e(syscall::fcntl( self.file.as_raw_fd() as usize, syscall::F_SETFL, old_flags, ))?; Ok(self.inner) } } impl Deref for NonblockTracer { type Target = Tracer; fn deref(&self) -> &Self::Target { &self.inner } } impl DerefMut for NonblockTracer { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner } } impl fmt::Debug for NonblockTracer { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "NonblockTracer(...)") } }
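A minimal usage sketch of the tracer API above (the call site and `pid` are hypothetical): attach, wait for a single-step stop, then read the integer registers.

fn trace_one_step(pid: Pid) -> Result<()> {
    // Attaching stops the target process.
    let mut tracer = Tracer::attach(pid)?;
    // Block until the next single-step stop; non-breakpoint events panic
    // here, so use `next_event` if you need to handle those yourself.
    let event = tracer.next(Flags::STOP_SINGLESTEP)?;
    println!("stopped on: {:?}", event);
    let regs = tracer.regs.get_int()?;
    println!("registers: {:?}", regs);
    Ok(())
}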
server.rs
use std::io::IoResult; use crypto::sha1::Sha1; use crypto::digest::Digest; use serialize::base64::{ToBase64, STANDARD}; use std::ascii::AsciiExt; use time; use std::io::{Listener, Acceptor}; use std::io::net::tcp::TcpListener; use std::io::net::tcp::TcpStream; use http::buffer::BufferedStream; use std::thread::Thread; use std::sync::mpsc::{channel, Sender, Receiver}; use http::server::{Server, Request, ResponseWriter}; use http::status::SwitchingProtocols; use http::headers::HeaderEnum; use http::headers::response::Header::ExtensionHeader; use http::headers::connection::Connection::Token; use http::method::Method::Get; pub use message::Payload::{Text, Binary, Empty}; pub use message::Opcode::{ContinuationOp, TextOp, BinaryOp, CloseOp, PingOp, PongOp}; use message::Message; static WEBSOCKET_SALT: &'static str = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"; pub trait WebSocketServer: Server { // called when a web socket connection is successfully established. // // this can't block! leaving implementation to trait user, in case they // want custom scheduling, client tracking, reconnect logic, etc. // // TODO: may want to send more info in, such as the connecting IP address? fn handle_ws_connect(&self, receiver: Receiver<Box<Message>>, sender: Sender<Box<Message>>) -> (); // XXX: this is mostly a copy of the serve_forever fn in the Server trait. // rust-http needs some changes in order to avoid this duplication fn ws_serve_forever(self) { let config = self.get_config(); debug!("About to bind to {}", config.bind_address); let mut acceptor = match TcpListener::bind((config.bind_address.ip.to_string().as_slice(), config.bind_address.port)).listen() { Err(err) => { error!("bind or listen failed :-(: {}", err); return; }, Ok(acceptor) => acceptor, }; debug!("listening"); loop { let stream = match acceptor.accept() { Err(error) => { debug!("accept failed: {}", error); // Question: is this the correct thing to do? We should probably be more // intelligent, for there are some accept failures that are likely to be // permanent, such that continuing would be a very bad idea, such as // ENOBUFS/ENOMEM; and some where it should just be ignored, e.g. // ECONNABORTED. TODO. continue; }, Ok(socket) => socket, }; let child_self = self.clone(); Thread::spawn(move || { let mut stream = BufferedStream::new(stream); debug!("accepted connection"); let mut successful_handshake = false; loop { // A keep-alive loop, condition at end let (request, err_status) = Request::load(&mut stream); let close_connection = request.close_connection; let mut response = ResponseWriter::new(&mut stream); match err_status { Ok(()) => { successful_handshake = child_self.handle_possible_ws_request(request, &mut response); // Ensure that we actually do send a response: match response.try_write_headers() { Err(err) => { error!("Writing headers failed: {}", err); return; // Presumably bad connection, so give up. }, Ok(_) => (), } }, Err(status) => { // Uh oh, it's a response that I as a server cannot cope with. // No good user-agent should have caused this, so for the moment // at least I am content to send no body in the response. response.status = status; response.headers.content_length = Some(0); match response.write_headers() { Err(err) => { error!("Writing headers failed: {}", err); return; // Presumably bad connection, so give up. }, Ok(_) => (), } }, } // Ensure the request is flushed, any Transfer-Encoding completed, etc. 
match response.finish_response() { Err(err) => { error!("finishing response failed: {}", err); return; // Presumably bad connection, so give up. }, Ok(_) => (), } if successful_handshake || close_connection { break; } } if successful_handshake { child_self.serve_websockets(stream).unwrap(); } }).detach(); } } fn serve_websockets(&self, stream: BufferedStream<TcpStream>) -> IoResult<()> { let mut stream = stream.wrapped; let write_stream = stream.clone(); let (in_sender, in_receiver) = channel(); let (out_sender, out_receiver) = channel(); self.handle_ws_connect(in_receiver, out_sender); // write task Thread::spawn(move || { // ugh: https://github.com/mozilla/rust/blob/3dbc1c34e694f38daeef741cfffc558606443c15/src/test/run-pass/kindck-implicit-close-over-mut-var.rs#L40-L44 // work to fix this is ongoing here: https://github.com/mozilla/rust/issues/11958 let mut write_stream = write_stream; loop { let message = out_receiver.recv().unwrap(); message.send(&mut write_stream).unwrap(); // fails this task in case of an error; FIXME make sure this fails the read (parent) task } }).detach(); // read task, effectively the parent of the write task loop { let message = Message::load(&mut stream).unwrap(); // fails the task if there's an error. debug!("message: {}", message); match message.opcode { CloseOp => { try!(stream.close_read()); try!(message.send(&mut stream)); // complete close handshake - send the same message right back at the client try!(stream.close_write()); break; // as this task dies, this should release the write task above, as well as the task set up in handle_ws_connection, if any }, PingOp => { let pong = Message { payload: message.payload, opcode: PongOp }; try!(pong.send(&mut stream)); }, PongOp => (), _ => in_sender.send(message).unwrap() } } Ok(()) } fn sec_websocket_accept(&self, sec_websocket_key: &str) -> String { // NOTE from RFC 6455 // // To prove that the handshake was received, the server has to take two // pieces of information and combine them to form a response. The first // piece of information comes from the |Sec-WebSocket-Key| header field // in the client handshake: // // Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ== // // For this header field, the server has to take the value (as present // in the header field, e.g., the base64-encoded [RFC4648] version minus // any leading and trailing whitespace) and concatenate this with the // Globally Unique Identifier (GUID, [RFC4122]) "258EAFA5-E914-47DA- // 95CA-C5AB0DC85B11" in string form, which is unlikely to be used by // network endpoints that do not understand the WebSocket Protocol. A // SHA-1 hash (160 bits) [FIPS.180-3], base64-encoded (see Section 4 of // [RFC4648]), of this concatenation is then returned in the server's // handshake.
let mut sh = Sha1::new(); let mut out = [0u8; 20]; sh.input_str((String::from_str(sec_websocket_key) + WEBSOCKET_SALT).as_slice()); sh.result(out.as_mut_slice()); return out.to_base64(STANDARD); } // check if the http request is a web socket upgrade request, and return true if so. // otherwise, fall back on the regular http request handler fn handle_possible_ws_request(&self, r: Request, w: &mut ResponseWriter) -> bool { // TODO allow configuration of endpoint for websocket match (r.method.clone(), r.headers.upgrade.clone()) { // (&Get, &Some("websocket"), &Some(box [Token(box "Upgrade")])) => { // FIXME this doesn't work, but the client must have the header "Connection: Upgrade" (Get, Some(ref upgrade)) => { if !upgrade.as_slice().eq_ignore_ascii_case("websocket") { self.handle_request(r, w); return false; } // TODO client must have the header "Connection: Upgrade" // // TODO The request MUST include a header field with the name // |Sec-WebSocket-Version|. The value of this header field MUST be 13. // WebSocket Opening Handshake w.status = SwitchingProtocols; w.headers.upgrade = Some(String::from_str("websocket")); // w.headers.transfer_encoding = None; w.headers.content_length = Some(0); w.headers.connection = Some(vec!(Token(String::from_str("Upgrade")))); w.headers.date = Some(time::now_utc()); w.headers.server = Some(String::from_str("rust-ws/0.1-pre")); for header in r.headers.iter() { debug!("Header {}: {}", header.header_name(), header.header_value()); } // NOTE: think this is actually Sec-WebSocket-Key (capital Web[S]ocket), but rust-http normalizes header names match r.headers.extensions.get(&String::from_str("Sec-Websocket-Key")) { Some(val) => { let sec_websocket_accept = self.sec_websocket_accept((*val).as_slice()); w.headers.insert(ExtensionHeader(String::from_str("Sec-WebSocket-Accept"), sec_websocket_accept)); }, None => panic!() } return true; // successful_handshake }, (_, _) => self.handle_request(r, w) } return false; } }
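A minimal sketch of a `handle_ws_connect` implementation in the same pre-1.0 style as the trait above (the echo behaviour and the surrounding impl are hypothetical): spawn a task that sends every inbound message straight back to the client. For reference, the handshake computation above matches the RFC 6455 test vector: the key "dGhlIHNhbXBsZSBub25jZQ==" must produce the accept value "s3pPLMBiTxaQ9kYGzzhZRbK+xOo=".

// Inside a hypothetical `impl WebSocketServer for MyServer`:
fn handle_ws_connect(&self, receiver: Receiver<Box<Message>>, sender: Sender<Box<Message>>) {
    Thread::spawn(move || {
        loop {
            // Blocks until the read task forwards a message from the client.
            let message = receiver.recv().unwrap();
            // Echo it back; the write task will serialize it onto the stream.
            sender.send(message).unwrap();
        }
    }).detach();
}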
server.rs
use std::io::IoResult; use crypto::sha1::Sha1; use crypto::digest::Digest; use serialize::base64::{ToBase64, STANDARD}; use std::ascii::AsciiExt; use time; use std::io::{Listener, Acceptor}; use std::io::net::tcp::TcpListener; use std::io::net::tcp::TcpStream;
use http::server::{Server, Request, ResponseWriter}; use http::status::SwitchingProtocols; use http::headers::HeaderEnum; use http::headers::response::Header::ExtensionHeader; use http::headers::connection::Connection::Token; use http::method::Method::Get; pub use message::Payload::{Text, Binary, Empty}; pub use message::Opcode::{ContinuationOp, TextOp, BinaryOp, CloseOp, PingOp, PongOp}; use message::Message; static WEBSOCKET_SALT: &'static str = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"; pub trait WebSocketServer: Server { // called when a web socket connection is successfully established. // // this can't block! leaving implementation to trait user, in case they // want to custom scheduling, tracking clients, reconnect logic, etc. // // TODO: may want to send more info in, such as the connecting IP address? fn handle_ws_connect(&self, receiver: Receiver<Box<Message>>, sender: Sender<Box<Message>>) -> (); // XXX: this is mostly a copy of the serve_forever fn in the Server trait. // rust-http needs some changes in order to avoid this duplication fn ws_serve_forever(self) { let config = self.get_config(); debug!("About to bind to {}", config.bind_address); let mut acceptor = match TcpListener::bind((config.bind_address.ip.to_string().as_slice(), config.bind_address.port)).listen() { Err(err) => { error!("bind or listen failed :-(: {}", err); return; }, Ok(acceptor) => acceptor, }; debug!("listening"); loop { let stream = match acceptor.accept() { Err(error) => { debug!("accept failed: {}", error); // Question: is this the correct thing to do? We should probably be more // intelligent, for there are some accept failures that are likely to be // permanent, such that continuing would be a very bad idea, such as // ENOBUFS/ENOMEM; and some where it should just be ignored, e.g. // ECONNABORTED. TODO. continue; }, Ok(socket) => socket, }; let child_self = self.clone(); Thread::spawn(move || { let mut stream = BufferedStream::new(stream); debug!("accepted connection"); let mut successful_handshake = false; loop { // A keep-alive loop, condition at end let (request, err_status) = Request::load(&mut stream); let close_connection = request.close_connection; let mut response = ResponseWriter::new(&mut stream); match err_status { Ok(()) => { successful_handshake = child_self.handle_possible_ws_request(request, &mut response); // Ensure that we actually do send a response: match response.try_write_headers() { Err(err) => { error!("Writing headers failed: {}", err); return; // Presumably bad connection, so give up. }, Ok(_) => (), } }, Err(status) => { // Uh oh, it's a response that I as a server cannot cope with. // No good user-agent should have caused this, so for the moment // at least I am content to send no body in the response. response.status = status; response.headers.content_length = Some(0); match response.write_headers() { Err(err) => { error!("Writing headers failed: {}", err); return; // Presumably bad connection, so give up. }, Ok(_) => (), } }, } // Ensure the request is flushed, any Transfer-Encoding completed, etc. match response.finish_response() { Err(err) => { error!("finishing response failed: {}", err); return; // Presumably bad connection, so give up. 
}, Ok(_) => (), } if successful_handshake || close_connection { break; } } if successful_handshake { child_self.serve_websockets(stream).unwrap(); } }).detach(); } } fn serve_websockets(&self, stream: BufferedStream<TcpStream>) -> IoResult<()> { let mut stream = stream.wrapped; let write_stream = stream.clone(); let (in_sender, in_receiver) = channel(); let (out_sender, out_receiver) = channel(); self.handle_ws_connect(in_receiver, out_sender); // write task Thread::spawn(move || { // ugh: https://github.com/mozilla/rust/blob/3dbc1c34e694f38daeef741cfffc558606443c15/src/test/run-pass/kindck-implicit-close-over-mut-var.rs#L40-L44 // work to fix this is ongoing here: https://github.com/mozilla/rust/issues/11958 let mut write_stream = write_stream; loop { let message = out_receiver.recv().unwrap(); message.send(&mut write_stream).unwrap(); // fails this task in case of an error; FIXME make sure this fails the read (parent) task } }).detach(); // read task, effectively the parent of the write task loop { let message = Message::load(&mut stream).unwrap(); // fails the task if there's an error. debug!("message: {}", message); match message.opcode { CloseOp => { try!(stream.close_read()); try!(message.send(&mut stream)); // complete close handeshake - send the same message right back at the client try!(stream.close_write()); break; // as this task dies, this should release the write task above, as well as the task set up in handle_ws_connection, if any }, PingOp => { let pong = Message { payload: message.payload, opcode: PongOp }; try!(pong.send(&mut stream)); }, PongOp => (), _ => in_sender.send(message).unwrap() } } Ok(()) } fn sec_websocket_accept(&self, sec_websocket_key: &str) -> String { // NOTE from RFC 6455 // // To prove that the handshake was received, the server has to take two // pieces of information and combine them to form a response. The first // piece of information comes from the |Sec-WebSocket-Key| header field // in the client handshake: // // Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ== // // For this header field, the server has to take the value (as present // in the header field, e.g., the base64-encoded [RFC4648] version minus // any leading and trailing whitespace) and concatenate this with the // Globally Unique Identifier (GUID, [RFC4122]) "258EAFA5-E914-47DA- // 95CA-C5AB0DC85B11" in string form, which is unlikely to be used by // network endpoints that do not understand the WebSocket Protocol. A // SHA-1 hash (160 bits) [FIPS.180-3], base64-encoded (see Section 4 of // [RFC4648]), of this concatenation is then returned in the server's // handshake. let mut sh = Sha1::new(); let mut out = [0u8; 20]; sh.input_str((String::from_str(sec_websocket_key) + WEBSOCKET_SALT).as_slice()); sh.result(out.as_mut_slice()); return out.to_base64(STANDARD); } // check if the http request is a web socket upgrade request, and return true if so. // otherwise, fall back on the regular http request handler fn handle_possible_ws_request(&self, r: Request, w: &mut ResponseWriter) -> bool { // TODO allow configuration of endpoint for websocket match (r.method.clone(), r.headers.upgrade.clone()){ // (&Get, &Some("websocket"), &Some(box [Token(box "Upgrade")])) => //\{ FIXME this doesn't work. 
but client must have the header "Connection: Upgrade" (Get, Some(ref upgrade)) => { if!upgrade.as_slice().eq_ignore_ascii_case("websocket"){ self.handle_request(r, w); return false; } // TODO client must have the header "Connection: Upgrade" // // TODO The request MUST include a header field with the name // |Sec-WebSocket-Version|. The value of this header field MUST be 13. // WebSocket Opening Handshake w.status = SwitchingProtocols; w.headers.upgrade = Some(String::from_str("websocket")); // w.headers.transfer_encoding = None; w.headers.content_length = Some(0); w.headers.connection = Some(vec!(Token(String::from_str("Upgrade")))); w.headers.date = Some(time::now_utc()); w.headers.server = Some(String::from_str("rust-ws/0.1-pre")); for header in r.headers.iter() { debug!("Header {}: {}", header.header_name(), header.header_value()); } // NOTE: think this is actually Sec-WebSocket-Key (capital Web[S]ocket), but rust-http normalizes header names match r.headers.extensions.get(&String::from_str("Sec-Websocket-Key")) { Some(val) => { let sec_websocket_accept = self.sec_websocket_accept((*val).as_slice()); w.headers.insert(ExtensionHeader(String::from_str("Sec-WebSocket-Accept"), sec_websocket_accept)); }, None => panic!() } return true; // successful_handshake }, (_, _) => self.handle_request(r, w) } return false; } }
use http::buffer::BufferedStream; use std::thread::Thread; use std::sync::mpsc::{channel, Sender, Receiver};
random_line_split
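For reference, a minimal sketch of the accept-value computation above, reusing the same rust-crypto calls as `sec_websocket_accept` and checking the worked example from RFC 6455 section 1.3; the test name is hypothetical, but the expected constant is the one given in the RFC:

#[test]
fn sec_websocket_accept_matches_rfc6455_sample() {
    // RFC 6455 section 1.3: the sample key "dGhlIHNhbXBsZSBub25jZQ=="
    // concatenated with the GUID must hash and base64-encode to the value below.
    let mut sh = Sha1::new();
    let mut out = [0u8; 20];
    sh.input_str((String::from_str("dGhlIHNhbXBsZSBub25jZQ==") + WEBSOCKET_SALT).as_slice());
    sh.result(out.as_mut_slice());
    assert_eq!(out.to_base64(STANDARD).as_slice(), "s3pPLMBiTxaQ9kYGzzhZRbK+xOo=");
}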
server.rs
use std::io::IoResult; use crypto::sha1::Sha1; use crypto::digest::Digest; use serialize::base64::{ToBase64, STANDARD}; use std::ascii::AsciiExt; use time; use std::io::{Listener, Acceptor}; use std::io::net::tcp::TcpListener; use std::io::net::tcp::TcpStream; use http::buffer::BufferedStream; use std::thread::Thread; use std::sync::mpsc::{channel, Sender, Receiver}; use http::server::{Server, Request, ResponseWriter}; use http::status::SwitchingProtocols; use http::headers::HeaderEnum; use http::headers::response::Header::ExtensionHeader; use http::headers::connection::Connection::Token; use http::method::Method::Get; pub use message::Payload::{Text, Binary, Empty}; pub use message::Opcode::{ContinuationOp, TextOp, BinaryOp, CloseOp, PingOp, PongOp}; use message::Message; static WEBSOCKET_SALT: &'static str = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"; pub trait WebSocketServer: Server { // called when a web socket connection is successfully established. // // this can't block! leaving implementation to trait user, in case they // want custom scheduling, tracking clients, reconnect logic, etc. // // TODO: may want to send more info in, such as the connecting IP address? fn handle_ws_connect(&self, receiver: Receiver<Box<Message>>, sender: Sender<Box<Message>>) -> (); // XXX: this is mostly a copy of the serve_forever fn in the Server trait. // rust-http needs some changes in order to avoid this duplication fn ws_serve_forever(self) { let config = self.get_config(); debug!("About to bind to {}", config.bind_address); let mut acceptor = match TcpListener::bind((config.bind_address.ip.to_string().as_slice(), config.bind_address.port)).listen() { Err(err) => { error!("bind or listen failed :-(: {}", err); return; }, Ok(acceptor) => acceptor, }; debug!("listening"); loop { let stream = match acceptor.accept() { Err(error) => { debug!("accept failed: {}", error); // Question: is this the correct thing to do? We should probably be more // intelligent, for there are some accept failures that are likely to be // permanent, such that continuing would be a very bad idea, such as // ENOBUFS/ENOMEM; and some where it should just be ignored, e.g. // ECONNABORTED. TODO. continue; }, Ok(socket) => socket, }; let child_self = self.clone(); Thread::spawn(move || { let mut stream = BufferedStream::new(stream); debug!("accepted connection"); let mut successful_handshake = false; loop { // A keep-alive loop, condition at end let (request, err_status) = Request::load(&mut stream); let close_connection = request.close_connection; let mut response = ResponseWriter::new(&mut stream); match err_status { Ok(()) => { successful_handshake = child_self.handle_possible_ws_request(request, &mut response); // Ensure that we actually do send a response: match response.try_write_headers() { Err(err) => { error!("Writing headers failed: {}", err); return; // Presumably bad connection, so give up. }, Ok(_) => (), } }, Err(status) => { // Uh oh, it's a response that I as a server cannot cope with. // No good user-agent should have caused this, so for the moment // at least I am content to send no body in the response. response.status = status; response.headers.content_length = Some(0); match response.write_headers() { Err(err) => { error!("Writing headers failed: {}", err); return; // Presumably bad connection, so give up. }, Ok(_) => (), } }, } // Ensure the request is flushed, any Transfer-Encoding completed, etc.
match response.finish_response() { Err(err) => { error!("finishing response failed: {}", err); return; // Presumably bad connection, so give up. }, Ok(_) => (), } if successful_handshake || close_connection { break; } } if successful_handshake { child_self.serve_websockets(stream).unwrap(); } }).detach(); } } fn serve_websockets(&self, stream: BufferedStream<TcpStream>) -> IoResult<()> { let mut stream = stream.wrapped; let write_stream = stream.clone(); let (in_sender, in_receiver) = channel(); let (out_sender, out_receiver) = channel(); self.handle_ws_connect(in_receiver, out_sender); // write task Thread::spawn(move || { // ugh: https://github.com/mozilla/rust/blob/3dbc1c34e694f38daeef741cfffc558606443c15/src/test/run-pass/kindck-implicit-close-over-mut-var.rs#L40-L44 // work to fix this is ongoing here: https://github.com/mozilla/rust/issues/11958 let mut write_stream = write_stream; loop { let message = out_receiver.recv().unwrap(); message.send(&mut write_stream).unwrap(); // fails this task in case of an error; FIXME make sure this fails the read (parent) task } }).detach(); // read task, effectively the parent of the write task loop { let message = Message::load(&mut stream).unwrap(); // fails the task if there's an error. debug!("message: {}", message); match message.opcode { CloseOp => { try!(stream.close_read()); try!(message.send(&mut stream)); // complete the close handshake - send the same message right back at the client try!(stream.close_write()); break; // as this task dies, this should release the write task above, as well as the task set up in handle_ws_connect, if any }, PingOp => { let pong = Message { payload: message.payload, opcode: PongOp }; try!(pong.send(&mut stream)); }, PongOp => (), _ => in_sender.send(message).unwrap() } } Ok(()) } fn sec_websocket_accept(&self, sec_websocket_key: &str) -> String { // NOTE from RFC 6455 // // To prove that the handshake was received, the server has to take two // pieces of information and combine them to form a response. The first // piece of information comes from the |Sec-WebSocket-Key| header field // in the client handshake: // // Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ== // // For this header field, the server has to take the value (as present // in the header field, e.g., the base64-encoded [RFC4648] version minus // any leading and trailing whitespace) and concatenate this with the // Globally Unique Identifier (GUID, [RFC4122]) "258EAFA5-E914-47DA- // 95CA-C5AB0DC85B11" in string form, which is unlikely to be used by // network endpoints that do not understand the WebSocket Protocol. A // SHA-1 hash (160 bits) [FIPS.180-3], base64-encoded (see Section 4 of // [RFC4648]), of this concatenation is then returned in the server's // handshake. let mut sh = Sha1::new(); let mut out = [0u8; 20]; sh.input_str((String::from_str(sec_websocket_key) + WEBSOCKET_SALT).as_slice()); sh.result(out.as_mut_slice()); return out.to_base64(STANDARD); } // check if the http request is a web socket upgrade request, and return true if so. // otherwise, fall back on the regular http request handler fn
(&self, r: Request, w: &mut ResponseWriter) -> bool { // TODO allow configuration of endpoint for websocket match (r.method.clone(), r.headers.upgrade.clone()) { // (&Get, &Some("websocket"), &Some(box [Token(box "Upgrade")])) => // { FIXME this doesn't work, but the client must have the header "Connection: Upgrade" (Get, Some(ref upgrade)) => { if !upgrade.as_slice().eq_ignore_ascii_case("websocket") { self.handle_request(r, w); return false; } // TODO client must have the header "Connection: Upgrade" // // TODO The request MUST include a header field with the name // |Sec-WebSocket-Version|. The value of this header field MUST be 13. // WebSocket Opening Handshake w.status = SwitchingProtocols; w.headers.upgrade = Some(String::from_str("websocket")); // w.headers.transfer_encoding = None; w.headers.content_length = Some(0); w.headers.connection = Some(vec!(Token(String::from_str("Upgrade")))); w.headers.date = Some(time::now_utc()); w.headers.server = Some(String::from_str("rust-ws/0.1-pre")); for header in r.headers.iter() { debug!("Header {}: {}", header.header_name(), header.header_value()); } // NOTE: think this is actually Sec-WebSocket-Key (capital Web[S]ocket), but rust-http normalizes header names match r.headers.extensions.get(&String::from_str("Sec-Websocket-Key")) { Some(val) => { let sec_websocket_accept = self.sec_websocket_accept((*val).as_slice()); w.headers.insert(ExtensionHeader(String::from_str("Sec-WebSocket-Accept"), sec_websocket_accept)); }, None => panic!() } return true; // successful_handshake }, (_, _) => self.handle_request(r, w) } return false; } }
handle_possible_ws_request
identifier_name
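To illustrate how a trait user might implement `handle_ws_connect` without blocking, here is a hedged sketch of an echo handler: it spawns a detached task that forwards every frame from the read task straight back to the write task. `EchoServer` is a hypothetical implementor, and error handling is elided.

// Inside a hypothetical `impl WebSocketServer for EchoServer`:
fn handle_ws_connect(&self, receiver: Receiver<Box<Message>>, sender: Sender<Box<Message>>) {
    Thread::spawn(move || {
        loop {
            let message = receiver.recv().unwrap(); // frame delivered by the read task
            sender.send(message).unwrap();          // handed unchanged to the write task
        }
    }).detach();
}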
x25519.rs
use core::ops::{Deref, DerefMut}; use super::common::*; use super::error::Error; use super::field25519::*; const POINT_BYTES: usize = 32; /// Non-uniform output of a scalar multiplication. /// This represents a point on the curve, and should not be used directly as a /// cipher key. #[derive(Clone, Debug, Eq, PartialEq, Hash)] pub struct DHOutput([u8; DHOutput::BYTES]); impl DHOutput { pub const BYTES: usize = 32; } impl Deref for DHOutput { type Target = [u8; DHOutput::BYTES]; /// Returns the output of the scalar multiplication as bytes. /// The output is not uniform, and should be hashed before use. fn deref(&self) -> &Self::Target { &self.0 } } impl DerefMut for DHOutput { /// Returns the output of the scalar multiplication as bytes. /// The output is not uniform, and should be hashed before use. fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl From<DHOutput> for PublicKey { fn from(dh: DHOutput) -> Self { PublicKey(dh.0) } } impl From<DHOutput> for SecretKey { fn from(dh: DHOutput) -> Self { SecretKey(dh.0) } } impl Drop for DHOutput { fn drop(&mut self) { Mem::wipe(self.0) } } /// A public key. #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] pub struct PublicKey([u8; POINT_BYTES]); impl PublicKey { /// Number of raw bytes in a public key. pub const BYTES: usize = POINT_BYTES; /// Creates a public key from raw bytes. pub fn new(pk: [u8; PublicKey::BYTES]) -> Self { PublicKey(pk) } /// Creates a public key from a slice. pub fn from_slice(pk: &[u8]) -> Result<Self, Error> { let mut pk_ = [0u8; PublicKey::BYTES]; if pk.len()!= pk_.len() { return Err(Error::InvalidPublicKey); } Fe::reject_noncanonical(pk)?; pk_.copy_from_slice(pk); Ok(PublicKey::new(pk_)) } /// Multiply a point by the cofactor, returning an error if the element is /// in a small-order group. pub fn clear_cofactor(&self) -> Result<[u8; PublicKey::BYTES], Error> { let cofactor = [ 8u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ]; self.ladder(&cofactor, 4) } /// Multiply the point represented by the public key by the scalar after /// clamping it pub fn dh(&self, sk: &SecretKey) -> Result<DHOutput, Error> { let sk = sk.clamped(); Ok(DHOutput(self.ladder(&sk.0, 255)?)) } /// Multiply the point represented by the public key by the scalar WITHOUT /// CLAMPING pub fn unclamped_mul(&self, sk: &SecretKey) -> Result<DHOutput, Error> { self.clear_cofactor()?; Ok(DHOutput(self.ladder(&sk.0, 256)?)) } fn ladder(&self, s: &[u8], bits: usize) -> Result<[u8; POINT_BYTES], Error> { let x1 = Fe::from_bytes(&self.0); let mut x2 = FE_ONE; let mut z2 = FE_ZERO; let mut x3 = x1; let mut z3 = FE_ONE; let mut swap: u8 = 0; let mut pos = bits - 1; loop { let bit = (s[pos >> 3] >> (pos & 7)) & 1; swap ^= bit; Fe::cswap2(&mut x2, &mut x3, &mut z2, &mut z3, swap); swap = bit; let a = x2 + z2; let b = x2 - z2; let aa = a.square(); let bb = b.square(); x2 = aa * bb; let e = aa - bb; let da = (x3 - z3) * a; let cb = (x3 + z3) * b; x3 = (da + cb).square(); z3 = x1 * ((da - cb).square()); z2 = e * (bb + (e.mul32(121666))); if pos == 0 { break; } pos -= 1; } Fe::cswap2(&mut x2, &mut x3, &mut z2, &mut z3, swap); z2 = z2.invert(); x2 = x2 * z2; if x2.is_zero() { return Err(Error::WeakPublicKey); } Ok(x2.to_bytes()) } /// The Curve25519 base point #[inline] pub fn base_point() -> PublicKey { PublicKey(FE_CURVE25519_BASEPOINT.to_bytes()) } } impl Deref for PublicKey { type Target = [u8; PublicKey::BYTES]; /// Returns a public key as bytes. 
fn deref(&self) -> &Self::Target { &self.0 } } impl DerefMut for PublicKey { /// Returns a public key as mutable bytes. fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } /// A secret key. #[derive(Clone, Debug, Eq, PartialEq, Hash)] pub struct SecretKey([u8; SecretKey::BYTES]); impl SecretKey { /// Number of bytes in a secret key. pub const BYTES: usize = 32; /// Creates a secret key from raw bytes. pub fn new(sk: [u8; SecretKey::BYTES]) -> Self { SecretKey(sk) } /// Creates a secret key from a slice. pub fn from_slice(sk: &[u8]) -> Result<Self, Error> { let mut sk_ = [0u8; SecretKey::BYTES]; if sk.len()!= sk_.len()
sk_.copy_from_slice(sk); Ok(SecretKey::new(sk_)) } /// Perform the X25519 clamping magic pub fn clamped(&self) -> SecretKey { let mut clamped = self.clone(); clamped[0] &= 248; clamped[31] &= 63; clamped[31] |= 64; clamped } /// Recover the public key pub fn recover_public_key(&self) -> Result<PublicKey, Error> { let sk = self.clamped(); Ok(PublicKey(PublicKey::base_point().ladder(&sk.0, 255)?)) } /// Returns `Ok(())` if the given public key is the public counterpart of /// this secret key. /// Returns `Err(Error::InvalidPublicKey)` otherwise. pub fn validate_public_key(&self, pk: &PublicKey) -> Result<(), Error> { let recovered_pk = self.recover_public_key()?; if recovered_pk!= *pk { return Err(Error::InvalidPublicKey); } Ok(()) } } impl Drop for SecretKey { fn drop(&mut self) { Mem::wipe(self.0) } } impl Deref for SecretKey { type Target = [u8; SecretKey::BYTES]; /// Returns a secret key as bytes. fn deref(&self) -> &Self::Target { &self.0 } } impl DerefMut for SecretKey { /// Returns a secret key as mutable bytes. fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } /// A key pair. #[derive(Clone, Debug, Eq, PartialEq, Hash)] pub struct KeyPair { /// Public key part of the key pair. pub pk: PublicKey, /// Secret key part of the key pair. pub sk: SecretKey, } impl KeyPair { /// Generates a new key pair. #[cfg(feature = "random")] pub fn generate() -> KeyPair { let mut sk = [0u8; SecretKey::BYTES]; getrandom::getrandom(&mut sk).expect("getrandom"); if Fe::from_bytes(&sk).is_zero() { panic!("All-zero secret key"); } let sk = SecretKey(sk); let pk = sk .recover_public_key() .expect("generated public key is weak"); KeyPair { pk, sk } } /// Check that the public key is valid for the secret key. pub fn validate(&self) -> Result<(), Error> { self.sk.validate_public_key(&self.pk) } } #[cfg(not(feature = "disable-signatures"))] mod from_ed25519 { use super::super::{ edwards25519, sha512, KeyPair as EdKeyPair, PublicKey as EdPublicKey, SecretKey as EdSecretKey, }; use super::*; impl SecretKey { /// Convert an Ed25519 secret key to a X25519 secret key. pub fn from_ed25519(edsk: &EdSecretKey) -> Result<SecretKey, Error> { let seed = edsk.seed(); let az: [u8; 64] = { let mut hash_output = sha512::Hash::hash(*seed); hash_output[0] &= 248; hash_output[31] &= 63; hash_output[31] |= 64; hash_output }; SecretKey::from_slice(&az[..32]) } } impl PublicKey { /// Convert an Ed25519 public key to a X25519 public key. pub fn from_ed25519(edpk: &EdPublicKey) -> Result<PublicKey, Error> { let pk = PublicKey::from_slice( &edwards25519::ge_to_x25519_vartime(edpk).ok_or(Error::InvalidPublicKey)?, )?; pk.clear_cofactor()?; Ok(pk) } } impl KeyPair { /// Convert an Ed25519 key pair to a X25519 key pair. 
pub fn from_ed25519(edkp: &EdKeyPair) -> Result<KeyPair, Error> { let pk = PublicKey::from_ed25519(&edkp.pk)?; let sk = SecretKey::from_ed25519(&edkp.sk)?; Ok(KeyPair { pk, sk }) } } } #[cfg(not(feature = "disable-signatures"))] pub use from_ed25519::*; #[test] fn test_x25519() { let sk_1 = SecretKey::from_slice(&[ 1u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ]) .unwrap(); let output = PublicKey::base_point().unclamped_mul(&sk_1).unwrap(); assert_eq!(PublicKey::from(output), PublicKey::base_point()); let kp_a = KeyPair::generate(); let kp_b = KeyPair::generate(); let output_a = kp_b.pk.dh(&kp_a.sk).unwrap(); let output_b = kp_a.pk.dh(&kp_b.sk).unwrap(); assert_eq!(output_a, output_b); } #[cfg(not(feature = "disable-signatures"))] #[test] fn test_x25519_map() { use super::KeyPair as EdKeyPair; let edkp_a = EdKeyPair::generate(); let edkp_b = EdKeyPair::generate(); let kp_a = KeyPair::from_ed25519(&edkp_a).unwrap(); let kp_b = KeyPair::from_ed25519(&edkp_b).unwrap(); let output_a = kp_b.pk.dh(&kp_a.sk).unwrap(); let output_b = kp_a.pk.dh(&kp_b.sk).unwrap(); assert_eq!(output_a, output_b); } #[test] #[cfg(all(not(feature = "disable-signatures"), feature = "random"))] fn test_x25519_invalid_keypair() { let kp1 = KeyPair::generate(); let kp2 = KeyPair::generate(); assert_eq!( kp1.sk.validate_public_key(&kp2.pk).unwrap_err(), Error::InvalidPublicKey ); assert_eq!( kp2.sk.validate_public_key(&kp1.pk).unwrap_err(), Error::InvalidPublicKey ); assert!(kp1.sk.validate_public_key(&kp1.pk).is_ok()); assert!(kp2.sk.validate_public_key(&kp2.pk).is_ok()); assert!(kp1.validate().is_ok()); }
{ return Err(Error::InvalidSecretKey); }
conditional_block
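A usage sketch of the API above, assuming the "random" feature for `KeyPair::generate` (it mirrors the `test_x25519` test): both sides derive the same shared point, which, per the `DHOutput` docs, should still be hashed before use as a symmetric key.

fn dh_sketch() -> Result<(), Error> {
    let alice = KeyPair::generate();
    let bob = KeyPair::generate();
    let alice_shared = bob.pk.dh(&alice.sk)?; // Alice combines her secret with Bob's public key
    let bob_shared = alice.pk.dh(&bob.sk)?;   // Bob does the converse
    assert_eq!(alice_shared, bob_shared);     // both arrive at the same DHOutput
    Ok(())
}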
x25519.rs
use core::ops::{Deref, DerefMut}; use super::common::*; use super::error::Error; use super::field25519::*; const POINT_BYTES: usize = 32; /// Non-uniform output of a scalar multiplication. /// This represents a point on the curve, and should not be used directly as a /// cipher key. #[derive(Clone, Debug, Eq, PartialEq, Hash)] pub struct DHOutput([u8; DHOutput::BYTES]); impl DHOutput { pub const BYTES: usize = 32; } impl Deref for DHOutput { type Target = [u8; DHOutput::BYTES]; /// Returns the output of the scalar multiplication as bytes. /// The output is not uniform, and should be hashed before use. fn deref(&self) -> &Self::Target { &self.0 } } impl DerefMut for DHOutput { /// Returns the output of the scalar multiplication as bytes. /// The output is not uniform, and should be hashed before use. fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl From<DHOutput> for PublicKey { fn
(dh: DHOutput) -> Self { PublicKey(dh.0) } } impl From<DHOutput> for SecretKey { fn from(dh: DHOutput) -> Self { SecretKey(dh.0) } } impl Drop for DHOutput { fn drop(&mut self) { Mem::wipe(self.0) } } /// A public key. #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] pub struct PublicKey([u8; POINT_BYTES]); impl PublicKey { /// Number of raw bytes in a public key. pub const BYTES: usize = POINT_BYTES; /// Creates a public key from raw bytes. pub fn new(pk: [u8; PublicKey::BYTES]) -> Self { PublicKey(pk) } /// Creates a public key from a slice. pub fn from_slice(pk: &[u8]) -> Result<Self, Error> { let mut pk_ = [0u8; PublicKey::BYTES]; if pk.len()!= pk_.len() { return Err(Error::InvalidPublicKey); } Fe::reject_noncanonical(pk)?; pk_.copy_from_slice(pk); Ok(PublicKey::new(pk_)) } /// Multiply a point by the cofactor, returning an error if the element is /// in a small-order group. pub fn clear_cofactor(&self) -> Result<[u8; PublicKey::BYTES], Error> { let cofactor = [ 8u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ]; self.ladder(&cofactor, 4) } /// Multiply the point represented by the public key by the scalar after /// clamping it pub fn dh(&self, sk: &SecretKey) -> Result<DHOutput, Error> { let sk = sk.clamped(); Ok(DHOutput(self.ladder(&sk.0, 255)?)) } /// Multiply the point represented by the public key by the scalar WITHOUT /// CLAMPING pub fn unclamped_mul(&self, sk: &SecretKey) -> Result<DHOutput, Error> { self.clear_cofactor()?; Ok(DHOutput(self.ladder(&sk.0, 256)?)) } fn ladder(&self, s: &[u8], bits: usize) -> Result<[u8; POINT_BYTES], Error> { let x1 = Fe::from_bytes(&self.0); let mut x2 = FE_ONE; let mut z2 = FE_ZERO; let mut x3 = x1; let mut z3 = FE_ONE; let mut swap: u8 = 0; let mut pos = bits - 1; loop { let bit = (s[pos >> 3] >> (pos & 7)) & 1; swap ^= bit; Fe::cswap2(&mut x2, &mut x3, &mut z2, &mut z3, swap); swap = bit; let a = x2 + z2; let b = x2 - z2; let aa = a.square(); let bb = b.square(); x2 = aa * bb; let e = aa - bb; let da = (x3 - z3) * a; let cb = (x3 + z3) * b; x3 = (da + cb).square(); z3 = x1 * ((da - cb).square()); z2 = e * (bb + (e.mul32(121666))); if pos == 0 { break; } pos -= 1; } Fe::cswap2(&mut x2, &mut x3, &mut z2, &mut z3, swap); z2 = z2.invert(); x2 = x2 * z2; if x2.is_zero() { return Err(Error::WeakPublicKey); } Ok(x2.to_bytes()) } /// The Curve25519 base point #[inline] pub fn base_point() -> PublicKey { PublicKey(FE_CURVE25519_BASEPOINT.to_bytes()) } } impl Deref for PublicKey { type Target = [u8; PublicKey::BYTES]; /// Returns a public key as bytes. fn deref(&self) -> &Self::Target { &self.0 } } impl DerefMut for PublicKey { /// Returns a public key as mutable bytes. fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } /// A secret key. #[derive(Clone, Debug, Eq, PartialEq, Hash)] pub struct SecretKey([u8; SecretKey::BYTES]); impl SecretKey { /// Number of bytes in a secret key. pub const BYTES: usize = 32; /// Creates a secret key from raw bytes. pub fn new(sk: [u8; SecretKey::BYTES]) -> Self { SecretKey(sk) } /// Creates a secret key from a slice. 
pub fn from_slice(sk: &[u8]) -> Result<Self, Error> { let mut sk_ = [0u8; SecretKey::BYTES]; if sk.len()!= sk_.len() { return Err(Error::InvalidSecretKey); } sk_.copy_from_slice(sk); Ok(SecretKey::new(sk_)) } /// Perform the X25519 clamping magic pub fn clamped(&self) -> SecretKey { let mut clamped = self.clone(); clamped[0] &= 248; clamped[31] &= 63; clamped[31] |= 64; clamped } /// Recover the public key pub fn recover_public_key(&self) -> Result<PublicKey, Error> { let sk = self.clamped(); Ok(PublicKey(PublicKey::base_point().ladder(&sk.0, 255)?)) } /// Returns `Ok(())` if the given public key is the public counterpart of /// this secret key. /// Returns `Err(Error::InvalidPublicKey)` otherwise. pub fn validate_public_key(&self, pk: &PublicKey) -> Result<(), Error> { let recovered_pk = self.recover_public_key()?; if recovered_pk!= *pk { return Err(Error::InvalidPublicKey); } Ok(()) } } impl Drop for SecretKey { fn drop(&mut self) { Mem::wipe(self.0) } } impl Deref for SecretKey { type Target = [u8; SecretKey::BYTES]; /// Returns a secret key as bytes. fn deref(&self) -> &Self::Target { &self.0 } } impl DerefMut for SecretKey { /// Returns a secret key as mutable bytes. fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } /// A key pair. #[derive(Clone, Debug, Eq, PartialEq, Hash)] pub struct KeyPair { /// Public key part of the key pair. pub pk: PublicKey, /// Secret key part of the key pair. pub sk: SecretKey, } impl KeyPair { /// Generates a new key pair. #[cfg(feature = "random")] pub fn generate() -> KeyPair { let mut sk = [0u8; SecretKey::BYTES]; getrandom::getrandom(&mut sk).expect("getrandom"); if Fe::from_bytes(&sk).is_zero() { panic!("All-zero secret key"); } let sk = SecretKey(sk); let pk = sk .recover_public_key() .expect("generated public key is weak"); KeyPair { pk, sk } } /// Check that the public key is valid for the secret key. pub fn validate(&self) -> Result<(), Error> { self.sk.validate_public_key(&self.pk) } } #[cfg(not(feature = "disable-signatures"))] mod from_ed25519 { use super::super::{ edwards25519, sha512, KeyPair as EdKeyPair, PublicKey as EdPublicKey, SecretKey as EdSecretKey, }; use super::*; impl SecretKey { /// Convert an Ed25519 secret key to a X25519 secret key. pub fn from_ed25519(edsk: &EdSecretKey) -> Result<SecretKey, Error> { let seed = edsk.seed(); let az: [u8; 64] = { let mut hash_output = sha512::Hash::hash(*seed); hash_output[0] &= 248; hash_output[31] &= 63; hash_output[31] |= 64; hash_output }; SecretKey::from_slice(&az[..32]) } } impl PublicKey { /// Convert an Ed25519 public key to a X25519 public key. pub fn from_ed25519(edpk: &EdPublicKey) -> Result<PublicKey, Error> { let pk = PublicKey::from_slice( &edwards25519::ge_to_x25519_vartime(edpk).ok_or(Error::InvalidPublicKey)?, )?; pk.clear_cofactor()?; Ok(pk) } } impl KeyPair { /// Convert an Ed25519 key pair to a X25519 key pair. 
pub fn from_ed25519(edkp: &EdKeyPair) -> Result<KeyPair, Error> { let pk = PublicKey::from_ed25519(&edkp.pk)?; let sk = SecretKey::from_ed25519(&edkp.sk)?; Ok(KeyPair { pk, sk }) } } } #[cfg(not(feature = "disable-signatures"))] pub use from_ed25519::*; #[test] fn test_x25519() { let sk_1 = SecretKey::from_slice(&[ 1u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ]) .unwrap(); let output = PublicKey::base_point().unclamped_mul(&sk_1).unwrap(); assert_eq!(PublicKey::from(output), PublicKey::base_point()); let kp_a = KeyPair::generate(); let kp_b = KeyPair::generate(); let output_a = kp_b.pk.dh(&kp_a.sk).unwrap(); let output_b = kp_a.pk.dh(&kp_b.sk).unwrap(); assert_eq!(output_a, output_b); } #[cfg(not(feature = "disable-signatures"))] #[test] fn test_x25519_map() { use super::KeyPair as EdKeyPair; let edkp_a = EdKeyPair::generate(); let edkp_b = EdKeyPair::generate(); let kp_a = KeyPair::from_ed25519(&edkp_a).unwrap(); let kp_b = KeyPair::from_ed25519(&edkp_b).unwrap(); let output_a = kp_b.pk.dh(&kp_a.sk).unwrap(); let output_b = kp_a.pk.dh(&kp_b.sk).unwrap(); assert_eq!(output_a, output_b); } #[test] #[cfg(all(not(feature = "disable-signatures"), feature = "random"))] fn test_x25519_invalid_keypair() { let kp1 = KeyPair::generate(); let kp2 = KeyPair::generate(); assert_eq!( kp1.sk.validate_public_key(&kp2.pk).unwrap_err(), Error::InvalidPublicKey ); assert_eq!( kp2.sk.validate_public_key(&kp1.pk).unwrap_err(), Error::InvalidPublicKey ); assert!(kp1.sk.validate_public_key(&kp1.pk).is_ok()); assert!(kp2.sk.validate_public_key(&kp2.pk).is_ok()); assert!(kp1.validate().is_ok()); }
from
identifier_name
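The "clamping magic" in `clamped()` above packs three bit operations; a commented restatement as a hypothetical free function:

fn clamp_bytes(mut sk: [u8; 32]) -> [u8; 32] {
    sk[0] &= 248; // clear the 3 low bits: the scalar becomes a multiple of the cofactor 8
    sk[31] &= 63; // clear bits 254 and 255
    sk[31] |= 64; // set bit 254, giving every clamped scalar the same fixed bit length
    sk
}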
x25519.rs
use core::ops::{Deref, DerefMut}; use super::common::*; use super::error::Error; use super::field25519::*; const POINT_BYTES: usize = 32; /// Non-uniform output of a scalar multiplication. /// This represents a point on the curve, and should not be used directly as a /// cipher key. #[derive(Clone, Debug, Eq, PartialEq, Hash)] pub struct DHOutput([u8; DHOutput::BYTES]); impl DHOutput { pub const BYTES: usize = 32; } impl Deref for DHOutput { type Target = [u8; DHOutput::BYTES]; /// Returns the output of the scalar multiplication as bytes. /// The output is not uniform, and should be hashed before use. fn deref(&self) -> &Self::Target { &self.0 } } impl DerefMut for DHOutput { /// Returns the output of the scalar multiplication as bytes. /// The output is not uniform, and should be hashed before use. fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl From<DHOutput> for PublicKey { fn from(dh: DHOutput) -> Self { PublicKey(dh.0) } } impl From<DHOutput> for SecretKey { fn from(dh: DHOutput) -> Self { SecretKey(dh.0) } } impl Drop for DHOutput { fn drop(&mut self) { Mem::wipe(self.0) } } /// A public key. #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] pub struct PublicKey([u8; POINT_BYTES]); impl PublicKey { /// Number of raw bytes in a public key. pub const BYTES: usize = POINT_BYTES; /// Creates a public key from raw bytes. pub fn new(pk: [u8; PublicKey::BYTES]) -> Self { PublicKey(pk) } /// Creates a public key from a slice. pub fn from_slice(pk: &[u8]) -> Result<Self, Error> { let mut pk_ = [0u8; PublicKey::BYTES]; if pk.len()!= pk_.len() { return Err(Error::InvalidPublicKey); } Fe::reject_noncanonical(pk)?; pk_.copy_from_slice(pk); Ok(PublicKey::new(pk_)) } /// Multiply a point by the cofactor, returning an error if the element is /// in a small-order group. pub fn clear_cofactor(&self) -> Result<[u8; PublicKey::BYTES], Error> { let cofactor = [ 8u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ]; self.ladder(&cofactor, 4) } /// Multiply the point represented by the public key by the scalar after /// clamping it pub fn dh(&self, sk: &SecretKey) -> Result<DHOutput, Error> { let sk = sk.clamped(); Ok(DHOutput(self.ladder(&sk.0, 255)?)) } /// Multiply the point represented by the public key by the scalar WITHOUT /// CLAMPING pub fn unclamped_mul(&self, sk: &SecretKey) -> Result<DHOutput, Error> { self.clear_cofactor()?; Ok(DHOutput(self.ladder(&sk.0, 256)?)) } fn ladder(&self, s: &[u8], bits: usize) -> Result<[u8; POINT_BYTES], Error> { let x1 = Fe::from_bytes(&self.0); let mut x2 = FE_ONE; let mut z2 = FE_ZERO; let mut x3 = x1; let mut z3 = FE_ONE; let mut swap: u8 = 0; let mut pos = bits - 1; loop { let bit = (s[pos >> 3] >> (pos & 7)) & 1; swap ^= bit; Fe::cswap2(&mut x2, &mut x3, &mut z2, &mut z3, swap); swap = bit; let a = x2 + z2; let b = x2 - z2; let aa = a.square(); let bb = b.square(); x2 = aa * bb; let e = aa - bb; let da = (x3 - z3) * a; let cb = (x3 + z3) * b; x3 = (da + cb).square(); z3 = x1 * ((da - cb).square()); z2 = e * (bb + (e.mul32(121666))); if pos == 0 { break; } pos -= 1; } Fe::cswap2(&mut x2, &mut x3, &mut z2, &mut z3, swap); z2 = z2.invert(); x2 = x2 * z2; if x2.is_zero() { return Err(Error::WeakPublicKey); } Ok(x2.to_bytes()) } /// The Curve25519 base point #[inline] pub fn base_point() -> PublicKey { PublicKey(FE_CURVE25519_BASEPOINT.to_bytes()) } } impl Deref for PublicKey { type Target = [u8; PublicKey::BYTES]; /// Returns a public key as bytes. 
fn deref(&self) -> &Self::Target { &self.0 } } impl DerefMut for PublicKey { /// Returns a public key as mutable bytes. fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } /// A secret key. #[derive(Clone, Debug, Eq, PartialEq, Hash)] pub struct SecretKey([u8; SecretKey::BYTES]); impl SecretKey { /// Number of bytes in a secret key. pub const BYTES: usize = 32; /// Creates a secret key from raw bytes. pub fn new(sk: [u8; SecretKey::BYTES]) -> Self { SecretKey(sk) } /// Creates a secret key from a slice. pub fn from_slice(sk: &[u8]) -> Result<Self, Error> { let mut sk_ = [0u8; SecretKey::BYTES]; if sk.len()!= sk_.len() { return Err(Error::InvalidSecretKey); } sk_.copy_from_slice(sk); Ok(SecretKey::new(sk_)) } /// Perform the X25519 clamping magic pub fn clamped(&self) -> SecretKey { let mut clamped = self.clone(); clamped[0] &= 248; clamped[31] &= 63; clamped[31] |= 64; clamped } /// Recover the public key pub fn recover_public_key(&self) -> Result<PublicKey, Error> { let sk = self.clamped(); Ok(PublicKey(PublicKey::base_point().ladder(&sk.0, 255)?)) } /// Returns `Ok(())` if the given public key is the public counterpart of /// this secret key. /// Returns `Err(Error::InvalidPublicKey)` otherwise. pub fn validate_public_key(&self, pk: &PublicKey) -> Result<(), Error> { let recovered_pk = self.recover_public_key()?; if recovered_pk!= *pk { return Err(Error::InvalidPublicKey); } Ok(()) } } impl Drop for SecretKey { fn drop(&mut self) { Mem::wipe(self.0) } } impl Deref for SecretKey { type Target = [u8; SecretKey::BYTES]; /// Returns a secret key as bytes. fn deref(&self) -> &Self::Target { &self.0 } } impl DerefMut for SecretKey { /// Returns a secret key as mutable bytes. fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } /// A key pair. #[derive(Clone, Debug, Eq, PartialEq, Hash)] pub struct KeyPair { /// Public key part of the key pair. pub pk: PublicKey, /// Secret key part of the key pair. pub sk: SecretKey, } impl KeyPair { /// Generates a new key pair. #[cfg(feature = "random")] pub fn generate() -> KeyPair { let mut sk = [0u8; SecretKey::BYTES]; getrandom::getrandom(&mut sk).expect("getrandom"); if Fe::from_bytes(&sk).is_zero() { panic!("All-zero secret key"); } let sk = SecretKey(sk); let pk = sk .recover_public_key() .expect("generated public key is weak"); KeyPair { pk, sk } } /// Check that the public key is valid for the secret key. pub fn validate(&self) -> Result<(), Error> { self.sk.validate_public_key(&self.pk) } } #[cfg(not(feature = "disable-signatures"))] mod from_ed25519 { use super::super::{ edwards25519, sha512, KeyPair as EdKeyPair, PublicKey as EdPublicKey, SecretKey as EdSecretKey, }; use super::*; impl SecretKey { /// Convert an Ed25519 secret key to a X25519 secret key. pub fn from_ed25519(edsk: &EdSecretKey) -> Result<SecretKey, Error> { let seed = edsk.seed(); let az: [u8; 64] = { let mut hash_output = sha512::Hash::hash(*seed); hash_output[0] &= 248; hash_output[31] &= 63; hash_output[31] |= 64; hash_output }; SecretKey::from_slice(&az[..32]) } } impl PublicKey { /// Convert an Ed25519 public key to a X25519 public key. pub fn from_ed25519(edpk: &EdPublicKey) -> Result<PublicKey, Error> { let pk = PublicKey::from_slice( &edwards25519::ge_to_x25519_vartime(edpk).ok_or(Error::InvalidPublicKey)?, )?; pk.clear_cofactor()?; Ok(pk) } } impl KeyPair { /// Convert an Ed25519 key pair to a X25519 key pair. 
pub fn from_ed25519(edkp: &EdKeyPair) -> Result<KeyPair, Error> { let pk = PublicKey::from_ed25519(&edkp.pk)?; let sk = SecretKey::from_ed25519(&edkp.sk)?; Ok(KeyPair { pk, sk }) } } }
let sk_1 = SecretKey::from_slice(&[ 1u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ]) .unwrap(); let output = PublicKey::base_point().unclamped_mul(&sk_1).unwrap(); assert_eq!(PublicKey::from(output), PublicKey::base_point()); let kp_a = KeyPair::generate(); let kp_b = KeyPair::generate(); let output_a = kp_b.pk.dh(&kp_a.sk).unwrap(); let output_b = kp_a.pk.dh(&kp_b.sk).unwrap(); assert_eq!(output_a, output_b); } #[cfg(not(feature = "disable-signatures"))] #[test] fn test_x25519_map() { use super::KeyPair as EdKeyPair; let edkp_a = EdKeyPair::generate(); let edkp_b = EdKeyPair::generate(); let kp_a = KeyPair::from_ed25519(&edkp_a).unwrap(); let kp_b = KeyPair::from_ed25519(&edkp_b).unwrap(); let output_a = kp_b.pk.dh(&kp_a.sk).unwrap(); let output_b = kp_a.pk.dh(&kp_b.sk).unwrap(); assert_eq!(output_a, output_b); } #[test] #[cfg(all(not(feature = "disable-signatures"), feature = "random"))] fn test_x25519_invalid_keypair() { let kp1 = KeyPair::generate(); let kp2 = KeyPair::generate(); assert_eq!( kp1.sk.validate_public_key(&kp2.pk).unwrap_err(), Error::InvalidPublicKey ); assert_eq!( kp2.sk.validate_public_key(&kp1.pk).unwrap_err(), Error::InvalidPublicKey ); assert!(kp1.sk.validate_public_key(&kp1.pk).is_ok()); assert!(kp2.sk.validate_public_key(&kp2.pk).is_ok()); assert!(kp1.validate().is_ok()); }
#[cfg(not(feature = "disable-signatures"))] pub use from_ed25519::*; #[test] fn test_x25519() {
random_line_split
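A small illustration of the bit indexing used inside `ladder()`: `pos >> 3` selects the byte and `pos & 7` the bit within it, so the loop walks the scalar from the highest processed bit down to bit 0. Hypothetical helper:

fn scalar_bit(s: &[u8], pos: usize) -> u8 {
    (s[pos >> 3] >> (pos & 7)) & 1
}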
physically_monotonic.rs
// Copyright Materialize, Inc. and contributors. All rights reserved. // // Use of this software is governed by the Business Source License // included in the LICENSE file. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0. //! Implementation of [crate::plan::interpret::Interpreter] for inference //! of physical monotonicity in single-time dataflows. use std::cmp::Reverse; use std::collections::BTreeSet; use std::marker::PhantomData; use differential_dataflow::lattice::Lattice; use mz_expr::{EvalError, Id, MapFilterProject, MirScalarExpr, TableFunc}; use mz_repr::{Diff, GlobalId, Row}; use timely::PartialOrder; use crate::plan::interpret::{BoundedLattice, Context, Interpreter}; use crate::plan::join::JoinPlan; use crate::plan::reduce::{KeyValPlan, ReducePlan}; use crate::plan::threshold::ThresholdPlan; use crate::plan::top_k::TopKPlan; use crate::plan::{AvailableCollections, GetPlan}; /// Represents a boolean physical monotonicity property, where the bottom value /// is true (i.e., physically monotonic) and the top value is false (i.e. not /// physically monotonic). #[derive(Debug, Default, Clone, Copy, PartialEq, Eq)] pub struct PhysicallyMonotonic(pub bool); impl BoundedLattice for PhysicallyMonotonic { fn top() -> Self { PhysicallyMonotonic(false) } fn bottom() -> Self { PhysicallyMonotonic(true) } } impl Lattice for PhysicallyMonotonic { fn join(&self, other: &Self) -> Self { PhysicallyMonotonic(self.0 && other.0) } fn meet(&self, other: &Self) -> Self { PhysicallyMonotonic(self.0 || other.0) } } impl PartialOrder for PhysicallyMonotonic { fn less_equal(&self, other: &Self) -> bool { // We employ `Reverse` ordering for `bool` here to be consistent with // the choice of `top()` being false and `bottom()` being true. Reverse::<bool>(self.0) <= Reverse::<bool>(other.0) } } /// Provides a concrete implementation of an interpreter that determines if /// the output of `Plan` expressions is physically monotonic in a single-time /// dataflow, potentially taking into account judgments about its inputs. We /// note that in a single-time dataflow, expressions in non-recursive contexts /// (i.e., outside of `LetRec` values) process streams that are at a minimum /// logically monotonic, i.e., may contain retractions but would cease to do /// so if consolidated. Detecting physical monotonicity, i.e., the absence /// of retractions in a stream, enables us to disable forced consolidation /// whenever possible. #[derive(Debug)] pub struct SingleTimeMonotonic<'a, T = mz_repr::Timestamp> { monotonic_ids: &'a BTreeSet<GlobalId>, _phantom: PhantomData<T>, } impl<'a, T> SingleTimeMonotonic<'a, T> { /// Instantiates an interpreter for single-time physical monotonicity /// analysis. pub fn new(monotonic_ids: &'a BTreeSet<GlobalId>) -> Self { SingleTimeMonotonic { monotonic_ids, _phantom: Default::default(), } } } impl<T> Interpreter<T> for SingleTimeMonotonic<'_, T> { type Domain = PhysicallyMonotonic; fn constant( &self, _ctx: &Context<Self::Domain>, rows: &Result<Vec<(Row, T, Diff)>, EvalError>, ) -> Self::Domain { // A constant is physically monotonic iff the constant is an `EvalError` // or all its rows have `Diff` values greater than zero. PhysicallyMonotonic( rows.as_ref() .map_or(true, |rows| rows.iter().all(|(_, _, diff)| *diff > 0)), ) } fn
( &self, ctx: &Context<Self::Domain>, id: &Id, _keys: &AvailableCollections, _plan: &GetPlan, ) -> Self::Domain { // A get operator yields physically monotonic output iff the corresponding // `Plan::Get` is on a local or global ID that is known to provide physically // monotonic input. The way this becomes known is through the interpreter itself // for non-recursive local IDs or through configuration for the global IDs of // monotonic sources and indexes. Recursive local IDs are always assumed to // break physical monotonicity. // TODO(vmarcos): Consider in the future if we can ascertain whether the // restrictions on recursive local IDs can be relaxed to take into account only // the interpreter judgement directly. PhysicallyMonotonic(match id { Id::Local(id) => ctx .bindings .get(id) .map_or(false, |entry| !entry.is_rec && entry.value.0), Id::Global(id) => self.monotonic_ids.contains(id), }) } fn mfp( &self, _ctx: &Context<Self::Domain>, input: Self::Domain, _mfp: &MapFilterProject, _input_key_val: &Option<(Vec<MirScalarExpr>, Option<Row>)>, ) -> Self::Domain { // In a single-time context, we just propagate the monotonicity // status of the input input } fn flat_map( &self, _ctx: &Context<Self::Domain>, input: Self::Domain, _func: &TableFunc, _exprs: &Vec<MirScalarExpr>, _mfp: &MapFilterProject, _input_key: &Option<Vec<MirScalarExpr>>, ) -> Self::Domain { // In a single-time context, we just propagate the monotonicity // status of the input input } fn join( &self, _ctx: &Context<Self::Domain>, inputs: Vec<Self::Domain>, _plan: &JoinPlan, ) -> Self::Domain { // When we see a join, we must consider that the inputs could have // been `Plan::Get`s on arrangements. These are not in general safe // wrt. producing physically monotonic data. So here, we conservatively // judge the output of a join to be physically monotonic iff all // inputs are physically monotonic. PhysicallyMonotonic(inputs.iter().all(|monotonic| monotonic.0)) } fn reduce( &self, ctx: &Context<Self::Domain>, _input: Self::Domain, _key_val_plan: &KeyValPlan, _plan: &ReducePlan, _input_key: &Option<Vec<MirScalarExpr>>, ) -> Self::Domain { // In a recursive context, reduce will advance across timestamps // and may need to retract. Outside of a recursive context, the // fact that the dataflow is single-time implies no retraction // is emitted out of reduce. This makes the output physically // monotonic, regardless of the input judgment. All `ReducePlan` // variants behave the same in this respect. PhysicallyMonotonic(!ctx.is_rec) } fn top_k( &self, ctx: &Context<Self::Domain>, _input: Self::Domain, _top_k_plan: &TopKPlan, ) -> Self::Domain { // Top-k behaves like a reduction, producing physically monotonic // output when exposed to a single time (i.e., when the context is // non-recursive). Note that even a monotonic top-k will consolidate // if necessary to ensure this property. PhysicallyMonotonic(!ctx.is_rec) } fn negate(&self, _ctx: &Context<Self::Domain>, _input: Self::Domain) -> Self::Domain { // Negation produces retractions, so it breaks physical monotonicity. PhysicallyMonotonic(false) } fn threshold( &self, ctx: &Context<Self::Domain>, _input: Self::Domain, _threshold_plan: &ThresholdPlan, ) -> Self::Domain { // Thresholding is a special kind of reduction, so the judgment // here is the same as for reduce.
PhysicallyMonotonic(!ctx.is_rec) } fn union( &self, _ctx: &Context<Self::Domain>, inputs: Vec<Self::Domain>, _consolidate_output: bool, ) -> Self::Domain { // Union just concatenates the inputs, so is physically monotonic iff // all inputs are physically monotonic. // (Even when we do consolidation, we can't be certain that a negative diff from an input // is actually cancelled out. For example, Union outputs negative diffs when it's part of // the EXCEPT pattern.) PhysicallyMonotonic(inputs.iter().all(|monotonic| monotonic.0)) } fn arrange_by( &self, _ctx: &Context<Self::Domain>, input: Self::Domain, _forms: &AvailableCollections, _input_key: &Option<Vec<MirScalarExpr>>, _input_mfp: &MapFilterProject, ) -> Self::Domain { // `Plan::ArrangeBy` is better thought of as `ensure_collections`, i.e., it // makes sure that the requested `forms` are present and builds them only // if not already available. Many `forms` may be requested, as the downstream // consumers of this operator may be many different ones (as we support plan graphs, // not only trees). The `forms` include arrangements, but also just the collection // in `raw` form. So for example, if the input is arranged, then `ArrangeBy` could // be used to request a collection instead. `ArrangeBy` will only build an arrangement // from scratch when the input is not already arranged in a requested `form`. In our // physical monotonicity analysis, we presently cannot assert whether only arrangements // that `ArrangeBy` built will be used by downstream consumers, or if other `forms` that // do not preserve physical monotonicity would be accessed instead. So we conservatively // return the physical monotonicity judgment made for the input. // TODO(vmarcos): Consider in the future enriching the analysis to track physical // monotonicity not by the output of an operator, but by `forms` made available for each // collection. With this information, we could eventually make more refined judgements // at the points of use. input } }
get
identifier_name
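A minimal check of the lattice orientation chosen above: `bottom()` (true, i.e. physically monotonic) sits below `top()` (false), `join` degrades toward `top()`, and `meet` recovers toward `bottom()`. The test name is hypothetical; the trait imports match those at the top of the file.

#[test]
fn physically_monotonic_lattice_orientation() {
    let bot = PhysicallyMonotonic::bottom();
    let top = PhysicallyMonotonic::top();
    assert!(bot.less_equal(&top));   // Reverse ordering: true sits below false here
    assert_eq!(bot.join(&top), top); // join is boolean AND, moving toward top
    assert_eq!(bot.meet(&top), bot); // meet is boolean OR, moving toward bottom
}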
physically_monotonic.rs
// Copyright Materialize, Inc. and contributors. All rights reserved. // // Use of this software is governed by the Business Source License // included in the LICENSE file. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0. //! Implementation of [crate::plan::interpret::Interpreter] for inference //! of physical monotonicity in single-time dataflows. use std::cmp::Reverse; use std::collections::BTreeSet; use std::marker::PhantomData; use differential_dataflow::lattice::Lattice; use mz_expr::{EvalError, Id, MapFilterProject, MirScalarExpr, TableFunc}; use mz_repr::{Diff, GlobalId, Row}; use timely::PartialOrder; use crate::plan::interpret::{BoundedLattice, Context, Interpreter}; use crate::plan::join::JoinPlan; use crate::plan::reduce::{KeyValPlan, ReducePlan}; use crate::plan::threshold::ThresholdPlan; use crate::plan::top_k::TopKPlan; use crate::plan::{AvailableCollections, GetPlan}; /// Represents a boolean physical monotonicity property, where the bottom value /// is true (i.e., physically monotonic) and the top value is false (i.e. not /// physically monotonic). #[derive(Debug, Default, Clone, Copy, PartialEq, Eq)] pub struct PhysicallyMonotonic(pub bool); impl BoundedLattice for PhysicallyMonotonic { fn top() -> Self { PhysicallyMonotonic(false) } fn bottom() -> Self { PhysicallyMonotonic(true) } } impl Lattice for PhysicallyMonotonic { fn join(&self, other: &Self) -> Self { PhysicallyMonotonic(self.0 && other.0) } fn meet(&self, other: &Self) -> Self { PhysicallyMonotonic(self.0 || other.0) } } impl PartialOrder for PhysicallyMonotonic { fn less_equal(&self, other: &Self) -> bool { // We employ `Reverse` ordering for `bool` here to be consistent with // the choice of `top()` being false and `bottom()` being true. Reverse::<bool>(self.0) <= Reverse::<bool>(other.0) } } /// Provides a concrete implementation of an interpreter that determines if /// the output of `Plan` expressions is physically monotonic in a single-time /// dataflow, potentially taking into account judgments about its inputs. We /// note that in a single-time dataflow, expressions in non-recursive contexts /// (i.e., outside of `LetRec` values) process streams that are at a minimum /// logically monotonic, i.e., may contain retractions but would cease to do /// so if consolidated. Detecting physical monotonicity, i.e., the absence /// of retractions in a stream, enables us to disable forced consolidation /// whenever possible. #[derive(Debug)] pub struct SingleTimeMonotonic<'a, T = mz_repr::Timestamp> { monotonic_ids: &'a BTreeSet<GlobalId>, _phantom: PhantomData<T>, } impl<'a, T> SingleTimeMonotonic<'a, T> { /// Instantiates an interpreter for single-time physical monotonicity /// analysis. pub fn new(monotonic_ids: &'a BTreeSet<GlobalId>) -> Self { SingleTimeMonotonic { monotonic_ids, _phantom: Default::default(), } } } impl<T> Interpreter<T> for SingleTimeMonotonic<'_, T> { type Domain = PhysicallyMonotonic; fn constant( &self, _ctx: &Context<Self::Domain>, rows: &Result<Vec<(Row, T, Diff)>, EvalError>, ) -> Self::Domain { // A constant is physically monotonic iff the constant is an `EvalError` // or all its rows have `Diff` values greater than zero. 
PhysicallyMonotonic( rows.as_ref() .map_or(true, |rows| rows.iter().all(|(_, _, diff)| *diff > 0)), ) } fn get( &self, ctx: &Context<Self::Domain>, id: &Id, _keys: &AvailableCollections, _plan: &GetPlan, ) -> Self::Domain { // A get operator yields physically monotonic output iff the corresponding // `Plan::Get` is on a local or global ID that is known to provide physically // monotonic input. The way this becomes known is through the interpreter itself // for non-recursive local IDs or through configuration for the global IDs of // monotonic sources and indexes. Recursive local IDs are always assumed to // break physical monotonicity. // TODO(vmarcos): Consider in the future if we can ascertain whether the // restrictions on recursive local IDs can be relaxed to take into account only // the interpreter judgement directly. PhysicallyMonotonic(match id { Id::Local(id) => ctx .bindings .get(id) .map_or(false, |entry| !entry.is_rec && entry.value.0), Id::Global(id) => self.monotonic_ids.contains(id), }) } fn mfp( &self, _ctx: &Context<Self::Domain>, input: Self::Domain, _mfp: &MapFilterProject, _input_key_val: &Option<(Vec<MirScalarExpr>, Option<Row>)>, ) -> Self::Domain { // In a single-time context, we just propagate the monotonicity // status of the input input } fn flat_map( &self, _ctx: &Context<Self::Domain>, input: Self::Domain, _func: &TableFunc, _exprs: &Vec<MirScalarExpr>, _mfp: &MapFilterProject, _input_key: &Option<Vec<MirScalarExpr>>, ) -> Self::Domain { // In a single-time context, we just propagate the monotonicity // status of the input input } fn join( &self, _ctx: &Context<Self::Domain>, inputs: Vec<Self::Domain>, _plan: &JoinPlan, ) -> Self::Domain { // When we see a join, we must consider that the inputs could have // been `Plan::Get`s on arrangements. These are not in general safe // wrt. producing physically monotonic data. So here, we conservatively // judge the output of a join to be physically monotonic iff all // inputs are physically monotonic. PhysicallyMonotonic(inputs.iter().all(|monotonic| monotonic.0)) } fn reduce( &self, ctx: &Context<Self::Domain>, _input: Self::Domain, _key_val_plan: &KeyValPlan, _plan: &ReducePlan, _input_key: &Option<Vec<MirScalarExpr>>, ) -> Self::Domain { // In a recursive context, reduce will advance across timestamps // and may need to retract. Outside of a recursive context, the // fact that the dataflow is single-time implies no retraction // is emitted out of reduce. This makes the output physically // monotonic, regardless of the input judgment. All `ReducePlan` // variants behave the same in this respect. PhysicallyMonotonic(!ctx.is_rec) } fn top_k( &self, ctx: &Context<Self::Domain>, _input: Self::Domain, _top_k_plan: &TopKPlan, ) -> Self::Domain { // Top-k behaves like a reduction, producing physically monotonic // output when exposed to a single time (i.e., when the context is // non-recursive). Note that even a monotonic top-k will consolidate // if necessary to ensure this property. PhysicallyMonotonic(!ctx.is_rec) } fn negate(&self, _ctx: &Context<Self::Domain>, _input: Self::Domain) -> Self::Domain { // Negation produces retractions, so it breaks physical monotonicity. PhysicallyMonotonic(false) } fn threshold( &self, ctx: &Context<Self::Domain>, _input: Self::Domain, _threshold_plan: &ThresholdPlan, ) -> Self::Domain { // Thresholding is a special kind of reduction, so the judgment // here is the same as for reduce.
PhysicallyMonotonic(!ctx.is_rec) } fn union( &self, _ctx: &Context<Self::Domain>, inputs: Vec<Self::Domain>, _consolidate_output: bool, ) -> Self::Domain { // Union just concatenates the inputs, so is physically monotonic iff // all inputs are physically monotonic. // (Even when we do consolidation, we can't be certain that a negative diff from an input // is actually cancelled out. For example, Union outputs negative diffs when it's part of // the EXCEPT pattern.) PhysicallyMonotonic(inputs.iter().all(|monotonic| monotonic.0)) }
&self, _ctx: &Context<Self::Domain>, input: Self::Domain, _forms: &AvailableCollections, _input_key: &Option<Vec<MirScalarExpr>>, _input_mfp: &MapFilterProject, ) -> Self::Domain { // `Plan::ArrangeBy` is better thought of as `ensure_collections`, i.e., it // makes sure that the requested `forms` are present and builds them only // if not already available. Many `forms` may be requested, as the downstream // consumers of this operator may be many different ones (as we support plan graphs, // not only trees). The `forms` include arrangements, but also just the collection // in `raw` form. So for example, if the input is arranged, then `ArrangeBy` could // be used to request a collection instead. `ArrangeBy` will only build an arrangement // from scratch when the input is not already arranged in a requested `form`. In our // physical monotonicity analysis, we presently cannot assert whether only arrangements // that `ArrangeBy` built will be used by downstream consumers, or if other `forms` that // do not preserve physical monotonicity would be accessed instead. So we conservatively // return the physical monotonicity judgment made for the input. // TODO(vmarcos): Consider in the future enriching the analysis to track physical // monotonicity not by the output of an operator, but by `forms` made available for each // collection. With this information, we could eventually make more refined judgements // at the points of use. input } }
fn arrange_by(
random_line_split
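The "all inputs physically monotonic" rule used by `join` and `union` above is exactly the lattice join folded over the inputs, since `join` is boolean AND here. A hypothetical helper making that explicit:

fn combine_inputs(inputs: &[PhysicallyMonotonic]) -> PhysicallyMonotonic {
    // Folding from bottom() (true) with join (&&) yields true iff every
    // input is physically monotonic, matching `iter().all(...)` above.
    inputs.iter().fold(PhysicallyMonotonic::bottom(), |acc, m| acc.join(m))
}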
web.rs
//! This is the initial MVP of the events service to get the BDD tests to work use db; use models::user::IOModel; use models::user::pg::PgModel as UserModel; use rouille; use rouille::input::post; use rouille::{Request, Response}; use services::user; use services::user::Service as UserService; use std::collections::HashMap; use std::error::Error; use std::fmt; use std::io; use std::iter::FromIterator; use std::str::FromStr; use uuid::Uuid; // // Runs a web server that passes the BDD tests // pub fn run() { eprintln!("Listening on 0.0.0.0:8080"); rouille::start_server("0.0.0.0:8080", |request| { rouille::log(request, io::stderr(), || { let conn = &db::connection(); let user_model = &UserModel::new(conn); let user_service = &UserService::new(user_model, b"...."); router!(request, (GET) (/status) => { status(user_model) }, (POST) (/oauth/register) => { oauth_register(user_service, request) }, (GET) (/oauth/register/confirm) => { oauth_register_confirm(user_service, request) }, (POST) (/oauth/token) => { oauth_token(user_service, request) }, (GET) (/oauth/me) => { me(user_service, request) }, _ => Response::empty_404() ) }) }) } // // Handlers // #[derive(Serialize, Debug)] struct Status<'a> { pub status: &'a str, } /// this is the status endpoint fn status(user_model: &UserModel) -> Response { let status = user_model .find(&Uuid::new_v4()) .map(|_| Status { status: "up" }) .unwrap_or_else(|_| Status { status: "down" }); Response::json(&status) } #[derive(Deserialize)] struct RegisterForm { name: String, password: String, email: String, } /// this is the user registration endpoint /// /// This accepts a json POST of [`RegisterForm`] fn oauth_register(user_service: &UserService, request: &Request) -> Response { let data: RegisterForm = try_or_400!(rouille::input::json_input(request)); let req = user::RegisterRequest { name: &data.name, password: &data.password, email: &data.email, }; user_service .register(&req) .map(Response::from) .unwrap_or_else(Response::from) } /// this is the user confirmation endpoint /// /// This is a GET request for a query string of `?confirm_token` fn oauth_register_confirm(user_service: &UserService, request: &Request) -> Response { let confirm_token: String = try_or_400!( request .get_param("confirm_token") .ok_or(WebError::MissingConfirmToken) ); let req = &user::ConfirmNewUserRequest { confirm_token: &confirm_token, }; user_service .confirm_new_user(req) .map(Response::from) .unwrap_or_else(Response::from) } /// this is the oauth token endpoint for making password or refresh grants against /// /// This follows the protocol set up by the following specs /// /// - [password grant](https://tools.ietf.org/html/rfc6749#section-4.3.2) /// - [refresh grant](https://tools.ietf.org/html/rfc6749#section-6) /// fn oauth_token(user_service: &UserService, request: &Request) -> Response { let form = &try_or_400!(post::raw_urlencoded_post_input(request)); let grant_type = try_or_400!(find_grant_type(form)); match grant_type { GrantType::Password => { let req = &try_or_400!(form_to_password_grant(form)); user_service .password_grant(req) .map(Response::from) .unwrap_or_else(Response::from) } GrantType::Refresh => { let req = &try_or_400!(form_to_refresh_grant(form)); user_service .refresh_token_grant(req) .map(Response::from) .unwrap_or_else(Response::from) } } } /// The current user handler /// /// This requires an `Authorization: Bearer {access_token}` header to make the request fn me(user_service: &UserService, request: &Request) -> Response { let access_token =
request.header("Authorization") .and_then(move |x| x.get(7..)) // Get everything after "Bearer " .unwrap_or(""); let req = &user::CurrentUserRequest { access_token }; user_service .current_user(req) .map(Response::from) .unwrap_or_else(Response::from) } // Converters // impl From<user::CurrentUserResponse> for Response { fn from(result: user::CurrentUserResponse) -> Self { Response::json(&result) } } impl From<user::AccessTokenResponse> for Response { fn from(result: user::AccessTokenResponse) -> Self { Response::json(&result) } } impl From<user::ConfirmNewUserResponse> for Response { fn from(result: user::ConfirmNewUserResponse) -> Self { Response::json(&result) } } impl From<user::RegisterResponse> for Response { fn from(result: user::RegisterResponse) -> Self { Response::json(&result) } } /// /// This is a private Error type for things that can go wrong /// #[derive(Debug, PartialEq)] enum WebError { MissingConfirmToken, MissingPassword, MissingUsername, MissingRefreshToken, InvalidGrantType, } impl fmt::Display for WebError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{:?}", self) } } impl Error for WebError { fn description(&self) -> &str { use self::WebError::*; match *self { MissingUsername => "missing username", MissingPassword => "missing password", MissingRefreshToken => "missing refresh_token", MissingConfirmToken => "missing confirm token", InvalidGrantType => "invalid grant type", } } } impl From<user::ServiceError> for Response { fn from(err: user::ServiceError) -> Self { use services::user::ServiceError::*; match err { InvalidConfirmToken => Response::text("InvalidConfirmToken").with_status_code(400), PermissionDenied => Response::text("").with_status_code(403), UserExists => Response::text("UserExists").with_status_code(403), DBError(_) => Response::text("").with_status_code(500), } } } /// /// This is an enum to represent the `grant_type` strings, `"password"` and `"refresh_token"` /// /// Note: We may want to move this to the service module #[derive(Debug, PartialEq)] enum GrantType { Password, Refresh, } impl FromStr for GrantType { type Err = WebError; fn from_str(s: &str) -> Result<Self, Self::Err> { match s { "password" => Ok(GrantType::Password), "refresh_token" => Ok(GrantType::Refresh), _ => Err(WebError::InvalidGrantType), } } } #[test] fn test_grant_type_from_str() { assert_eq!( GrantType::from_str("password").unwrap(), GrantType::Password ) } /// /// # Helpers /// /// /// Finds the `grant_type` in the Vector of form fields /// type Fields = [(String, String)]; fn find_grant_type(fields: &Fields) -> Result<GrantType, WebError> { for &(ref k, ref v) in fields.iter() { if k == "grant_type" { return GrantType::from_str(v); } } Err(WebError::InvalidGrantType) } #[test] fn test_find_grant_type() { assert_eq!( find_grant_type(&vec![ ("x".into(), "y".into()), ("grant_type".into(), "password".into()), ("a".into(), "b".into()), ]).unwrap(), GrantType::Password ); assert_eq!( find_grant_type(&vec![ ("x".into(), "y".into()), ("grant_type".into(), "refresh_token".into()), ("a".into(), "b".into()), ]).unwrap(), GrantType::Refresh ); assert_eq!( find_grant_type(&vec![("x".into(), "y".into()), ("a".into(), "b".into())]).unwrap_err(), WebError::InvalidGrantType ); } fn form_to_map(fields: &Fields) -> HashMap<&str, &str> { HashMap::from_iter(fields.iter().map(|&(ref k, ref v)| { let k: &str = k; let v: &str = v; (k, v) })) } /// /// Converts the Form Fields to a `PasswordGrantRequest` /// fn form_to_password_grant( fields: &[(String, String)], ) ->
Result<user::PasswordGrantRequest, WebError>
#[test] fn test_form_to_password_grant() { assert_eq!( form_to_password_grant(&vec![ ("grant_type".into(), "password".into()), ("username".into(), "test-user".into()), ("password".into(), "test-password".into()), ]).unwrap(), user::PasswordGrantRequest { username: "test-user".into(), password: "test-password".into(), } ); assert_eq!( form_to_password_grant(&vec![]).unwrap_err(), WebError::MissingUsername ); assert_eq!( form_to_password_grant(&vec![("username".into(), "test-user".into())]).unwrap_err(), WebError::MissingPassword ); assert_eq!( form_to_password_grant(&vec![("password".into(), "test-pass".into())]).unwrap_err(), WebError::MissingUsername ); } /// Converts the Form Fields into a `RefreshGrantRequest` fn form_to_refresh_grant(fields: &Fields) -> Result<user::RefreshGrantRequest, WebError> { let fields = form_to_map(fields); let token = fields .get("refresh_token") .ok_or(WebError::MissingRefreshToken)?; Ok(user::RefreshGrantRequest { refresh_token: token, }) } #[test] fn test_form_to_refresh_grant() { assert_eq!( form_to_refresh_grant(&vec![ ("grant_type".into(), "refesh_token".into()), ("refresh_token".into(), "12345".into()), ]).unwrap(), user::RefreshGrantRequest { refresh_token: "12345".into(), } ); assert_eq!( form_to_refresh_grant(&vec![]).unwrap_err(), WebError::MissingRefreshToken ); }
{ let fields = form_to_map(fields); let username = fields.get("username").ok_or(WebError::MissingUsername)?; let password = fields.get("password").ok_or(WebError::MissingPassword)?; Ok(user::PasswordGrantRequest { username, password }) }
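A note on the token extraction in the `me` handler above: `str::get(7..)` returns `None` instead of panicking when the header is shorter than the `"Bearer "` prefix, so malformed headers degrade to an empty token rather than a crash. It does not, however, verify that the first seven bytes actually spell `"Bearer "`. The following is a minimal sketch of that behaviour, lifted into a hypothetical standalone helper (`bearer_token` is not part of the module above) so it can be tested without a `Request`:

/// Hypothetical helper mirroring the slicing done in `me`.
fn bearer_token(header: Option<&str>) -> &str {
    header.and_then(|h| h.get(7..)).unwrap_or("")
}

#[test]
fn test_bearer_token() {
    assert_eq!(bearer_token(Some("Bearer abc123")), "abc123");
    // Exactly the prefix: slicing at 7 yields the empty remainder.
    assert_eq!(bearer_token(Some("Bearer ")), "");
    // Shorter than the prefix: `get(7..)` is `None`, so we fall back to "".
    assert_eq!(bearer_token(Some("short")), "");
    assert_eq!(bearer_token(None), "");
}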
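Another subtlety worth calling out: `form_to_map` collapses the ordered list of urlencoded pairs into a `HashMap`, so if a client submits the same field twice, the later occurrence silently wins. That follows from `FromIterator` inserting pairs in iteration order. A small, self-contained sketch of that behaviour (standard library only):

use std::collections::HashMap;
use std::iter::FromIterator;

fn main() {
    let fields: Vec<(String, String)> = vec![
        ("username".into(), "alice".into()),
        ("username".into(), "bob".into()),
    ];
    // Same shape as `form_to_map`: later duplicates overwrite earlier ones.
    let map: HashMap<&str, &str> =
        HashMap::from_iter(fields.iter().map(|&(ref k, ref v)| (k.as_str(), v.as_str())));
    assert_eq!(map.get("username"), Some(&"bob"));
    assert_eq!(map.len(), 1);
}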
chmod.rs
//
// Copyright (c) 2018, The MesaLock Linux Project Contributors
// All rights reserved.
//
// This work is licensed under the terms of the BSD 3-Clause License.
// For a copy, see the LICENSE file.
//
// This file incorporates work covered by the following copyright and
// permission notice:
//
//     Copyright (c) 2013-2018, Jordi Boggiano
//     Copyright (c) 2013-2018, Alex Lyon
//
//     Permission is hereby granted, free of charge, to any person obtaining a
//     copy of this software and associated documentation files (the
//     "Software"), to deal in the Software without restriction, including
//     without limitation the rights to use, copy, modify, merge, publish,
//     distribute, sublicense, and/or sell copies of the Software, and to
//     permit persons to whom the Software is furnished to do so, subject to
//     the following conditions:
//
//     The above copyright notice and this permission notice shall be included
//     in all copies or substantial portions of the Software.
//
//     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
//     OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
//     MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
//     IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
//     CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
//     TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
//     SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
//

use util;
use {ArgsIter, MesaError, Result, UtilSetup, UtilWrite};

use clap::{AppSettings, Arg, ArgGroup, OsValues};
use std::ffi::{OsStr, OsString};
use std::fs;
use std::io::{self, Write};
use std::os::unix::fs::{MetadataExt, PermissionsExt};
use std::path::{Path, PathBuf};
use std::result::Result as StdResult;
use uucore::fs::display_permissions_unix;
use uucore::mode;
use walkdir::WalkDir;

const NAME: &str = "chmod";
pub(crate) const DESCRIPTION: &str = "Change the file permissions of given files";
const MODE_SYNTAX: &str = "
Each MODE is of the form '[ugoa]*([-+=]([rwxXst]*|[ugo]))+|[-+=]?[0-7]+'.
";

#[derive(Fail, Debug)]
enum ChmodError {
    #[fail(display = "cannot stat attributes of '{}': {}", _0, _1)]
    Stat(String, #[cause] io::Error),
}

#[derive(PartialEq)]
enum Verbosity {
    None,
    Changes,
    Quiet,
    Verbose,
}

enum MessageKind {
    Stdout,
    Stderr,
}

// FIXME: find a better way to store this (preferably avoid allocating)
// NOTE: the message setup is to avoid duplicating chmod_file() and change_file() for every generic
//       type
struct Message {
    kind: MessageKind,
    data: String,
}

impl Message {
    pub fn stdout(data: String) -> Self {
        Self {
            kind: MessageKind::Stdout,
            data,
        }
    }

    pub fn stderr(data: String) -> Self {
        Self {
            kind: MessageKind::Stderr,
            data,
        }
    }
}

struct Options<'a> {
    verbosity: Verbosity,
    preserve_root: bool,
    recursive: bool,
    fmode: Option<u32>,
    cmode: Option<&'a str>,
    current_dir: Option<PathBuf>,
}

pub fn execute<S, T>(setup: &mut S, args: T) -> Result<()>
where
    S: UtilSetup,
    T: ArgsIter,
{
    let matches = {
        let app = util_app!(NAME)
            .after_help(MODE_SYNTAX)
            .setting(AppSettings::AllowLeadingHyphen)
            .arg(Arg::with_name("recursive")
                .long("recursive")
                .short("R")
                .help("change files and directories recursively"))
            .arg(Arg::with_name("reference")
                .long("reference")
                .takes_value(true)
                .value_name("RFILE")
                .help("use RFILE's mode instead of provided MODE values"))
            .arg(Arg::with_name("preserve-root")
                .long("preserve-root")
                .help("fail to operate recursively on '/'"))
            .arg(Arg::with_name("no-preserve-root")
                .long("no-preserve-root")
                .overrides_with("preserve-root")
                .help("do not treat '/' specially (the default)"))
            .arg(Arg::with_name("verbose")
                .long("verbose")
                .short("v")
                .help("output a diagnostic for every file processed"))
            .arg(Arg::with_name("quiet")
                .long("quiet")
                .short("f")
                .visible_alias("silent")
                .help("suppress most error messages"))
            .arg(Arg::with_name("changes")
                .long("changes")
                .short("c")
                .help("like verbose but report only when a change is made"))
            .group(ArgGroup::with_name("verbosity")
                .args(&["verbose", "quiet", "changes"]))
            // FIXME: not sure how to tell clap that MODE can be missing if --reference is
            //        given by the user.  clap is also unhappy that FILES (which has an
            //        index that occurs later than MODE) is required while MODE is not
            .arg(Arg::with_name("MODE")
                .index(1)
                .validator_os(validate_mode)
                .required(true))
                //.conflicts_with("reference"))
            .arg(Arg::with_name("FILES")
                .index(2)
                .required(true)
                .multiple(true));

        app.get_matches_from_safe(args)?
    };

    let verbosity = if matches.is_present("changes") {
        Verbosity::Changes
    } else if matches.is_present("quiet") {
        Verbosity::Quiet
    } else if matches.is_present("verbose") {
        Verbosity::Verbose
    } else {
        Verbosity::None
    };

    let preserve_root = matches.is_present("preserve-root");
    let recursive = matches.is_present("recursive");
    let fmode = match matches.value_of_os("reference") {
        Some(ref_file) => Some(fs::metadata(ref_file)
            .map(|data| data.mode())
            .map_err(|e| ChmodError::Stat(ref_file.to_string_lossy().into(), e))?),
        None => None,
    };

    let current_dir = setup.current_dir().map(|p| p.to_owned());
    let (_, stdout, stderr) = setup.stdio();

    let mut chmoder = Chmoder {
        stdout: stdout.lock()?,
        stderr: stderr.lock()?,
    };

    let options = Options {
        verbosity: verbosity,
        preserve_root: preserve_root,
        recursive: recursive,
        fmode: fmode,
        cmode: matches.value_of("MODE"),
        current_dir: current_dir,
    };

    let exitcode = chmoder.chmod(&options, matches.values_of_os("FILES").unwrap())?;

    if exitcode == 0 {
        Ok(())
    } else {
        Err(MesaError {
            exitcode: exitcode,
            progname: None,
            err: None,
        })
    }
}

fn validate_mode(arg: &OsStr) -> StdResult<(), OsString> {
    // NOTE: used to use regex to match the mode, but that caused the binary size to increase
    //       considerably
    arg.to_str()
        .ok_or_else(|| "mode was not a string (must be encoded using UTF-8)".into())
        .and_then(|s| {
            for mode in s.split(',') {
                if mode::parse_numeric(0, mode).is_err()
                    && mode::parse_symbolic(0, mode, false).is_err()
                {
                    return Err("found invalid character in mode string".into());
                }
            }
            Ok(())
        })
}

struct Chmoder<O, E>
where
    O: Write,
    E: Write,
{
    stdout: O,
    stderr: E,
}

impl<O, E> Chmoder<O, E>
where
    O: Write,
    E: Write,
{
    fn chmod<'b>(&mut self, options: &Options, files: OsValues<'b>) -> Result<i32> {
        let mut r = 0;
        let mut msgs = [None, None];

        for filename in files {
            let file = util::actual_path(&options.current_dir, filename);

            r |= if file.is_dir() && options.recursive {
                self.chmod_dir(options, &mut msgs, &file)
            } else {
                let res = chmod_file(options, &mut msgs, &file);
                self.write_msgs(&mut msgs).map(|_| res)
            }?;
        }
        Ok(r)
    }

    fn chmod_dir(
        &mut self,
        options: &Options,
        msgs: &mut [Option<Message>; 2],
        file: &Path,
    ) -> Result<i32> {
        let mut r = 0;

        if !options.preserve_root || file != Path::new("/") {
            let walker = WalkDir::new(file).contents_first(true);
            for entry in walker {
                match entry {
                    Ok(entry) => {
                        r |= chmod_file(options, msgs, &entry.path());
                        self.write_msgs(msgs)?;
                    }
                    Err(f) => display_msg!(self.stderr, "{}", f)?,
                }
            }
        } else {
            display_msg!(
                self.stderr,
                "could not change permissions of directory '{}'",
                file.display()
            )?;
            r = 1;
        }

        Ok(r)
    }

    fn write_msgs(&mut self, msgs: &mut [Option<Message>; 2]) -> Result<()> {
        for msg in msgs {
            if let Some(msg) = msg {
                match msg.kind {
                    MessageKind::Stdout => display_msg!(self.stdout, "{}", msg.data)?,
                    MessageKind::Stderr => display_msg!(self.stderr, "{}", msg.data)?,
                }
            }
            *msg = None;
        }
        Ok(())
    }
}

#[cfg(any(unix, target_os = "redox"))]
fn chmod_file(options: &Options, msgs: &mut [Option<Message>; 2], file: &Path) -> i32 {
    let mut fperm = match fs::metadata(file) {
        Ok(meta) => meta.mode() & 0o7777,
        Err(err) => {
            if options.verbosity != Verbosity::Quiet {
                msgs[0] = Some(Message::stderr(format!(
                    "could not stat '{}': {}",
                    file.display(),
                    err
                )));
            }
            return 1;
        }
    };

    match options.fmode {
        Some(mode) => change_file(options, msgs, fperm, mode, file),
        None => {
            // cmode is guaranteed to be Some in this case
            let cmode_unwrapped = options.cmode.clone().unwrap();
            for mode in cmode_unwrapped.split(',') {
                let arr: &[char] = &['0', '1', '2', '3', '4', '5', '6', '7'];
                let result = if mode.contains(arr) {
                    mode::parse_numeric(fperm, mode)
                } else {
                    mode::parse_symbolic(fperm, mode, file.is_dir())
                };
                match result {
                    Ok(mode) => {
                        change_file(options, msgs, fperm, mode, file);
                        fperm = mode;
                    }
                    Err(f) => {
                        if options.verbosity != Verbosity::Quiet {
                            msgs[0] = Some(Message::stderr(format!("failed to parse mode: {}", f)));
                        }
                        return 1;
                    }
                }
            }
            0
        }
    }
}

#[cfg(unix)]
fn change_file(
    options: &Options,
    msgs: &mut [Option<Message>; 2],
    fperm: u32,
    mode: u32,
    file: &Path,
) -> i32 {
    if fperm == mode {
        if options.verbosity == Verbosity::Verbose {
            msgs[0] = Some(Message::stdout(format!(
                "mode of '{}' retained as {:o} ({})",
                file.display(),
                fperm,
                display_permissions_unix(fperm)
            )));
        }
        return 0;
    }

    let mut exitcode = 0;

    let res = fs::set_permissions(file, fs::Permissions::from_mode(mode));
    if let Err(err) = res {
        let mut count = 0;
        if options.verbosity != Verbosity::Quiet {
            msgs[0] = Some(Message::stderr(format!(
                "could not set permissions: {}",
                err
            )));
            count += 1;
        }
        if options.verbosity == Verbosity::Verbose {
            msgs[count] = Some(Message::stdout(format!(
                "failed to change mode of file '{}' from {:o} ({}) to {:o} ({})",
                file.display(),
                fperm,
                display_permissions_unix(fperm),
                mode,
                display_permissions_unix(mode)
            )));
        }
        exitcode = 1;
    } else {
        if options.verbosity == Verbosity::Verbose || options.verbosity == Verbosity::Changes {
            msgs[0] = Some(Message::stdout(format!(
                "mode of '{}' changed from {:o} ({}) to {:o} ({})",
                file.display(),
                fperm,
                display_permissions_unix(fperm),
                mode,
                display_permissions_unix(mode)
            )));
        }
    }

    exitcode
}
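The dispatch inside `chmod_file` is worth spelling out: each comma-separated clause of the MODE argument is routed to the numeric parser if it contains any octal digit, and to the symbolic parser otherwise, using `str::contains` with a `&[char]` pattern. A minimal sketch of just that decision, using only the standard library (`ModeKind` and `classify` are hypothetical names, not part of the utility):

#[derive(Debug, PartialEq)]
enum ModeKind {
    Numeric,
    Symbolic,
}

// Mirrors the dispatch in `chmod_file`: any octal digit anywhere in the
// clause routes it to the numeric parser, otherwise to the symbolic one.
fn classify(clause: &str) -> ModeKind {
    let octal: &[char] = &['0', '1', '2', '3', '4', '5', '6', '7'];
    if clause.contains(octal) {
        ModeKind::Numeric
    } else {
        ModeKind::Symbolic
    }
}

#[test]
fn test_classify() {
    assert_eq!(classify("644"), ModeKind::Numeric);
    assert_eq!(classify("u+x"), ModeKind::Symbolic);
    // Clauses are split on ',' before classification, as in `chmod_file`.
    for clause in "u+x,go-w".split(',') {
        assert_eq!(classify(clause), ModeKind::Symbolic);
    }
}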
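One design choice in `chmod_dir` deserves a note: the walk uses `WalkDir::new(file).contents_first(true)`, which yields a directory's contents before the directory entry itself. A plausible reason (my inference, not stated in the source) is that a recursive chmod applying a mode that removes search permission from directories must change the children first, or the walker could be locked out of subtrees it has not yet visited. A sketch of the traversal order, assuming the `walkdir` crate:

use walkdir::WalkDir;

fn main() {
    // With `contents_first(true)`, entries are yielded bottom-up: children
    // print before their parent directories.
    for entry in WalkDir::new(".").contents_first(true) {
        match entry {
            Ok(entry) => println!("{}", entry.path().display()),
            Err(err) => eprintln!("walk error: {}", err),
        }
    }
}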
file.rs
use crate::reader::{LittleEndian, ReadBytesExt, Reader};
use std::fmt;
use std::io::Read;
use thiserror::Error;

const ELF_MAGIC: [u8; 4] = [0x7f, b'E', b'L', b'F'];

fn show_machine(value: u16) -> &'static str {
    match value {
        0 => "No machine",
        1 => "AT&T WE 32100",
        2 => "SUN SPARC",
        3 => "Intel 80386",
        4 => "Motorola m68k family",
        5 => "Motorola m88k family",
        6 => "Intel MCU",
        7 => "Intel 80860",
        8 => "MIPS R3000 big-endian",
        9 => "IBM System/370",
        10 => "MIPS R3000 little-endian",
        15 => "HPPA",
        16 => "reserved 16",
        17 => "Fujitsu VPP500",
        18 => "Sun's v8plus",
        19 => "Intel 80960",
        20 => "PowerPC",
        21 => "PowerPC 64-bit",
        22 => "IBM S390",
        23 => "IBM SPU/SPC",
        36 => "NEC V800 series",
        37 => "Fujitsu FR20",
        38 => "TRW RH-32",
        39 => "Motorola RCE",
        40 => "ARM",
        41 => "Digital Alpha",
        42 => "Hitachi SH",
        43 => "SPARC v9 64-bit",
        44 => "Siemens Tricore",
        45 => "Argonaut RISC Core",
        46 => "Hitachi H8/300",
        47 => "Hitachi H8/300H",
        48 => "Hitachi H8S",
        49 => "Hitachi H8/500",
        50 => "Intel Merced",
        51 => "Stanford MIPS-X",
        52 => "Motorola Coldfire",
        53 => "Motorola M68HC12",
        54 => "Fujitsu MMA Multimedia Accelerator",
        55 => "Siemens PCP",
        56 => "Sony nCPU embedded RISC",
        57 => "Denso NDR1 microprocessor",
        58 => "Motorola Start*Core processor",
        59 => "Toyota ME16 processor",
        60 => "STMicroelectronics ST100 processor",
        61 => "Advanced Logic Corp. Tinyj emb.fam",
        62 => "AMD x86-64 architecture",
        63 => "Sony DSP Processor",
        64 => "Digital PDP-10",
        65 => "Digital PDP-11",
        66 => "Siemens FX66 microcontroller",
        67 => "STMicroelectronics ST9+ 8/16 mc",
        68 => "STMicroelectronics ST7 8 bit mc",
        69 => "Motorola MC68HC16 microcontroller",
        70 => "Motorola MC68HC11 microcontroller",
        71 => "Motorola MC68HC08 microcontroller",
        72 => "Motorola MC68HC05 microcontroller",
        73 => "Silicon Graphics SVx",
        74 => "STMicroelectronics ST19 8 bit mc",
        75 => "Digital VAX",
        76 => "Axis Communications 32-bit emb.proc",
        77 => "Infineon Technologies 32-bit emb.proc",
        78 => "Element 14 64-bit DSP Processor",
        79 => "LSI Logic 16-bit DSP Processor",
        80 => "Donald Knuth's educational 64-bit proc",
        81 => "Harvard University machine-independent object files",
        82 => "SiTera Prism",
        83 => "Atmel AVR 8-bit microcontroller",
        84 => "Fujitsu FR30",
        85 => "Mitsubishi D10V",
        86 => "Mitsubishi D30V",
        87 => "NEC v850",
        88 => "Mitsubishi M32R",
        89 => "Matsushita MN10300",
        90 => "Matsushita MN10200",
        91 => "picoJava",
        92 => "OpenRISC 32-bit embedded processor",
        93 => "ARC International ARCompact",
        94 => "Tensilica Xtensa Architecture",
        95 => "Alphamosaic VideoCore",
        96 => "Thompson Multimedia General Purpose Proc",
        97 => "National Semi. 32000",
        98 => "Tenor Network TPC",
        99 => "Trebia SNP 1000",
        100 => "STMicroelectronics ST200",
        101 => "Ubicom IP2xxx",
        102 => "MAX processor",
        103 => "National Semi. CompactRISC",
        104 => "Fujitsu F2MC16",
        105 => "Texas Instruments msp430",
        106 => "Analog Devices Blackfin DSP",
        107 => "Seiko Epson S1C33 family",
        108 => "Sharp embedded microprocessor",
        109 => "Arca RISC",
        110 => "PKU-Unity & MPRC Peking Uni. mc series",
        111 => "eXcess configurable cpu",
        112 => "Icera Semi. Deep Execution Processor",
        113 => "Altera Nios II",
        114 => "National Semi. CompactRISC CRX",
        115 => "Motorola XGATE",
        116 => "Infineon C16x/XC16x",
        117 => "Renesas M16C",
        118 => "Microchip Technology dsPIC30F",
        119 => "Freescale Communication Engine RISC",
        120 => "Renesas M32C",
        131 => "Altium TSK3000",
        132 => "Freescale RS08",
        133 => "Analog Devices SHARC family",
        134 => "Cyan Technology eCOG2",
        135 => "Sunplus S+core7 RISC",
        136 => "New Japan Radio (NJR) 24-bit DSP",
        137 => "Broadcom VideoCore III",
        138 => "RISC for Lattice FPGA",
        139 => "Seiko Epson C17",
        140 => "Texas Instruments TMS320C6000 DSP",
        141 => "Texas Instruments TMS320C2000 DSP",
        142 => "Texas Instruments TMS320C55x DSP",
        143 => "Texas Instruments App. Specific RISC",
        144 => "Texas Instruments Prog. Realtime Unit",
        160 => "STMicroelectronics 64bit VLIW DSP",
        161 => "Cypress M8C",
        162 => "Renesas R32C",
        163 => "NXP Semi. TriMedia",
        164 => "QUALCOMM DSP6",
        165 => "Intel 8051 and variants",
        166 => "STMicroelectronics STxP7x",
        167 => "Andes Tech. compact code emb. RISC",
        168 => "Cyan Technology eCOG1X",
        169 => "Dallas Semi. MAXQ30 mc",
        170 => "New Japan Radio (NJR) 16-bit DSP",
        171 => "M2000 Reconfigurable RISC",
        172 => "Cray NV2 vector architecture",
        173 => "Renesas RX",
        174 => "Imagination Tech. META",
        175 => "MCST Elbrus",
        176 => "Cyan Technology eCOG16",
        177 => "National Semi. CompactRISC CR16",
        178 => "Freescale Extended Time Processing Unit",
        179 => "Infineon Tech. SLE9X",
        180 => "Intel L10M",
        181 => "Intel K10M",
        182 => "reserved 182",
        183 => "ARM AARCH64",
        184 => "reserved 184",
        185 => "Atmel 32-bit microprocessor",
        186 => "STMicroelectronics STM8",
        187 => "Tilera TILE64",
        188 => "Tilera TILEPro",
        189 => "Xilinx MicroBlaze",
        190 => "NVIDIA CUDA",
        191 => "Tilera TILE-Gx",
        192 => "CloudShield",
        193 => "KIPO-KAIST Core-A 1st gen.",
        194 => "KIPO-KAIST Core-A 2nd gen.",
        195 => "Synopsys ARCompact V2",
        196 => "Open8 RISC",
        197 => "Renesas RL78",
        198 => "Broadcom VideoCore V",
        199 => "Renesas 78KOR",
        200 => "Freescale 56800EX DSC",
        201 => "Beyond BA1",
        202 => "Beyond BA2",
        203 => "XMOS xCORE",
        204 => "Microchip 8-bit PIC(r)",
        210 => "KM211 KM32",
        211 => "KM211 KMX32",
        212 => "KM211 KMX16",
        213 => "KM211 KMX8",
        214 => "KM211 KVARC",
        215 => "Paneve CDP",
        216 => "Cognitive Smart Memory Processor",
        217 => "Bluechip CoolEngine",
        218 => "Nanoradio Optimized RISC",
        219 => "CSR Kalimba",
        220 => "Zilog Z80",
        221 => "Controls and Data Services VISIUMcore",
        222 => "FTDI Chip FT32",
        223 => "Moxie processor",
        224 => "AMD GPU",
        243 => "RISC-V",
        247 => "Linux BPF -- in-kernel virtual machine",
        _ => "Unknown",
    }
}

#[derive(Debug)]
pub enum FileClass {
    // Invalid class
    None,
    // 32-bit objects
    ElfClass32,
    // 64-bit objects
    ElfClass64,
    // Unknown class
    Invalid(u8),
}

#[derive(Debug)]
pub enum Encoding {
    // Invalid data encoding
    None,
    // 2's complement, little endian
    LittleEndian,
    // 2's complement, big endian
    BigEndian,
    // Unknown data encoding
    Invalid(u8),
}

#[derive(Debug)]
pub enum OsAbi {
    // UNIX System V ABI
    UnixVSystem,
    // HP-UX
    HpUx,
    // NetBSD
    NetBsd,
    // Object uses GNU ELF extensions
    GnuElfExtensions,
    // SUN Solaris
    SunSolaris,
    // IBM AIX
    IbmAix,
    // SGI Irix
    SgiIrix,
    // FreeBSD
    FreeBsd,
    // Compaq TRU64 UNIX
    CompaqTru64Unix,
    // Novell Modesto
    NovellModesto,
    // OpenBSD
    OpenBsd,
    // ARM EABI
    ArmEabi,
    // ARM
    Arm,
    // Standalone (embedded) application
    Standalone,
    // Unknown
    Invalid(u8),
}

#[derive(Debug)]
pub enum ObjectType {
    // No file type
    NoFileType,
    // Relocatable file
    RelocatableFile,
    // Executable file
    ExecutableFile,
    // Shared object file
    SharedObjectFile,
    // Core file
    CoreFile,
    // Unknown
    Invalid(u16),
}

#[derive(Debug)]
pub enum Version {
    // Invalid ELF version
    Unspecified,
    // Current version
    Current,
    // Unknown
    Invalid(u32),
}

#[derive(Debug)]
pub struct ElfFileHeader {
    // Conglomeration of the identification bytes, must be \177ELF
    pub e_magic: [u8; 4],
    // File class
    pub e_class: FileClass,
    // Data encoding
    pub e_encoding: Encoding,
    // File version, value must be EV_CURRENT
    pub e_version_: u8,
    // OS ABI identification
    pub e_os_abi: OsAbi,
    // ABI version
    pub e_os_abi_version: u8,
    // Padding bytes
    pub e_padding_: [u8; 7],
    // Object file type
    pub e_type: ObjectType,
    // Architecture
    pub e_machine: u16,
    // Object file version
    pub e_version: Version,
    // Entry point virtual address
    pub e_entry: u64,
    // Program header table file offset
    pub e_phoff: u64,
    // Section header table file offset
    pub e_shoff: u64,
    // Processor-specific flags
    pub e_flags: u32,
    // ELF header size in bytes
    pub e_ehsize: u16,
    // Program header table entry size
    pub e_phentsize: u16,
    // Program header table entry count
    pub e_phnum: u16,
    // Section header table entry size
    pub e_shentsize: u16,
    // Section header table entry count
    pub e_shnum: u16,
    // Section header string table index
    pub e_shstrndx: u16,
}

#[derive(Error, Debug)]
pub enum Error {
    #[error("Elf magic mismatch: got: {:02X?}, expected: {:02X?}", magic, ELF_MAGIC)]
    ElfMagicMismatchError { magic: [u8; 4] },
    #[error(transparent)]
    IOError(#[from] std::io::Error),
}

impl ElfFileHeader {
    pub fn new(reader: &mut Reader) -> Result<ElfFileHeader, Error> {
        let mut e_magic: [u8; 4] = [0; 4];
        reader.read_exact(&mut e_magic)?;

        if e_magic[0] != ELF_MAGIC[0]
            || e_magic[1] != ELF_MAGIC[1]
            || e_magic[2] != ELF_MAGIC[2]
            || e_magic[3] != ELF_MAGIC[3]
        {
            return Err(Error::ElfMagicMismatchError { magic: e_magic });
        }

        let e_class = FileClass::new(reader.read_u8()?);
        let e_encoding = Encoding::new(reader.read_u8()?);
        let e_version_ = reader.read_u8()?;
        let e_os_abi = OsAbi::new(reader.read_u8()?);
        let e_os_abi_version = reader.read_u8()?;

        let mut e_padding_: [u8; 7] = [0; 7];
        reader.read_exact(&mut e_padding_)?;

        let e_type = ObjectType::new(reader.read_u16::<LittleEndian>()?);
        let e_machine = reader.read_u16::<LittleEndian>()?;
        let e_version = Version::new(reader.read_u32::<LittleEndian>()?);
        let e_entry = reader.read_u64::<LittleEndian>()?;
        let e_phoff = reader.read_u64::<LittleEndian>()?;
        let e_shoff = reader.read_u64::<LittleEndian>()?;
        let e_flags = reader.read_u32::<LittleEndian>()?;
        let e_ehsize = reader.read_u16::<LittleEndian>()?;
        let e_phentsize = reader.read_u16::<LittleEndian>()?;
        let e_phnum = reader.read_u16::<LittleEndian>()?;
        let e_shentsize = reader.read_u16::<LittleEndian>()?;
        let e_shstrndx = reader.read_u16::<LittleEndian>()?;
        let e_shnum = reader.read_u16::<LittleEndian>()?;

        Ok(ElfFileHeader {
            e_magic,
            e_class,
            e_encoding,
            e_version_,
            e_os_abi,
            e_os_abi_version,
            e_padding_,
            e_type,
            e_machine,
            e_version,
            e_entry,
            e_phoff,
            e_shoff,
            e_flags,
            e_ehsize,
            e_phentsize,
            e_phnum,
            e_shentsize,
            e_shnum,
            e_shstrndx,
        })
    }
}

impl FileClass {
    fn new(value: u8) -> FileClass {
        match value {
            0 => FileClass::None,
            1 => FileClass::ElfClass32,
            2 => FileClass::ElfClass64,
            _ => FileClass::Invalid(value),
        }
    }
}

impl Encoding {
    fn new(value: u8) -> Encoding {
        match value {
            0 => Encoding::None,
            1 => Encoding::LittleEndian,
            2 => Encoding::BigEndian,
            _ => Encoding::Invalid(value),
        }
    }
}

impl OsAbi {
    fn new(value: u8) -> OsAbi {
        use OsAbi::*;
        match value {
            0 => UnixVSystem,
            1 => HpUx,
            2 => NetBsd,
            3 => GnuElfExtensions,
            6 => SunSolaris,
            7 => IbmAix,
            8 => SgiIrix,
            9 => FreeBsd,
            10 => CompaqTru64Unix,
            11 => NovellModesto,
            12 => OpenBsd,
            64 => ArmEabi,
            97 => Arm,
            255 => Standalone,
            _ => OsAbi::Invalid(value),
        }
    }
}

impl ObjectType {
    fn new(value: u16) -> ObjectType {
        use ObjectType::*;
        match value {
            0 => NoFileType,
            1 => RelocatableFile,
            2 => ExecutableFile,
            3 => SharedObjectFile,
            4 => CoreFile,
            _ => Invalid(value),
        }
    }
}

impl Version {
    fn new(value: u32) -> Version {
        match value {
            0 => Version::Unspecified,
            1 => Version::Current,
            _ => Version::Invalid(value),
        }
    }
}

impl fmt::Display for ElfFileHeader {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        writeln!(f, "Elf Header:")?;
        writeln!(f, "{:<32}{:x?}", "Magic:", self.e_magic)?;
        writeln!(f, "{:<32}{:?}", "Class:", self.e_class)?;
        writeln!(f, "{:<32}{:?}", "Encoding:", self.e_encoding)?;
        writeln!(f, "{:<32}{:?}", "OS/ABI:", self.e_os_abi)?;
        writeln!(f, "{:<32}{}", "ABI Version:", self.e_os_abi_version)?;
        writeln!(f, "{:<32}{:x?}", "Padding:", self.e_padding_)?;
        writeln!(f, "{:<32}{:?}", "Type:", self.e_type)?;
        writeln!(f, "{:<32}{}", "Architecture:", show_machine(self.e_machine))?;
        writeln!(f, "{:<32}{:?}", "Version:", self.e_version)?;
        writeln!(f, "{:<32}{:#x}", "Entry point address:", self.e_entry)?;
        writeln!(f, "{:<32}{}", "Program header offset:", self.e_phoff)?;
        writeln!(f, "{:<32}{}", "Section header offset:", self.e_shoff)?;
        writeln!(f, "{:<32}{}", "Flags:", self.e_flags)?;
        writeln!(f, "{:<32}{}", "Size of this header:", self.e_ehsize)?;
        writeln!(f, "{:<32}{}", "Size of program headers:", self.e_phentsize)?;
        writeln!(f, "{:<32}{}", "Number of program headers:", self.e_phnum)?;
        writeln!(f, "{:<32}{}", "Size of section headers:", self.e_shentsize)?;
        writeln!(f, "{:<32}{}", "Number of section headers:", self.e_shnum)?;
        writeln!(
            f,
            "{:<32}{}",
            "Section header strtab index:", self.e_shstrndx
        )
    }
}
#[derive(Debug)]
random_line_split
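A minimal usage sketch for the parser above. The `Reader` type comes from `crate::reader` and its constructor is not shown in this file, so the `Reader::new` call below is an assumption made for illustration:

```rust
use std::fs;

fn dump_header(path: &str) -> Result<(), Box<dyn std::error::Error>> {
    // Load the whole object file and wrap it in the crate's byte reader
    // (hypothetical constructor; the real one lives in crate::reader).
    let bytes = fs::read(path)?;
    let mut reader = Reader::new(bytes);

    // Parse the fixed-size ELF header and pretty-print it via the
    // fmt::Display impl shown above.
    let header = ElfFileHeader::new(&mut reader)?;
    println!("{}", header);
    Ok(())
}
```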
file.rs
use crate::reader::{LittleEndian, ReadBytesExt, Reader};
use std::fmt;
use std::io::Read;
use thiserror::Error;

const ELF_MAGIC: [u8; 4] = [0x7f, b'E', b'L', b'F'];

fn show_machine(value: u16) -> &'static str {
    match value {
        0 => "No machine",
        1 => "AT&T WE 32100",
        2 => "SUN SPARC",
        3 => "Intel 80386",
        4 => "Motorola m68k family",
        5 => "Motorola m88k family",
        6 => "Intel MCU",
        7 => "Intel 80860",
        8 => "MIPS R3000 big-endian",
        9 => "IBM System/370",
        10 => "MIPS R3000 little-endian",
        15 => "HPPA",
        16 => "reserved 16",
        17 => "Fujitsu VPP500",
        18 => "Sun's v8plus",
        19 => "Intel 80960",
        20 => "PowerPC",
        21 => "PowerPC 64-bit",
        22 => "IBM S390",
        23 => "IBM SPU/SPC",
        36 => "NEC V800 series",
        37 => "Fujitsu FR20",
        38 => "TRW RH-32",
        39 => "Motorola RCE",
        40 => "ARM",
        41 => "Digital Alpha",
        42 => "Hitachi SH",
        43 => "SPARC v9 64-bit",
        44 => "Siemens Tricore",
        45 => "Argonaut RISC Core",
        46 => "Hitachi H8/300",
        47 => "Hitachi H8/300H",
        48 => "Hitachi H8S",
        49 => "Hitachi H8/500",
        50 => "Intel Merced",
        51 => "Stanford MIPS-X",
        52 => "Motorola Coldfire",
        53 => "Motorola M68HC12",
        54 => "Fujitsu MMA Multimedia Accelerator",
        55 => "Siemens PCP",
        56 => "Sony nCPU embedded RISC",
        57 => "Denso NDR1 microprocessor",
        58 => "Motorola Start*Core processor",
        59 => "Toyota ME16 processor",
        60 => "STMicroelectronics ST100 processor",
        61 => "Advanced Logic Corp. Tinyj emb.fam",
        62 => "AMD x86-64 architecture",
        63 => "Sony DSP Processor",
        64 => "Digital PDP-10",
        65 => "Digital PDP-11",
        66 => "Siemens FX66 microcontroller",
        67 => "STMicroelectronics ST9+ 8/16 mc",
        68 => "STMicroelectronics ST7 8 bit mc",
        69 => "Motorola MC68HC16 microcontroller",
        70 => "Motorola MC68HC11 microcontroller",
        71 => "Motorola MC68HC08 microcontroller",
        72 => "Motorola MC68HC05 microcontroller",
        73 => "Silicon Graphics SVx",
        74 => "STMicroelectronics ST19 8 bit mc",
        75 => "Digital VAX",
        76 => "Axis Communications 32-bit emb.proc",
        77 => "Infineon Technologies 32-bit emb.proc",
        78 => "Element 14 64-bit DSP Processor",
        79 => "LSI Logic 16-bit DSP Processor",
        80 => "Donald Knuth's educational 64-bit proc",
        81 => "Harvard University machine-independent object files",
        82 => "SiTera Prism",
        83 => "Atmel AVR 8-bit microcontroller",
        84 => "Fujitsu FR30",
        85 => "Mitsubishi D10V",
        86 => "Mitsubishi D30V",
        87 => "NEC v850",
        88 => "Mitsubishi M32R",
        89 => "Matsushita MN10300",
        90 => "Matsushita MN10200",
        91 => "picoJava",
        92 => "OpenRISC 32-bit embedded processor",
        93 => "ARC International ARCompact",
        94 => "Tensilica Xtensa Architecture",
        95 => "Alphamosaic VideoCore",
        96 => "Thompson Multimedia General Purpose Proc",
        97 => "National Semi. 32000",
        98 => "Tenor Network TPC",
        99 => "Trebia SNP 1000",
        100 => "STMicroelectronics ST200",
        101 => "Ubicom IP2xxx",
        102 => "MAX processor",
        103 => "National Semi. CompactRISC",
        104 => "Fujitsu F2MC16",
        105 => "Texas Instruments msp430",
        106 => "Analog Devices Blackfin DSP",
        107 => "Seiko Epson S1C33 family",
        108 => "Sharp embedded microprocessor",
        109 => "Arca RISC",
        110 => "PKU-Unity & MPRC Peking Uni. mc series",
        111 => "eXcess configurable cpu",
        112 => "Icera Semi. Deep Execution Processor",
        113 => "Altera Nios II",
        114 => "National Semi. CompactRISC CRX",
        115 => "Motorola XGATE",
        116 => "Infineon C16x/XC16x",
        117 => "Renesas M16C",
        118 => "Microchip Technology dsPIC30F",
        119 => "Freescale Communication Engine RISC",
        120 => "Renesas M32C",
        131 => "Altium TSK3000",
        132 => "Freescale RS08",
        133 => "Analog Devices SHARC family",
        134 => "Cyan Technology eCOG2",
        135 => "Sunplus S+core7 RISC",
        136 => "New Japan Radio (NJR) 24-bit DSP",
        137 => "Broadcom VideoCore III",
        138 => "RISC for Lattice FPGA",
        139 => "Seiko Epson C17",
        140 => "Texas Instruments TMS320C6000 DSP",
        141 => "Texas Instruments TMS320C2000 DSP",
        142 => "Texas Instruments TMS320C55x DSP",
        143 => "Texas Instruments App. Specific RISC",
        144 => "Texas Instruments Prog. Realtime Unit",
        160 => "STMicroelectronics 64bit VLIW DSP",
        161 => "Cypress M8C",
        162 => "Renesas R32C",
        163 => "NXP Semi. TriMedia",
        164 => "QUALCOMM DSP6",
        165 => "Intel 8051 and variants",
        166 => "STMicroelectronics STxP7x",
        167 => "Andes Tech. compact code emb. RISC",
        168 => "Cyan Technology eCOG1X",
        169 => "Dallas Semi. MAXQ30 mc",
        170 => "New Japan Radio (NJR) 16-bit DSP",
        171 => "M2000 Reconfigurable RISC",
        172 => "Cray NV2 vector architecture",
        173 => "Renesas RX",
        174 => "Imagination Tech. META",
        175 => "MCST Elbrus",
        176 => "Cyan Technology eCOG16",
        177 => "National Semi. CompactRISC CR16",
        178 => "Freescale Extended Time Processing Unit",
        179 => "Infineon Tech. SLE9X",
        180 => "Intel L10M",
        181 => "Intel K10M",
        182 => "reserved 182",
        183 => "ARM AARCH64",
        184 => "reserved 184",
        185 => "Atmel 32-bit microprocessor",
        186 => "STMicroelectronics STM8",
        187 => "Tilera TILE64",
        188 => "Tilera TILEPro",
        189 => "Xilinx MicroBlaze",
        190 => "NVIDIA CUDA",
        191 => "Tilera TILE-Gx",
        192 => "CloudShield",
        193 => "KIPO-KAIST Core-A 1st gen.",
        194 => "KIPO-KAIST Core-A 2nd gen.",
        195 => "Synopsys ARCompact V2",
        196 => "Open8 RISC",
        197 => "Renesas RL78",
        198 => "Broadcom VideoCore V",
        199 => "Renesas 78KOR",
        200 => "Freescale 56800EX DSC",
        201 => "Beyond BA1",
        202 => "Beyond BA2",
        203 => "XMOS xCORE",
        204 => "Microchip 8-bit PIC(r)",
        210 => "KM211 KM32",
        211 => "KM211 KMX32",
        212 => "KM211 KMX16",
        213 => "KM211 KMX8",
        214 => "KM211 KVARC",
        215 => "Paneve CDP",
        216 => "Cognitive Smart Memory Processor",
        217 => "Bluechip CoolEngine",
        218 => "Nanoradio Optimized RISC",
        219 => "CSR Kalimba",
        220 => "Zilog Z80",
        221 => "Controls and Data Services VISIUMcore",
        222 => "FTDI Chip FT32",
        223 => "Moxie processor",
        224 => "AMD GPU",
        243 => "RISC-V",
        247 => "Linux BPF -- in-kernel virtual machine",
        _ => "Unknown",
    }
}

#[derive(Debug)]
pub enum FileClass {
    // Invalid class
    None,
    // 32-bit objects
    ElfClass32,
    // 64-bit objects
    ElfClass64,
    // Unknown class
    Invalid(u8),
}

#[derive(Debug)]
pub enum
{
    // Invalid data encoding
    None,
    // 2's complement, little endian
    LittleEndian,
    // 2's complement, big endian
    BigEndian,
    // Unknown data encoding
    Invalid(u8),
}

#[derive(Debug)]
pub enum OsAbi {
    // UNIX System V ABI
    UnixVSystem,
    // HP-UX
    HpUx,
    // NetBSD
    NetBsd,
    // Object uses GNU ELF extensions
    GnuElfExtensions,
    // SUN Solaris
    SunSolaris,
    // IBM AIX
    IbmAix,
    // SGI Irix
    SgiIrix,
    // FreeBSD
    FreeBsd,
    // Compaq TRU64 UNIX
    CompaqTru64Unix,
    // Novell Modesto
    NovellModesto,
    // OpenBSD
    OpenBsd,
    // ARM EABI
    ArmEabi,
    // ARM
    Arm,
    // Standalone (embedded) application
    Standalone,
    // Unknown
    Invalid(u8),
}

#[derive(Debug)]
pub enum ObjectType {
    // No file type
    NoFileType,
    // Relocatable file
    RelocatableFile,
    // Executable file
    ExecutableFile,
    // Shared object file
    SharedObjectFile,
    // Core file
    CoreFile,
    // Unknown
    Invalid(u16),
}

#[derive(Debug)]
pub enum Version {
    // Invalid ELF version
    Unspecified,
    // Current version
    Current,
    // Unknown
    Invalid(u32),
}

#[derive(Debug)]
pub struct ElfFileHeader {
    // Conglomeration of the identification bytes, must be \177ELF
    pub e_magic: [u8; 4],
    // File class
    pub e_class: FileClass,
    // Data encoding
    pub e_encoding: Encoding,
    // File version, value must be EV_CURRENT
    pub e_version_: u8,
    // OS ABI identification
    pub e_os_abi: OsAbi,
    // ABI version
    pub e_os_abi_version: u8,
    // Padding bytes
    pub e_padding_: [u8; 7],
    // Object file type
    pub e_type: ObjectType,
    // Architecture
    pub e_machine: u16,
    // Object file version
    pub e_version: Version,
    // Entry point virtual address
    pub e_entry: u64,
    // Program header table file offset
    pub e_phoff: u64,
    // Section header table file offset
    pub e_shoff: u64,
    // Processor-specific flags
    pub e_flags: u32,
    // ELF header size in bytes
    pub e_ehsize: u16,
    // Program header table entry size
    pub e_phentsize: u16,
    // Program header table entry count
    pub e_phnum: u16,
    // Section header table entry size
    pub e_shentsize: u16,
    // Section header table entry count
    pub e_shnum: u16,
    // Section header string table index
    pub e_shstrndx: u16,
}

#[derive(Error, Debug)]
pub enum Error {
    #[error("Elf magic mismatch: got: {:02X?}, expected: {:02X?}", magic, ELF_MAGIC)]
    ElfMagicMismatchError { magic: [u8; 4] },
    #[error(transparent)]
    IOError(#[from] std::io::Error),
}

impl ElfFileHeader {
    pub fn new(reader: &mut Reader) -> Result<ElfFileHeader, Error> {
        let mut e_magic: [u8; 4] = [0; 4];
        reader.read_exact(&mut e_magic)?;

        if e_magic[0] != ELF_MAGIC[0]
            || e_magic[1] != ELF_MAGIC[1]
            || e_magic[2] != ELF_MAGIC[2]
            || e_magic[3] != ELF_MAGIC[3]
        {
            return Err(Error::ElfMagicMismatchError { magic: e_magic });
        }

        let e_class = FileClass::new(reader.read_u8()?);
        let e_encoding = Encoding::new(reader.read_u8()?);
        let e_version_ = reader.read_u8()?;
        let e_os_abi = OsAbi::new(reader.read_u8()?);
        let e_os_abi_version = reader.read_u8()?;
        let mut e_padding_: [u8; 7] = [0; 7];
        reader.read_exact(&mut e_padding_)?;
        let e_type = ObjectType::new(reader.read_u16::<LittleEndian>()?);
        let e_machine = reader.read_u16::<LittleEndian>()?;
        let e_version = Version::new(reader.read_u32::<LittleEndian>()?);
        let e_entry = reader.read_u64::<LittleEndian>()?;
        let e_phoff = reader.read_u64::<LittleEndian>()?;
        let e_shoff = reader.read_u64::<LittleEndian>()?;
        let e_flags = reader.read_u32::<LittleEndian>()?;
        let e_ehsize = reader.read_u16::<LittleEndian>()?;
        let e_phentsize = reader.read_u16::<LittleEndian>()?;
        let e_phnum = reader.read_u16::<LittleEndian>()?;
        let e_shentsize = reader.read_u16::<LittleEndian>()?;
        let e_shnum = reader.read_u16::<LittleEndian>()?;
        let e_shstrndx = reader.read_u16::<LittleEndian>()?;

        Ok(ElfFileHeader {
            e_magic,
            e_class,
            e_encoding,
            e_version_,
            e_os_abi,
            e_os_abi_version,
            e_padding_,
            e_type,
            e_machine,
            e_version,
            e_entry,
            e_phoff,
            e_shoff,
            e_flags,
            e_ehsize,
            e_phentsize,
            e_phnum,
            e_shentsize,
            e_shnum,
            e_shstrndx,
        })
    }
}

impl FileClass {
    fn new(value: u8) -> FileClass {
        match value {
            0 => FileClass::None,
            1 => FileClass::ElfClass32,
            2 => FileClass::ElfClass64,
            _ => FileClass::Invalid(value),
        }
    }
}

impl Encoding {
    fn new(value: u8) -> Encoding {
        match value {
            0 => Encoding::None,
            1 => Encoding::LittleEndian,
            2 => Encoding::BigEndian,
            _ => Encoding::Invalid(value),
        }
    }
}

impl OsAbi {
    fn new(value: u8) -> OsAbi {
        use OsAbi::*;
        match value {
            0 => UnixVSystem,
            1 => HpUx,
            2 => NetBsd,
            3 => GnuElfExtensions,
            6 => SunSolaris,
            7 => IbmAix,
            8 => SgiIrix,
            9 => FreeBsd,
            10 => CompaqTru64Unix,
            11 => NovellModesto,
            12 => OpenBsd,
            64 => ArmEabi,
            97 => Arm,
            255 => Standalone,
            _ => OsAbi::Invalid(value),
        }
    }
}

impl ObjectType {
    fn new(value: u16) -> ObjectType {
        use ObjectType::*;
        match value {
            0 => NoFileType,
            1 => RelocatableFile,
            2 => ExecutableFile,
            3 => SharedObjectFile,
            4 => CoreFile,
            _ => Invalid(value),
        }
    }
}

impl Version {
    fn new(value: u32) -> Version {
        match value {
            0 => Version::Unspecified,
            1 => Version::Current,
            _ => Version::Invalid(value),
        }
    }
}

impl fmt::Display for ElfFileHeader {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        writeln!(f, "Elf Header:")?;
        writeln!(f, "{:<32}{:x?}", "Magic:", self.e_magic)?;
        writeln!(f, "{:<32}{:?}", "Class:", self.e_class)?;
        writeln!(f, "{:<32}{:?}", "Encoding:", self.e_encoding)?;
        writeln!(f, "{:<32}{:?}", "OS/ABI:", self.e_os_abi)?;
        writeln!(f, "{:<32}{}", "ABI Version:", self.e_os_abi_version)?;
        writeln!(f, "{:<32}{:x?}", "Padding:", self.e_padding_)?;
        writeln!(f, "{:<32}{:?}", "Type:", self.e_type)?;
        writeln!(f, "{:<32}{}", "Architecture:", show_machine(self.e_machine))?;
        writeln!(f, "{:<32}{:?}", "Version:", self.e_version)?;
        writeln!(f, "{:<32}{:#x}", "Entry point address:", self.e_entry)?;
        writeln!(f, "{:<32}{}", "Program header offset:", self.e_phoff)?;
        writeln!(f, "{:<32}{}", "Section header offset:", self.e_shoff)?;
        writeln!(f, "{:<32}{}", "Flags:", self.e_flags)?;
        writeln!(f, "{:<32}{}", "Size of this header:", self.e_ehsize)?;
        writeln!(f, "{:<32}{}", "Size of program headers:", self.e_phentsize)?;
        writeln!(f, "{:<32}{}", "Number of program headers:", self.e_phnum)?;
        writeln!(f, "{:<32}{}", "Size of section headers:", self.e_shentsize)?;
        writeln!(f, "{:<32}{}", "Number of section headers:", self.e_shnum)?;
        writeln!(f, "{:<32}{}", "Section header strtab index:", self.e_shstrndx)
    }
}
Encoding
identifier_name
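A side note on the magic check in `ElfFileHeader::new`: since `[u8; 4]` implements `PartialEq`, the four element-wise comparisons could be collapsed into a single array equality. This is an equivalent sketch, not the code as written:

```rust
// Drop-in replacement for the four `e_magic[i] != ELF_MAGIC[i]` tests.
if e_magic != ELF_MAGIC {
    return Err(Error::ElfMagicMismatchError { magic: e_magic });
}
```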
mod.rs
// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
use deno_core::error::bad_resource_id;
use deno_core::error::type_error;
use deno_core::error::AnyError;
use deno_core::op;
use deno_core::OpState;
use libz_sys::*;
use std::borrow::Cow;
use std::cell::RefCell;
use std::future::Future;
use std::rc::Rc;

mod alloc;
pub mod brotli;
mod mode;
mod stream;

use mode::Flush;
use mode::Mode;

use self::stream::StreamWrapper;

#[inline]
fn check(condition: bool, msg: &str) -> Result<(), AnyError> {
  if condition {
    Ok(())
  } else {
    Err(type_error(msg.to_string()))
  }
}

#[inline]
fn zlib(state: &mut OpState, handle: u32) -> Result<Rc<Zlib>, AnyError> {
  state
    .resource_table
    .get::<Zlib>(handle)
    .map_err(|_| bad_resource_id())
}

#[derive(Default)]
struct ZlibInner {
  dictionary: Option<Vec<u8>>,
  err: i32,
  flush: Flush,
  init_done: bool,
  level: i32,
  mem_level: i32,
  mode: Mode,
  strategy: i32,
  window_bits: i32,
  write_in_progress: bool,
  pending_close: bool,
  gzib_id_bytes_read: u32,
  strm: StreamWrapper,
}

const GZIP_HEADER_ID1: u8 = 0x1f;
const GZIP_HEADER_ID2: u8 = 0x8b;

impl ZlibInner {
  #[allow(clippy::too_many_arguments)]
  fn start_write(
    &mut self,
    input: &[u8],
    in_off: u32,
    in_len: u32,
    out: &mut [u8],
    out_off: u32,
    out_len: u32,
    flush: Flush,
  ) -> Result<(), AnyError> {
    check(self.init_done, "write before init")?;
    check(!self.write_in_progress, "write already in progress")?;
    check(!self.pending_close, "close already in progress")?;

    self.write_in_progress = true;

    let next_in = input
      .get(in_off as usize..in_off as usize + in_len as usize)
      .ok_or_else(|| type_error("invalid input range"))?
      .as_ptr() as *mut _;
    let next_out = out
      .get_mut(out_off as usize..out_off as usize + out_len as usize)
      .ok_or_else(|| type_error("invalid output range"))?
      .as_mut_ptr();

    self.strm.avail_in = in_len;
    self.strm.next_in = next_in;
    self.strm.avail_out = out_len;
    self.strm.next_out = next_out;

    self.flush = flush;
    Ok(())
  }

  fn do_write(&mut self, flush: Flush) -> Result<(), AnyError> {
    self.flush = flush;
    match self.mode {
      Mode::Deflate | Mode::Gzip | Mode::DeflateRaw => {
        self.err = self.strm.deflate(flush);
      }
      // Auto-detect mode.
      Mode::Unzip if self.strm.avail_in > 0 => 'blck: {
        let mut next_expected_header_byte = Some(0);

        // SAFETY: `self.strm.next_in` is a valid pointer to the input buffer.
        // `self.strm.avail_in` is the length of the input buffer that is only
        // set by `start_write`.
        let strm = unsafe {
          std::slice::from_raw_parts(
            self.strm.next_in,
            self.strm.avail_in as usize,
          )
        };

        if self.gzib_id_bytes_read == 0 {
          if strm[0] == GZIP_HEADER_ID1 {
            self.gzib_id_bytes_read = 1;
            next_expected_header_byte = Some(1);

            // Not enough.
            if self.strm.avail_in == 1 {
              break 'blck;
            }
          } else {
            self.mode = Mode::Inflate;
            next_expected_header_byte = None;
          }
        }

        if self.gzib_id_bytes_read == 1 {
          let byte = match next_expected_header_byte {
            Some(i) => strm[i],
            None => break 'blck,
          };
          if byte == GZIP_HEADER_ID2 {
            self.gzib_id_bytes_read = 2;
            self.mode = Mode::Gunzip;
          } else {
            self.mode = Mode::Inflate;
          }
        } else if next_expected_header_byte.is_some() {
          return Err(type_error(
            "invalid number of gzip magic number bytes read",
          ));
        }
      }
      _ => {}
    }

    match self.mode {
      Mode::Inflate
      | Mode::Gunzip
      | Mode::InflateRaw
      // We're still reading the header.
      | Mode::Unzip => {
        self.err = self.strm.inflate(self.flush);
        // TODO(@littledivy): Use if let chain when it is stable.
        // https://github.com/rust-lang/rust/issues/53667
        //
        // Data was encoded with dictionary
        if let (Z_NEED_DICT, Some(dictionary)) = (self.err, &self.dictionary) {
          self.err = self.strm.inflate_set_dictionary(dictionary);

          if self.err == Z_OK {
            self.err = self.strm.inflate(flush);
          } else if self.err == Z_DATA_ERROR {
            self.err = Z_NEED_DICT;
          }
        }

        while self.strm.avail_in > 0
          && self.mode == Mode::Gunzip
          && self.err == Z_STREAM_END
          // SAFETY: `strm` is a valid pointer to the zlib strm.
          // `strm.next_in` is initialized to the input buffer.
          && unsafe { *self.strm.next_in } != 0x00
        {
          self.err = self.strm.reset(self.mode);
          self.err = self.strm.inflate(flush);
        }
      }
      _ => {}
    }

    let done = self.strm.avail_out != 0 && self.flush == Flush::Finish;
    // We're not done yet, but the output buffer is full.
    if self.err == Z_BUF_ERROR && !done {
      // Set to Z_OK to avoid reporting the error in JS.
      self.err = Z_OK;
    }

    self.write_in_progress = false;
    Ok(())
  }

  fn init_stream(&mut self) -> Result<(), AnyError> {
    match self.mode {
      Mode::Gzip | Mode::Gunzip => self.window_bits += 16,
      Mode::Unzip => self.window_bits += 32,
      Mode::DeflateRaw | Mode::InflateRaw => self.window_bits *= -1,
      _ => {}
    }

    self.err = match self.mode {
      Mode::Deflate | Mode::Gzip | Mode::DeflateRaw => self.strm.deflate_init(
        self.level,
        self.window_bits,
        self.mem_level,
        self.strategy,
      ),
      Mode::Inflate | Mode::Gunzip | Mode::InflateRaw | Mode::Unzip => {
        self.strm.inflate_init(self.window_bits)
      }
      Mode::None => return Err(type_error("Unknown mode")),
    };

    self.write_in_progress = false;
    self.init_done = true;

    Ok(())
  }

  fn close(&mut self) -> Result<bool, AnyError> {
    if self.write_in_progress {
      self.pending_close = true;
      return Ok(false);
    }

    self.pending_close = false;
    check(self.init_done, "close before init")?;
    self.strm.end(self.mode);
    self.mode = Mode::None;
    Ok(true)
  }

  fn reset_stream(&mut self) -> Result<(), AnyError> {
    self.err = self.strm.reset(self.mode);
    Ok(())
  }
}

struct Zlib {
  inner: RefCell<ZlibInner>,
}

impl deno_core::Resource for Zlib {
  fn name(&self) -> Cow<str> {
    "zlib".into()
  }
}

#[op]
pub fn op_zlib_new(state: &mut OpState, mode: i32) -> Result<u32, AnyError> {
  let mode = Mode::try_from(mode)?;

  let inner = ZlibInner {
    mode,
    ..Default::default()
  };

  Ok(state.resource_table.add(Zlib {
    inner: RefCell::new(inner),
  }))
}

#[op]
pub fn
(state: &mut OpState, handle: u32) -> Result<(), AnyError> {
  let resource = zlib(state, handle)?;
  let mut zlib = resource.inner.borrow_mut();

  // If there is a pending write, defer the close until the write is done.
  zlib.close()?;

  Ok(())
}

#[op]
pub fn op_zlib_write_async(
  state: Rc<RefCell<OpState>>,
  handle: u32,
  flush: i32,
  input: &[u8],
  in_off: u32,
  in_len: u32,
  out: &mut [u8],
  out_off: u32,
  out_len: u32,
) -> Result<
  impl Future<Output = Result<(i32, u32, u32), AnyError>> + 'static,
  AnyError,
> {
  let mut state_mut = state.borrow_mut();
  let resource = zlib(&mut state_mut, handle)?;
  let mut strm = resource.inner.borrow_mut();
  let flush = Flush::try_from(flush)?;
  strm.start_write(input, in_off, in_len, out, out_off, out_len, flush)?;

  let state = state.clone();
  Ok(async move {
    let mut state_mut = state.borrow_mut();
    let resource = zlib(&mut state_mut, handle)?;
    let mut zlib = resource.inner.borrow_mut();

    zlib.do_write(flush)?;

    Ok((zlib.err, zlib.strm.avail_out, zlib.strm.avail_in))
  })
}

#[op]
pub fn op_zlib_write(
  state: &mut OpState,
  handle: u32,
  flush: i32,
  input: &[u8],
  in_off: u32,
  in_len: u32,
  out: &mut [u8],
  out_off: u32,
  out_len: u32,
  result: &mut [u32],
) -> Result<i32, AnyError> {
  let resource = zlib(state, handle)?;
  let mut zlib = resource.inner.borrow_mut();

  let flush = Flush::try_from(flush)?;
  zlib.start_write(input, in_off, in_len, out, out_off, out_len, flush)?;
  zlib.do_write(flush)?;

  result[0] = zlib.strm.avail_out;
  result[1] = zlib.strm.avail_in;

  Ok(zlib.err)
}

#[op]
pub fn op_zlib_init(
  state: &mut OpState,
  handle: u32,
  level: i32,
  window_bits: i32,
  mem_level: i32,
  strategy: i32,
  dictionary: &[u8],
) -> Result<i32, AnyError> {
  let resource = zlib(state, handle)?;
  let mut zlib = resource.inner.borrow_mut();

  check((8..=15).contains(&window_bits), "invalid windowBits")?;
  check((-1..=9).contains(&level), "invalid level")?;
  check((1..=9).contains(&mem_level), "invalid memLevel")?;
  check(
    strategy == Z_DEFAULT_STRATEGY
      || strategy == Z_FILTERED
      || strategy == Z_HUFFMAN_ONLY
      || strategy == Z_RLE
      || strategy == Z_FIXED,
    "invalid strategy",
  )?;

  zlib.level = level;
  zlib.window_bits = window_bits;
  zlib.mem_level = mem_level;
  zlib.strategy = strategy;

  zlib.flush = Flush::None;
  zlib.err = Z_OK;

  zlib.init_stream()?;

  zlib.dictionary = if !dictionary.is_empty() {
    Some(dictionary.to_vec())
  } else {
    None
  };

  Ok(zlib.err)
}

#[op]
pub fn op_zlib_reset(
  state: &mut OpState,
  handle: u32,
) -> Result<i32, AnyError> {
  let resource = zlib(state, handle)?;
  let mut zlib = resource.inner.borrow_mut();

  zlib.reset_stream()?;

  Ok(zlib.err)
}

#[op]
pub fn op_zlib_close_if_pending(
  state: &mut OpState,
  handle: u32,
) -> Result<(), AnyError> {
  let resource = zlib(state, handle)?;
  let pending_close = {
    let mut zlib = resource.inner.borrow_mut();
    zlib.write_in_progress = false;
    zlib.pending_close
  };
  if pending_close {
    drop(resource);
    state.resource_table.close(handle)?;
  }

  Ok(())
}

#[cfg(test)]
mod tests {
  use super::*;

  #[test]
  fn zlib_start_write() {
    // (buffer, length, offset, should pass)
    type WriteVector = (&'static [u8], u32, u32, bool);
    const WRITE_VECTORS: [WriteVector; 8] = [
      (b"Hello", 5, 0, true),
      (b"H", 1, 0, true),
      (b"", 0, 0, true),
      // Overrun the buffer
      (b"H", 5, 0, false),
      (b"ello", 5, 0, false),
      (b"Hello", 5, 1, false),
      (b"H", 1, 1, false),
      (b"", 0, 1, false),
    ];

    for (input, len, offset, expected) in WRITE_VECTORS.iter() {
      let mut stream = ZlibInner {
        mode: Mode::Inflate,
        ..Default::default()
      };

      stream.init_stream().unwrap();
      assert_eq!(stream.err, Z_OK);

      assert_eq!(
        stream
          .start_write(input, *offset, *len, &mut [], 0, 0, Flush::None)
          .is_ok(),
        *expected
      );
      assert_eq!(stream.err, Z_OK);

      stream.close().unwrap();
    }
  }
}
op_zlib_close
identifier_name
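To make the `Mode::Unzip` auto-detect branch in `do_write` easier to follow, here is the same two-byte gzip test as a standalone sketch. The real code additionally resumes detection across writes via `gzib_id_bytes_read` when only one header byte has arrived:

```rust
const GZIP_ID1: u8 = 0x1f;
const GZIP_ID2: u8 = 0x8b;

// A gzip member starts with the magic bytes 0x1f 0x8b; anything else is
// treated as a plain zlib stream and handed to inflate.
fn detect_format(header: &[u8]) -> &'static str {
    match header {
        [GZIP_ID1, GZIP_ID2, ..] => "gunzip",
        _ => "inflate",
    }
}

fn main() {
    assert_eq!(detect_format(&[0x1f, 0x8b, 0x08]), "gunzip");
    assert_eq!(detect_format(&[0x78, 0x9c]), "inflate");
}
```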
mod.rs
// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
use deno_core::error::bad_resource_id;
use deno_core::error::type_error;
use deno_core::error::AnyError;
use deno_core::op;
use deno_core::OpState;
use libz_sys::*;
use std::borrow::Cow;
use std::cell::RefCell;
use std::future::Future;
use std::rc::Rc;

mod alloc;
pub mod brotli;
mod mode;
mod stream;

use mode::Flush;
use mode::Mode;

use self::stream::StreamWrapper;

#[inline]
fn check(condition: bool, msg: &str) -> Result<(), AnyError> {
  if condition {
    Ok(())
  } else {
    Err(type_error(msg.to_string()))
  }
}

#[inline]
fn zlib(state: &mut OpState, handle: u32) -> Result<Rc<Zlib>, AnyError> {
  state
    .resource_table
    .get::<Zlib>(handle)
    .map_err(|_| bad_resource_id())
}

#[derive(Default)]
struct ZlibInner {
  dictionary: Option<Vec<u8>>,
  err: i32,
  flush: Flush,
  init_done: bool,
  level: i32,
  mem_level: i32,
  mode: Mode,
  strategy: i32,
  window_bits: i32,
  write_in_progress: bool,
  pending_close: bool,
  gzib_id_bytes_read: u32,
  strm: StreamWrapper,
}

const GZIP_HEADER_ID1: u8 = 0x1f;
const GZIP_HEADER_ID2: u8 = 0x8b;

impl ZlibInner {
  #[allow(clippy::too_many_arguments)]
  fn start_write(
    &mut self,
    input: &[u8],
    in_off: u32,
    in_len: u32,
    out: &mut [u8],
    out_off: u32,
    out_len: u32,
    flush: Flush,
  ) -> Result<(), AnyError> {
    check(self.init_done, "write before init")?;
    check(!self.write_in_progress, "write already in progress")?;
    check(!self.pending_close, "close already in progress")?;

    self.write_in_progress = true;

    let next_in = input
      .get(in_off as usize..in_off as usize + in_len as usize)
      .ok_or_else(|| type_error("invalid input range"))?
      .as_ptr() as *mut _;
    let next_out = out
      .get_mut(out_off as usize..out_off as usize + out_len as usize)
      .ok_or_else(|| type_error("invalid output range"))?
      .as_mut_ptr();

    self.strm.avail_in = in_len;
    self.strm.next_in = next_in;
    self.strm.avail_out = out_len;
    self.strm.next_out = next_out;

    self.flush = flush;
    Ok(())
  }

  fn do_write(&mut self, flush: Flush) -> Result<(), AnyError> {
    self.flush = flush;
    match self.mode {
      Mode::Deflate | Mode::Gzip | Mode::DeflateRaw => {
        self.err = self.strm.deflate(flush);
      }
      // Auto-detect mode.
      Mode::Unzip if self.strm.avail_in > 0 => 'blck: {
        let mut next_expected_header_byte = Some(0);

        // SAFETY: `self.strm.next_in` is a valid pointer to the input buffer.
        // `self.strm.avail_in` is the length of the input buffer that is only
        // set by `start_write`.
        let strm = unsafe {
          std::slice::from_raw_parts(
            self.strm.next_in,
            self.strm.avail_in as usize,
          )
        };

        if self.gzib_id_bytes_read == 0 {
          if strm[0] == GZIP_HEADER_ID1 {
            self.gzib_id_bytes_read = 1;
            next_expected_header_byte = Some(1);

            // Not enough.
            if self.strm.avail_in == 1 {
              break 'blck;
            }
          } else {
            self.mode = Mode::Inflate;
            next_expected_header_byte = None;
          }
        }

        if self.gzib_id_bytes_read == 1 {
          let byte = match next_expected_header_byte {
            Some(i) => strm[i],
            None => break 'blck,
          };
          if byte == GZIP_HEADER_ID2 {
            self.gzib_id_bytes_read = 2;
            self.mode = Mode::Gunzip;
          } else {
            self.mode = Mode::Inflate;
          }
        } else if next_expected_header_byte.is_some() {
          return Err(type_error(
            "invalid number of gzip magic number bytes read",
          ));
        }
      }
      _ => {}
    }

    match self.mode {
      Mode::Inflate
      | Mode::Gunzip
      | Mode::InflateRaw
      // We're still reading the header.
      | Mode::Unzip => {
        self.err = self.strm.inflate(self.flush);
        // TODO(@littledivy): Use if let chain when it is stable.
        // https://github.com/rust-lang/rust/issues/53667
        //
        // Data was encoded with dictionary
        if let (Z_NEED_DICT, Some(dictionary)) = (self.err, &self.dictionary) {
          self.err = self.strm.inflate_set_dictionary(dictionary);

          if self.err == Z_OK {
            self.err = self.strm.inflate(flush);
          } else if self.err == Z_DATA_ERROR {
            self.err = Z_NEED_DICT;
          }
        }

        while self.strm.avail_in > 0
          && self.mode == Mode::Gunzip
          && self.err == Z_STREAM_END
          // SAFETY: `strm` is a valid pointer to the zlib strm.
          // `strm.next_in` is initialized to the input buffer.
          && unsafe { *self.strm.next_in } != 0x00
        {
          self.err = self.strm.reset(self.mode);
          self.err = self.strm.inflate(flush);
        }
      }
      _ => {}
    }

    let done = self.strm.avail_out != 0 && self.flush == Flush::Finish;
    // We're not done yet, but the output buffer is full.
    if self.err == Z_BUF_ERROR && !done {
      // Set to Z_OK to avoid reporting the error in JS.
      self.err = Z_OK;
    }

    self.write_in_progress = false;
    Ok(())
  }

  fn init_stream(&mut self) -> Result<(), AnyError> {
    match self.mode {
      Mode::Gzip | Mode::Gunzip => self.window_bits += 16,
      Mode::Unzip => self.window_bits += 32,
      Mode::DeflateRaw | Mode::InflateRaw => self.window_bits *= -1,
      _ =>
    }

    self.err = match self.mode {
      Mode::Deflate | Mode::Gzip | Mode::DeflateRaw => self.strm.deflate_init(
        self.level,
        self.window_bits,
        self.mem_level,
        self.strategy,
      ),
      Mode::Inflate | Mode::Gunzip | Mode::InflateRaw | Mode::Unzip => {
        self.strm.inflate_init(self.window_bits)
      }
      Mode::None => return Err(type_error("Unknown mode")),
    };

    self.write_in_progress = false;
    self.init_done = true;

    Ok(())
  }

  fn close(&mut self) -> Result<bool, AnyError> {
    if self.write_in_progress {
      self.pending_close = true;
      return Ok(false);
    }

    self.pending_close = false;
    check(self.init_done, "close before init")?;
    self.strm.end(self.mode);
    self.mode = Mode::None;
    Ok(true)
  }

  fn reset_stream(&mut self) -> Result<(), AnyError> {
    self.err = self.strm.reset(self.mode);
    Ok(())
  }
}

struct Zlib {
  inner: RefCell<ZlibInner>,
}

impl deno_core::Resource for Zlib {
  fn name(&self) -> Cow<str> {
    "zlib".into()
  }
}

#[op]
pub fn op_zlib_new(state: &mut OpState, mode: i32) -> Result<u32, AnyError> {
  let mode = Mode::try_from(mode)?;

  let inner = ZlibInner {
    mode,
    ..Default::default()
  };

  Ok(state.resource_table.add(Zlib {
    inner: RefCell::new(inner),
  }))
}

#[op]
pub fn op_zlib_close(state: &mut OpState, handle: u32) -> Result<(), AnyError> {
  let resource = zlib(state, handle)?;
  let mut zlib = resource.inner.borrow_mut();

  // If there is a pending write, defer the close until the write is done.
  zlib.close()?;

  Ok(())
}

#[op]
pub fn op_zlib_write_async(
  state: Rc<RefCell<OpState>>,
  handle: u32,
  flush: i32,
  input: &[u8],
  in_off: u32,
  in_len: u32,
  out: &mut [u8],
  out_off: u32,
  out_len: u32,
) -> Result<
  impl Future<Output = Result<(i32, u32, u32), AnyError>> + 'static,
  AnyError,
> {
  let mut state_mut = state.borrow_mut();
  let resource = zlib(&mut state_mut, handle)?;
  let mut strm = resource.inner.borrow_mut();
  let flush = Flush::try_from(flush)?;
  strm.start_write(input, in_off, in_len, out, out_off, out_len, flush)?;

  let state = state.clone();
  Ok(async move {
    let mut state_mut = state.borrow_mut();
    let resource = zlib(&mut state_mut, handle)?;
    let mut zlib = resource.inner.borrow_mut();

    zlib.do_write(flush)?;

    Ok((zlib.err, zlib.strm.avail_out, zlib.strm.avail_in))
  })
}

#[op]
pub fn op_zlib_write(
  state: &mut OpState,
  handle: u32,
  flush: i32,
  input: &[u8],
  in_off: u32,
  in_len: u32,
  out: &mut [u8],
  out_off: u32,
  out_len: u32,
  result: &mut [u32],
) -> Result<i32, AnyError> {
  let resource = zlib(state, handle)?;
  let mut zlib = resource.inner.borrow_mut();

  let flush = Flush::try_from(flush)?;
  zlib.start_write(input, in_off, in_len, out, out_off, out_len, flush)?;
  zlib.do_write(flush)?;

  result[0] = zlib.strm.avail_out;
  result[1] = zlib.strm.avail_in;

  Ok(zlib.err)
}

#[op]
pub fn op_zlib_init(
  state: &mut OpState,
  handle: u32,
  level: i32,
  window_bits: i32,
  mem_level: i32,
  strategy: i32,
  dictionary: &[u8],
) -> Result<i32, AnyError> {
  let resource = zlib(state, handle)?;
  let mut zlib = resource.inner.borrow_mut();

  check((8..=15).contains(&window_bits), "invalid windowBits")?;
  check((-1..=9).contains(&level), "invalid level")?;
  check((1..=9).contains(&mem_level), "invalid memLevel")?;
  check(
    strategy == Z_DEFAULT_STRATEGY
      || strategy == Z_FILTERED
      || strategy == Z_HUFFMAN_ONLY
      || strategy == Z_RLE
      || strategy == Z_FIXED,
    "invalid strategy",
  )?;

  zlib.level = level;
  zlib.window_bits = window_bits;
  zlib.mem_level = mem_level;
  zlib.strategy = strategy;

  zlib.flush = Flush::None;
  zlib.err = Z_OK;

  zlib.init_stream()?;

  zlib.dictionary = if !dictionary.is_empty() {
    Some(dictionary.to_vec())
  } else {
    None
  };

  Ok(zlib.err)
}

#[op]
pub fn op_zlib_reset(
  state: &mut OpState,
  handle: u32,
) -> Result<i32, AnyError> {
  let resource = zlib(state, handle)?;
  let mut zlib = resource.inner.borrow_mut();

  zlib.reset_stream()?;

  Ok(zlib.err)
}

#[op]
pub fn op_zlib_close_if_pending(
  state: &mut OpState,
  handle: u32,
) -> Result<(), AnyError> {
  let resource = zlib(state, handle)?;
  let pending_close = {
    let mut zlib = resource.inner.borrow_mut();
    zlib.write_in_progress = false;
    zlib.pending_close
  };
  if pending_close {
    drop(resource);
    state.resource_table.close(handle)?;
  }

  Ok(())
}

#[cfg(test)]
mod tests {
  use super::*;

  #[test]
  fn zlib_start_write() {
    // (buffer, length, offset, should pass)
    type WriteVector = (&'static [u8], u32, u32, bool);
    const WRITE_VECTORS: [WriteVector; 8] = [
      (b"Hello", 5, 0, true),
      (b"H", 1, 0, true),
      (b"", 0, 0, true),
      // Overrun the buffer
      (b"H", 5, 0, false),
      (b"ello", 5, 0, false),
      (b"Hello", 5, 1, false),
      (b"H", 1, 1, false),
      (b"", 0, 1, false),
    ];

    for (input, len, offset, expected) in WRITE_VECTORS.iter() {
      let mut stream = ZlibInner {
        mode: Mode::Inflate,
        ..Default::default()
      };

      stream.init_stream().unwrap();
      assert_eq!(stream.err, Z_OK);

      assert_eq!(
        stream
          .start_write(input, *offset, *len, &mut [], 0, 0, Flush::None)
          .is_ok(),
        *expected
      );
      assert_eq!(stream.err, Z_OK);

      stream.close().unwrap();
    }
  }
}
{}
conditional_block
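The `window_bits` adjustments in `init_stream` follow zlib's windowBits convention, which packs the wrapper choice (zlib, gzip, auto-detect, or raw) into one parameter. A pure-function sketch of the mapping, based on the zlib manual rather than on anything in this module:

```rust
// zlib windowBits encoding:
//   8..=15           plain zlib wrapper
//   windowBits + 16  gzip wrapper
//   windowBits + 32  auto-detect zlib/gzip (inflate side only)
//   -windowBits      raw deflate/inflate, no wrapper
fn effective_window_bits(kind: &str, window_bits: i32) -> i32 {
    match kind {
        "gzip" | "gunzip" => window_bits + 16,
        "unzip" => window_bits + 32, // auto-detect on inflate
        "deflate-raw" | "inflate-raw" => -window_bits,
        _ => window_bits,
    }
}
```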
mod.rs
// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
use deno_core::error::bad_resource_id;
use deno_core::error::type_error;
use deno_core::error::AnyError;
use deno_core::op;
use deno_core::OpState;
use libz_sys::*;
use std::borrow::Cow;
use std::cell::RefCell;
use std::future::Future;
use std::rc::Rc;

mod alloc;
pub mod brotli;
mod mode;
mod stream;

use mode::Flush;
use mode::Mode;

use self::stream::StreamWrapper;

#[inline]
fn check(condition: bool, msg: &str) -> Result<(), AnyError> {
  if condition {
    Ok(())
  } else {
    Err(type_error(msg.to_string()))
  }
}

#[inline]
fn zlib(state: &mut OpState, handle: u32) -> Result<Rc<Zlib>, AnyError> {
  state
    .resource_table
    .get::<Zlib>(handle)
    .map_err(|_| bad_resource_id())
}

#[derive(Default)]
struct ZlibInner {
  dictionary: Option<Vec<u8>>,
  err: i32,
  flush: Flush,
  init_done: bool,
  level: i32,
  mem_level: i32,
  mode: Mode,
  strategy: i32,
  window_bits: i32,
  write_in_progress: bool,
  pending_close: bool,
  gzib_id_bytes_read: u32,
  strm: StreamWrapper,
}

const GZIP_HEADER_ID1: u8 = 0x1f;
const GZIP_HEADER_ID2: u8 = 0x8b;

impl ZlibInner {
  #[allow(clippy::too_many_arguments)]
  fn start_write(
    &mut self,
    input: &[u8],
    in_off: u32,
    in_len: u32,
    out: &mut [u8],
    out_off: u32,
    out_len: u32,
    flush: Flush,
  ) -> Result<(), AnyError> {
    check(self.init_done, "write before init")?;
    check(!self.write_in_progress, "write already in progress")?;
    check(!self.pending_close, "close already in progress")?;

    self.write_in_progress = true;

    let next_in = input
      .get(in_off as usize..in_off as usize + in_len as usize)
      .ok_or_else(|| type_error("invalid input range"))?
      .as_ptr() as *mut _;
    let next_out = out
      .get_mut(out_off as usize..out_off as usize + out_len as usize)
      .ok_or_else(|| type_error("invalid output range"))?
      .as_mut_ptr();

    self.strm.avail_in = in_len;
    self.strm.next_in = next_in;
    self.strm.avail_out = out_len;
    self.strm.next_out = next_out;

    self.flush = flush;
    Ok(())
  }

  fn do_write(&mut self, flush: Flush) -> Result<(), AnyError> {
    self.flush = flush;
    match self.mode {
      Mode::Deflate | Mode::Gzip | Mode::DeflateRaw => {
        self.err = self.strm.deflate(flush);
      }
      // Auto-detect mode.
      Mode::Unzip if self.strm.avail_in > 0 => 'blck: {
        let mut next_expected_header_byte = Some(0);

        // SAFETY: `self.strm.next_in` is a valid pointer to the input buffer.
        // `self.strm.avail_in` is the length of the input buffer that is only
        // set by `start_write`.
        let strm = unsafe {
          std::slice::from_raw_parts(
            self.strm.next_in,
            self.strm.avail_in as usize,
          )
        };

        if self.gzib_id_bytes_read == 0 {
          if strm[0] == GZIP_HEADER_ID1 {
            self.gzib_id_bytes_read = 1;
            next_expected_header_byte = Some(1);

            // Not enough.
            if self.strm.avail_in == 1 {
              break 'blck;
            }
          } else {
            self.mode = Mode::Inflate;
            next_expected_header_byte = None;
          }
        }

        if self.gzib_id_bytes_read == 1 {
          let byte = match next_expected_header_byte {
            Some(i) => strm[i],
            None => break 'blck,
          };
          if byte == GZIP_HEADER_ID2 {
            self.gzib_id_bytes_read = 2;
            self.mode = Mode::Gunzip;
          } else {
            self.mode = Mode::Inflate;
          }
        } else if next_expected_header_byte.is_some() {
          return Err(type_error(
            "invalid number of gzip magic number bytes read",
          ));
        }
      }
      _ => {}
    }

    match self.mode {
      Mode::Inflate
      | Mode::Gunzip
      | Mode::InflateRaw
      // We're still reading the header.
      | Mode::Unzip => {
        self.err = self.strm.inflate(self.flush);
        // TODO(@littledivy): Use if let chain when it is stable.
        // https://github.com/rust-lang/rust/issues/53667
        //
        // Data was encoded with dictionary
        if let (Z_NEED_DICT, Some(dictionary)) = (self.err, &self.dictionary) {
          self.err = self.strm.inflate_set_dictionary(dictionary);

          if self.err == Z_OK {
            self.err = self.strm.inflate(flush);
          } else if self.err == Z_DATA_ERROR {
            self.err = Z_NEED_DICT;
          }
        }

        while self.strm.avail_in > 0
          && self.mode == Mode::Gunzip
          && self.err == Z_STREAM_END
          // SAFETY: `strm` is a valid pointer to the zlib strm.
          // `strm.next_in` is initialized to the input buffer.
          && unsafe { *self.strm.next_in } != 0x00
        {
          self.err = self.strm.reset(self.mode);
          self.err = self.strm.inflate(flush);
        }
      }
      _ => {}
    }

    let done = self.strm.avail_out != 0 && self.flush == Flush::Finish;
    // We're not done yet, but the output buffer is full.
    if self.err == Z_BUF_ERROR && !done {
      // Set to Z_OK to avoid reporting the error in JS.
      self.err = Z_OK;
    }

    self.write_in_progress = false;
    Ok(())
  }

  fn init_stream(&mut self) -> Result<(), AnyError> {
    match self.mode {
      Mode::Gzip | Mode::Gunzip => self.window_bits += 16,
      Mode::Unzip => self.window_bits += 32,
      Mode::DeflateRaw | Mode::InflateRaw => self.window_bits *= -1,
      _ => {}
    }

    self.err = match self.mode {
      Mode::Deflate | Mode::Gzip | Mode::DeflateRaw => self.strm.deflate_init(
        self.level,
        self.window_bits,
        self.mem_level,
        self.strategy,
      ),
      Mode::Inflate | Mode::Gunzip | Mode::InflateRaw | Mode::Unzip => {
        self.strm.inflate_init(self.window_bits)
      }
      Mode::None => return Err(type_error("Unknown mode")),
    };

    self.write_in_progress = false;
    self.init_done = true;

    Ok(())
  }

  fn close(&mut self) -> Result<bool, AnyError> {
    if self.write_in_progress {
      self.pending_close = true;
      return Ok(false);
    }

    self.pending_close = false;
    check(self.init_done, "close before init")?;
    self.strm.end(self.mode);
    self.mode = Mode::None;
    Ok(true)
  }

  fn reset_stream(&mut self) -> Result<(), AnyError>
}

struct Zlib {
  inner: RefCell<ZlibInner>,
}

impl deno_core::Resource for Zlib {
  fn name(&self) -> Cow<str> {
    "zlib".into()
  }
}

#[op]
pub fn op_zlib_new(state: &mut OpState, mode: i32) -> Result<u32, AnyError> {
  let mode = Mode::try_from(mode)?;

  let inner = ZlibInner {
    mode,
    ..Default::default()
  };

  Ok(state.resource_table.add(Zlib {
    inner: RefCell::new(inner),
  }))
}

#[op]
pub fn op_zlib_close(state: &mut OpState, handle: u32) -> Result<(), AnyError> {
  let resource = zlib(state, handle)?;
  let mut zlib = resource.inner.borrow_mut();

  // If there is a pending write, defer the close until the write is done.
  zlib.close()?;

  Ok(())
}

#[op]
pub fn op_zlib_write_async(
  state: Rc<RefCell<OpState>>,
  handle: u32,
  flush: i32,
  input: &[u8],
  in_off: u32,
  in_len: u32,
  out: &mut [u8],
  out_off: u32,
  out_len: u32,
) -> Result<
  impl Future<Output = Result<(i32, u32, u32), AnyError>> + 'static,
  AnyError,
> {
  let mut state_mut = state.borrow_mut();
  let resource = zlib(&mut state_mut, handle)?;
  let mut strm = resource.inner.borrow_mut();
  let flush = Flush::try_from(flush)?;
  strm.start_write(input, in_off, in_len, out, out_off, out_len, flush)?;

  let state = state.clone();
  Ok(async move {
    let mut state_mut = state.borrow_mut();
    let resource = zlib(&mut state_mut, handle)?;
    let mut zlib = resource.inner.borrow_mut();

    zlib.do_write(flush)?;

    Ok((zlib.err, zlib.strm.avail_out, zlib.strm.avail_in))
  })
}

#[op]
pub fn op_zlib_write(
  state: &mut OpState,
  handle: u32,
  flush: i32,
  input: &[u8],
  in_off: u32,
  in_len: u32,
  out: &mut [u8],
  out_off: u32,
  out_len: u32,
  result: &mut [u32],
) -> Result<i32, AnyError> {
  let resource = zlib(state, handle)?;
  let mut zlib = resource.inner.borrow_mut();

  let flush = Flush::try_from(flush)?;
  zlib.start_write(input, in_off, in_len, out, out_off, out_len, flush)?;
  zlib.do_write(flush)?;

  result[0] = zlib.strm.avail_out;
  result[1] = zlib.strm.avail_in;

  Ok(zlib.err)
}

#[op]
pub fn op_zlib_init(
  state: &mut OpState,
  handle: u32,
  level: i32,
  window_bits: i32,
  mem_level: i32,
  strategy: i32,
  dictionary: &[u8],
) -> Result<i32, AnyError> {
  let resource = zlib(state, handle)?;
  let mut zlib = resource.inner.borrow_mut();

  check((8..=15).contains(&window_bits), "invalid windowBits")?;
  check((-1..=9).contains(&level), "invalid level")?;
  check((1..=9).contains(&mem_level), "invalid memLevel")?;
  check(
    strategy == Z_DEFAULT_STRATEGY
      || strategy == Z_FILTERED
      || strategy == Z_HUFFMAN_ONLY
      || strategy == Z_RLE
      || strategy == Z_FIXED,
    "invalid strategy",
  )?;

  zlib.level = level;
  zlib.window_bits = window_bits;
  zlib.mem_level = mem_level;
  zlib.strategy = strategy;

  zlib.flush = Flush::None;
  zlib.err = Z_OK;

  zlib.init_stream()?;

  zlib.dictionary = if !dictionary.is_empty() {
    Some(dictionary.to_vec())
  } else {
    None
  };

  Ok(zlib.err)
}

#[op]
pub fn op_zlib_reset(
  state: &mut OpState,
  handle: u32,
) -> Result<i32, AnyError> {
  let resource = zlib(state, handle)?;
  let mut zlib = resource.inner.borrow_mut();

  zlib.reset_stream()?;

  Ok(zlib.err)
}

#[op]
pub fn op_zlib_close_if_pending(
  state: &mut OpState,
  handle: u32,
) -> Result<(), AnyError> {
  let resource = zlib(state, handle)?;
  let pending_close = {
    let mut zlib = resource.inner.borrow_mut();
    zlib.write_in_progress = false;
    zlib.pending_close
  };
  if pending_close {
    drop(resource);
    state.resource_table.close(handle)?;
  }

  Ok(())
}

#[cfg(test)]
mod tests {
  use super::*;

  #[test]
  fn zlib_start_write() {
    // (buffer, length, offset, should pass)
    type WriteVector = (&'static [u8], u32, u32, bool);
    const WRITE_VECTORS: [WriteVector; 8] = [
      (b"Hello", 5, 0, true),
      (b"H", 1, 0, true),
      (b"", 0, 0, true),
      // Overrun the buffer
      (b"H", 5, 0, false),
      (b"ello", 5, 0, false),
      (b"Hello", 5, 1, false),
      (b"H", 1, 1, false),
      (b"", 0, 1, false),
    ];

    for (input, len, offset, expected) in WRITE_VECTORS.iter() {
      let mut stream = ZlibInner {
        mode: Mode::Inflate,
        ..Default::default()
      };

      stream.init_stream().unwrap();
      assert_eq!(stream.err, Z_OK);

      assert_eq!(
        stream
          .start_write(input, *offset, *len, &mut [], 0, 0, Flush::None)
          .is_ok(),
        *expected
      );
      assert_eq!(stream.err, Z_OK);

      stream.close().unwrap();
    }
  }
}
{ self.err = self.strm.reset(self.mode); Ok(()) }
identifier_body
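A rough sketch of how `ZlibInner` is driven directly, mirroring the unit test at the bottom of the module. The field values are illustrative assumptions (`Z_DEFAULT_COMPRESSION` and a 32 KiB window), not defaults taken from this file:

```rust
fn deflate_once() -> Result<usize, deno_core::error::AnyError> {
    let mut z = ZlibInner {
        mode: Mode::Deflate,
        level: Z_DEFAULT_COMPRESSION, // -1, zlib's default level
        mem_level: 8,
        window_bits: 15,
        strategy: Z_DEFAULT_STRATEGY,
        ..Default::default()
    };
    z.init_stream()?;

    let input = b"hello hello hello";
    let mut out = vec![0u8; 128];
    z.start_write(input, 0, input.len() as u32, &mut out, 0, 128, Flush::Finish)?;
    z.do_write(Flush::Finish)?;

    // Bytes produced = buffer capacity minus whatever zlib left unused.
    let produced = 128 - z.strm.avail_out as usize;
    z.close()?;
    Ok(produced)
}
```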
mod.rs
// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
use deno_core::error::bad_resource_id;
use deno_core::error::type_error;
use deno_core::error::AnyError;
use deno_core::op;
use deno_core::OpState;
use libz_sys::*;
use std::borrow::Cow;
use std::cell::RefCell;
use std::future::Future;
use std::rc::Rc;

mod alloc;
pub mod brotli;
mod mode;
mod stream;

use mode::Flush;
use mode::Mode;

use self::stream::StreamWrapper;

#[inline]
fn check(condition: bool, msg: &str) -> Result<(), AnyError> {
  if condition {
    Ok(())
  } else {
    Err(type_error(msg.to_string()))
  }
}

#[inline]
fn zlib(state: &mut OpState, handle: u32) -> Result<Rc<Zlib>, AnyError> {
  state
    .resource_table
    .get::<Zlib>(handle)
    .map_err(|_| bad_resource_id())
}

#[derive(Default)]
struct ZlibInner {
  dictionary: Option<Vec<u8>>,
  err: i32,
  flush: Flush,
  init_done: bool,
  level: i32,
  mem_level: i32,
  mode: Mode,
  strategy: i32,
  window_bits: i32,
  write_in_progress: bool,
  pending_close: bool,
  gzib_id_bytes_read: u32,
  strm: StreamWrapper,
}

const GZIP_HEADER_ID1: u8 = 0x1f;
const GZIP_HEADER_ID2: u8 = 0x8b;

impl ZlibInner {
  #[allow(clippy::too_many_arguments)]
  fn start_write(
    &mut self,
    input: &[u8],
    in_off: u32,
    in_len: u32,
    out: &mut [u8],
    out_off: u32,
    out_len: u32,
    flush: Flush,
  ) -> Result<(), AnyError> {
    check(self.init_done, "write before init")?;
    check(!self.write_in_progress, "write already in progress")?;
    check(!self.pending_close, "close already in progress")?;

    self.write_in_progress = true;

    let next_in = input
      .get(in_off as usize..in_off as usize + in_len as usize)
      .ok_or_else(|| type_error("invalid input range"))?
      .as_ptr() as *mut _;
    let next_out = out
      .get_mut(out_off as usize..out_off as usize + out_len as usize)
      .ok_or_else(|| type_error("invalid output range"))?
      .as_mut_ptr();

    self.strm.avail_in = in_len;
    self.strm.next_in = next_in;
    self.strm.avail_out = out_len;
    self.strm.next_out = next_out;

    self.flush = flush;
    Ok(())
  }

  fn do_write(&mut self, flush: Flush) -> Result<(), AnyError> {
    self.flush = flush;
    match self.mode {
      Mode::Deflate | Mode::Gzip | Mode::DeflateRaw => {
        self.err = self.strm.deflate(flush);
      }
      // Auto-detect mode.
      Mode::Unzip if self.strm.avail_in > 0 => 'blck: {
        let mut next_expected_header_byte = Some(0);

        // SAFETY: `self.strm.next_in` is a valid pointer to the input buffer.
        // `self.strm.avail_in` is the length of the input buffer that is only
        // set by `start_write`.
        let strm = unsafe {
          std::slice::from_raw_parts(
            self.strm.next_in,
            self.strm.avail_in as usize,
          )
        };

        if self.gzib_id_bytes_read == 0 {
          if strm[0] == GZIP_HEADER_ID1 {
            self.gzib_id_bytes_read = 1;
            next_expected_header_byte = Some(1);

            // Not enough.
            if self.strm.avail_in == 1 {
              break 'blck;
            }
          } else {
            self.mode = Mode::Inflate;
            next_expected_header_byte = None;
          }
        }

        if self.gzib_id_bytes_read == 1 {
          let byte = match next_expected_header_byte {
            Some(i) => strm[i],
            None => break 'blck,
          };
          if byte == GZIP_HEADER_ID2 {
            self.gzib_id_bytes_read = 2;
            self.mode = Mode::Gunzip;
          } else {
            self.mode = Mode::Inflate;
          }
        } else if next_expected_header_byte.is_some() {
          return Err(type_error(
            "invalid number of gzip magic number bytes read",
          ));
        }
      }
      _ => {}
    }

    match self.mode {
      Mode::Inflate
      | Mode::Gunzip
      | Mode::InflateRaw
      // We're still reading the header.
      | Mode::Unzip => {
        self.err = self.strm.inflate(self.flush);
        // TODO(@littledivy): Use if let chain when it is stable.
        // https://github.com/rust-lang/rust/issues/53667
        //
        // Data was encoded with dictionary
        if let (Z_NEED_DICT, Some(dictionary)) = (self.err, &self.dictionary) {
          self.err = self.strm.inflate_set_dictionary(dictionary);

          if self.err == Z_OK {
            self.err = self.strm.inflate(flush);
          } else if self.err == Z_DATA_ERROR {
            self.err = Z_NEED_DICT;
          }
        }

        while self.strm.avail_in > 0
          && self.mode == Mode::Gunzip
          && self.err == Z_STREAM_END
          // SAFETY: `strm` is a valid pointer to the zlib strm.
          // `strm.next_in` is initialized to the input buffer.
          && unsafe { *self.strm.next_in } != 0x00
        {
          self.err = self.strm.reset(self.mode);
          self.err = self.strm.inflate(flush);
        }
      }
      _ => {}
    }

    let done = self.strm.avail_out != 0 && self.flush == Flush::Finish;
    // We're not done yet, but the output buffer is full.
    if self.err == Z_BUF_ERROR && !done {
      // Set to Z_OK to avoid reporting the error in JS.
      self.err = Z_OK;
    }

    self.write_in_progress = false;
    Ok(())
  }

  fn init_stream(&mut self) -> Result<(), AnyError> {
    match self.mode {
      Mode::Gzip | Mode::Gunzip => self.window_bits += 16,
      Mode::Unzip => self.window_bits += 32,
      Mode::DeflateRaw | Mode::InflateRaw => self.window_bits *= -1,
      _ => {}
    }

    self.err = match self.mode {
      Mode::Deflate | Mode::Gzip | Mode::DeflateRaw => self.strm.deflate_init(
        self.level,
        self.window_bits,
        self.mem_level,
        self.strategy,
      ),
      Mode::Inflate | Mode::Gunzip | Mode::InflateRaw | Mode::Unzip => {
        self.strm.inflate_init(self.window_bits)
      }
      Mode::None => return Err(type_error("Unknown mode")),
    };

    self.write_in_progress = false;
    self.init_done = true;

    Ok(())
  }

  fn close(&mut self) -> Result<bool, AnyError> {
    if self.write_in_progress {
    self.pending_close = false;
    check(self.init_done, "close before init")?;
    self.strm.end(self.mode);
    self.mode = Mode::None;
    Ok(true)
  }

  fn reset_stream(&mut self) -> Result<(), AnyError> {
    self.err = self.strm.reset(self.mode);
    Ok(())
  }
}

struct Zlib {
  inner: RefCell<ZlibInner>,
}

impl deno_core::Resource for Zlib {
  fn name(&self) -> Cow<str> {
    "zlib".into()
  }
}

#[op]
pub fn op_zlib_new(state: &mut OpState, mode: i32) -> Result<u32, AnyError> {
  let mode = Mode::try_from(mode)?;

  let inner = ZlibInner {
    mode,
    ..Default::default()
  };

  Ok(state.resource_table.add(Zlib {
    inner: RefCell::new(inner),
  }))
}

#[op]
pub fn op_zlib_close(state: &mut OpState, handle: u32) -> Result<(), AnyError> {
  let resource = zlib(state, handle)?;
  let mut zlib = resource.inner.borrow_mut();

  // If there is a pending write, defer the close until the write is done.
  zlib.close()?;

  Ok(())
}

#[op]
pub fn op_zlib_write_async(
  state: Rc<RefCell<OpState>>,
  handle: u32,
  flush: i32,
  input: &[u8],
  in_off: u32,
  in_len: u32,
  out: &mut [u8],
  out_off: u32,
  out_len: u32,
) -> Result<
  impl Future<Output = Result<(i32, u32, u32), AnyError>> + 'static,
  AnyError,
> {
  let mut state_mut = state.borrow_mut();
  let resource = zlib(&mut state_mut, handle)?;
  let mut strm = resource.inner.borrow_mut();
  let flush = Flush::try_from(flush)?;
  strm.start_write(input, in_off, in_len, out, out_off, out_len, flush)?;

  let state = state.clone();
  Ok(async move {
    let mut state_mut = state.borrow_mut();
    let resource = zlib(&mut state_mut, handle)?;
    let mut zlib = resource.inner.borrow_mut();

    zlib.do_write(flush)?;

    Ok((zlib.err, zlib.strm.avail_out, zlib.strm.avail_in))
  })
}

#[op]
pub fn op_zlib_write(
  state: &mut OpState,
  handle: u32,
  flush: i32,
  input: &[u8],
  in_off: u32,
  in_len: u32,
  out: &mut [u8],
  out_off: u32,
  out_len: u32,
  result: &mut [u32],
) -> Result<i32, AnyError> {
  let resource = zlib(state, handle)?;
  let mut zlib = resource.inner.borrow_mut();

  let flush = Flush::try_from(flush)?;
  zlib.start_write(input, in_off, in_len, out, out_off, out_len, flush)?;
  zlib.do_write(flush)?;

  result[0] = zlib.strm.avail_out;
  result[1] = zlib.strm.avail_in;

  Ok(zlib.err)
}

#[op]
pub fn op_zlib_init(
  state: &mut OpState,
  handle: u32,
  level: i32,
  window_bits: i32,
  mem_level: i32,
  strategy: i32,
  dictionary: &[u8],
) -> Result<i32, AnyError> {
  let resource = zlib(state, handle)?;
  let mut zlib = resource.inner.borrow_mut();

  check((8..=15).contains(&window_bits), "invalid windowBits")?;
  check((-1..=9).contains(&level), "invalid level")?;
  check((1..=9).contains(&mem_level), "invalid memLevel")?;
  check(
    strategy == Z_DEFAULT_STRATEGY
      || strategy == Z_FILTERED
      || strategy == Z_HUFFMAN_ONLY
      || strategy == Z_RLE
      || strategy == Z_FIXED,
    "invalid strategy",
  )?;

  zlib.level = level;
  zlib.window_bits = window_bits;
  zlib.mem_level = mem_level;
  zlib.strategy = strategy;

  zlib.flush = Flush::None;
  zlib.err = Z_OK;

  zlib.init_stream()?;

  zlib.dictionary = if !dictionary.is_empty() {
    Some(dictionary.to_vec())
  } else {
    None
  };

  Ok(zlib.err)
}

#[op]
pub fn op_zlib_reset(
  state: &mut OpState,
  handle: u32,
) -> Result<i32, AnyError> {
  let resource = zlib(state, handle)?;
  let mut zlib = resource.inner.borrow_mut();

  zlib.reset_stream()?;

  Ok(zlib.err)
}

#[op]
pub fn op_zlib_close_if_pending(
  state: &mut OpState,
  handle: u32,
) -> Result<(), AnyError> {
  let resource = zlib(state, handle)?;
  let pending_close = {
    let mut zlib = resource.inner.borrow_mut();
    zlib.write_in_progress = false;
    zlib.pending_close
  };
  if pending_close {
    drop(resource);
    state.resource_table.close(handle)?;
  }

  Ok(())
}

#[cfg(test)]
mod tests {
  use super::*;

  #[test]
  fn zlib_start_write() {
    // (buffer, length, offset, should pass)
    type WriteVector = (&'static [u8], u32, u32, bool);
    const WRITE_VECTORS: [WriteVector; 8] = [
      (b"Hello", 5, 0, true),
      (b"H", 1, 0, true),
      (b"", 0, 0, true),
      // Overrun the buffer
      (b"H", 5, 0, false),
      (b"ello", 5, 0, false),
      (b"Hello", 5, 1, false),
      (b"H", 1, 1, false),
      (b"", 0, 1, false),
    ];

    for (input, len, offset, expected) in WRITE_VECTORS.iter() {
      let mut stream = ZlibInner {
        mode: Mode::Inflate,
        ..Default::default()
      };

      stream.init_stream().unwrap();
      assert_eq!(stream.err, Z_OK);

      assert_eq!(
        stream
          .start_write(input, *offset, *len, &mut [], 0, 0, Flush::None)
          .is_ok(),
        *expected
      );
      assert_eq!(stream.err, Z_OK);

      stream.close().unwrap();
    }
  }
}
self.pending_close = true; return Ok(false); }
random_line_split
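One behavior worth calling out from `close` and `op_zlib_close_if_pending`: a close requested while a write is in flight is only recorded, and the real teardown happens later. A minimal sketch of that handshake, relying on the private fields the same way the module's own test does:

```rust
let mut z = ZlibInner {
    init_done: true,
    write_in_progress: true,
    ..Default::default()
};

// Closing mid-write frees nothing; the request is parked.
assert_eq!(z.close().unwrap(), false);
assert!(z.pending_close);

// Later, op_zlib_close_if_pending clears write_in_progress and,
// seeing pending_close, drops the resource for real.
z.write_in_progress = false;
```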
mod.rs
mod assignments;
mod colors;
mod directory_stack;
mod flow;
/// The various blocks
pub mod flow_control;
mod job;
mod pipe_exec;
mod shell_expand;
mod signals;
pub mod sys;
/// Variables for the shell
pub mod variables;

use self::{
    directory_stack::DirectoryStack,
    flow_control::{Block, Function, FunctionError, Statement},
    pipe_exec::foreground,
    sys::NULL_PATH,
    variables::Variables,
};
pub use self::{
    flow::BlockError,
    job::{Job, RefinedJob},
    pipe_exec::{
        job_control::{BackgroundEvent, BackgroundProcess},
        PipelineError,
    },
    variables::Value,
};
use crate::{
    assignments::value_check,
    builtins::{BuiltinMap, Status},
    expansion::{
        pipelines::{PipeType, Pipeline},
        Error as ExpansionError,
    },
    parser::{
        lexers::{Key, Primitive},
        Error as ParseError,
    },
};
use nix::{
    sys::signal::{self, SigHandler},
    unistd::Pid,
};
use std::{
    convert::TryFrom,
    fs::File,
    mem,
    ops::{Deref, DerefMut},
    rc::Rc,
    sync::{atomic::Ordering, Arc, Mutex},
    time::SystemTime,
};
use thiserror::Error;

/// Errors from execution
#[derive(Debug, Error)]
pub enum IonError {
    // Parse-time error
    /// Parsing failed
    #[error("syntax error: {0}")]
    InvalidSyntax(#[source] ParseError),
    /// Incorrect order of blocks
    #[error("block error: {0}")]
    StatementFlowError(#[source] BlockError),

    // Run time errors
    /// Function execution error
    #[error("function error: {0}")]
    Function(#[source] FunctionError),
    /// Failed to run a pipeline
    #[error("pipeline execution error: {0}")]
    PipelineExecutionError(#[source] PipelineError),
    /// Could not properly expand to a pipeline
    #[error("expansion error: {0}")]
    ExpansionError(#[source] ExpansionError<IonError>),
}

impl From<ParseError> for IonError {
    #[must_use]
    fn from(cause: ParseError) -> Self { Self::InvalidSyntax(cause) }
}

impl From<FunctionError> for IonError {
    #[must_use]
    fn from(cause: FunctionError) -> Self { Self::Function(cause) }
}

impl From<BlockError> for IonError {
    #[must_use]
    fn from(cause: BlockError) -> Self { Self::StatementFlowError(cause) }
}

impl From<PipelineError> for IonError {
    #[must_use]
    fn from(cause: PipelineError) -> Self { Self::PipelineExecutionError(cause) }
}

impl From<ExpansionError<Self>> for IonError {
    #[must_use]
    fn from(cause: ExpansionError<Self>) -> Self { Self::ExpansionError(cause) }
}

/// Options for the shell
#[derive(Debug, Clone, Hash, Default)]
pub struct Options {
    /// Exit from the shell on the first error.
    pub err_exit: bool,
    /// Activates the -p option, aka pipefail in bash
    pub pipe_fail: bool,
    /// Do not execute any commands given to the shell.
    pub no_exec: bool,
    /// If set, denotes that this shell is running as a background job.
    pub grab_tty: bool,
}

/// The shell structure is a megastructure that manages all of the state of the shell
/// throughout the entirety of the program. It is initialized at the beginning of the
/// program, and lives until the end of the program.
pub struct Shell<'a> {
    /// Contains a list of built-in commands that were created when the program started.
    builtins: BuiltinMap<'a>,
    /// Contains the aliases, strings, and array variable maps.
    variables: Variables,
    /// Contains the current state of flow control parameters.
    flow_control: Block,
    /// Contains the directory stack parameters.
    directory_stack: DirectoryStack,
    /// When a command is executed, the final result of that command is stored here.
    previous_status: Status,
    /// The job ID of the previous command sent to the background.
    previous_job: usize,
    /// Contains all the options relative to the shell
    opts: Options,
    /// Contains information on all of the active background processes that are being
    /// managed by the shell.
    background: Arc<Mutex<Vec<BackgroundProcess>>>,
    /// When the `fg` command is run, this will be used to communicate with the
    /// specified background process.
    foreground_signals: Arc<foreground::Signals>,

    // Callbacks
    /// Custom callback for each command call
    on_command: Option<OnCommandCallback<'a>>,
    /// Custom callback before each command call
    pre_command: Option<PreCommandCallback<'a>>,
    /// Custom callback when a background event occurs
    background_event: Option<BackgroundEventCallback>,

    // Default std pipes
    stdin: Option<File>,
    stdout: Option<File>,
    stderr: Option<File>,
}

/// A callback that is executed after each pipeline is run
pub type OnCommandCallback<'a> = Box<dyn Fn(&Shell<'_>, std::time::Duration) + 'a>;
/// A callback that is executed before each pipeline is run
pub type PreCommandCallback<'a> = Box<dyn Fn(&Shell<'_>, &Pipeline<RefinedJob<'_>>) + 'a>;
/// A callback that is executed when a background event occurs
pub type BackgroundEventCallback = Arc<dyn Fn(usize, Pid, BackgroundEvent) + Send + Sync>;

impl<'a> Default for Shell<'a> {
    #[must_use]
    fn default() -> Self { Self::new() }
}

impl<'a> Shell<'a> {
    /// Install signal handlers necessary for the shell to work
    fn install_signal_handler() {
        extern "C" fn handler(signal: i32) {
            let signal = signal::Signal::try_from(signal).unwrap();
            let signal = match signal {
                signal::Signal::SIGINT => signals::SIGINT,
                signal::Signal::SIGHUP => signals::SIGHUP,
                signal::Signal::SIGTERM => signals::SIGTERM,
                _ => unreachable!(),
            };

            signals::PENDING.store(signal as usize, Ordering::SeqCst);
        }

        unsafe {
            let _ = signal::signal(signal::Signal::SIGHUP, SigHandler::Handler(handler));
            let _ = signal::signal(signal::Signal::SIGINT, SigHandler::Handler(handler));
            let _ = signal::signal(signal::Signal::SIGTERM, SigHandler::Handler(handler));
        }
    }

    /// Create a new shell with default settings
    #[must_use]
    pub fn new() -> Self { Self::with_builtins(BuiltinMap::default()) }

    /// Create a shell with custom builtins
    #[must_use]
    pub fn with_builtins(builtins: BuiltinMap<'a>) -> Self {
        Self::install_signal_handler();

        // This will block SIGTSTP, SIGTTOU, SIGTTIN, and SIGCHLD, which is required
        // for this shell to manage its own process group / children / etc.
        signals::block();

        Shell {
            builtins,
            variables: Variables::default(),
            flow_control: Block::with_capacity(5),
            directory_stack: DirectoryStack::new(),
            previous_job: !0,
            previous_status: Status::SUCCESS,
            opts: Options::default(),
            background: Arc::new(Mutex::new(Vec::new())),
            foreground_signals: Arc::new(foreground::Signals::new()),
            on_command: None,
            pre_command: None,
            background_event: None,
            stdin: None,
            stdout: None,
            stderr: None,
        }
    }

    /// Replace the default stdin
    pub fn stdin<T: Into<Option<File>>>(&mut self, stdin: T) -> Option<File> {
        mem::replace(&mut self.stdin, stdin.into())
    }

    /// Replace the default stdout
    pub fn stdout<T: Into<Option<File>>>(&mut self, stdout: T) -> Option<File> {
        mem::replace(&mut self.stdout, stdout.into())
    }

    /// Replace the default stderr
    pub fn stderr<T: Into<Option<File>>>(&mut self, stderr: T) -> Option<File> {
        mem::replace(&mut self.stderr, stderr.into())
    }

    /// Access the directory stack
    #[must_use]
    pub const fn dir_stack(&self) -> &DirectoryStack { &self.directory_stack }

    /// Mutable access to the directory stack
    #[must_use]
    pub fn dir_stack_mut(&mut self) -> &mut DirectoryStack { &mut self.directory_stack }

    /// Resets the flow control fields to their default values.
    pub fn reset_flow(&mut self) { self.flow_control.clear(); }

    /// Exit the current block
    pub fn exit_block(&mut self) -> Result<(), BlockError> {
        self.flow_control.pop().map(|_| ()).ok_or(BlockError::UnmatchedEnd)
    }

    /// Get the depth of the current block
    #[must_use]
    pub fn block_len(&self) -> usize { self.flow_control.len() }

    /// A method for executing a function, using `args` as the input.
    pub fn execute_function<S: AsRef<str>>(
        &mut self,
        function: &Rc<Function>,
        args: &[S],
    ) -> Result<Status, IonError> {
        function.clone().execute(self, args)?;
        Ok(self.previous_status)
    }

    /// A method for executing commands in the Ion shell without capturing. It takes
    /// command(s) as a string argument, parses them, and executes them the same as it
    /// would if you had executed the command(s) in the command line REPL interface for
    /// Ion. If the supplied command is not terminated, then an error will be returned.
    pub fn execute_command<T: std::io::Read>(&mut self, command: T) -> Result<Status, IonError> {
        self.on_command(command.bytes().filter_map(Result::ok), true)?;

        if let Some(block) = self.flow_control.last().map(Statement::to_string) {
            self.previous_status = Status::from_exit_code(1);
            Err(IonError::StatementFlowError(BlockError::UnclosedBlock(block)))
        } else {
            Ok(self.previous_status)
        }
    }

    /// Executes a pipeline and returns the final exit status of the pipeline.
    pub fn run_pipeline(&mut self, pipeline: &Pipeline<Job>) -> Result<Status, IonError> {
        let command_start_time = SystemTime::now();

        let mut pipeline = pipeline.expand(self)?;
        let null_file =
            if pipeline.pipe == PipeType::Disown { File::open(NULL_PATH).ok() } else { None };
        let (stderr, stdout) = (
            null_file.as_ref().or_else(|| self.stderr.as_ref()),
            null_file.as_ref().or_else(|| self.stdout.as_ref()),
        );

        for item in &mut pipeline.items {
            item.job.stdin = self
                .stdin
                .as_ref()
                .map(|file| file.try_clone().map_err(PipelineError::ClonePipeFailed))
                .transpose()?;
            item.job.stdout = stdout
                .map(|file| file.try_clone().map_err(PipelineError::ClonePipeFailed))
                .transpose()?;
            item.job.stderr = stderr
                .map(|file| file.try_clone().map_err(PipelineError::ClonePipeFailed))
                .transpose()?;
        }

        if let Some(ref callback) = self.pre_command {
            callback(self, &pipeline);
        }

        // Don't execute commands when the `-n` flag is passed.
        let exit_status = if self.opts.no_exec {
            Ok(Status::SUCCESS)
        } else if pipeline.requires_piping()
            || self.stderr.is_some()
            || self.stdin.is_some()
            || self.stdout.is_some()
        {
            self.execute_pipeline(pipeline).map_err(Into::into)
        } else if let Some(main) = self.builtins.get(pipeline.items[0].command()) {
            Ok(main(&pipeline.items[0].job.args, self))
        } else if let Some(Value::Function(function)) =
            self.variables.get(&pipeline.items[0].job.args[0]).cloned()
        {
            function.execute(self, &pipeline.items[0].job.args).map(|_| self.previous_status)
        } else {
            self.execute_pipeline(pipeline).map_err(Into::into)
        }?;

        if let Some(ref callback) = self.on_command {
            if let Ok(elapsed_time) = command_start_time.elapsed() {
                callback(self, elapsed_time);
            }
        }

        if self.opts.err_exit && !exit_status.is_success() {
            return Err(PipelineError::EarlyExit(exit_status).into());
        }

        Ok(exit_status)
    }

    /// Get the pid of the last executed job
    #[must_use]
    pub const fn previous_job(&self) -> Option<usize> {
        if self.previous_job == !0 { None } else { Some(self.previous_job) }
    }

    /// Set the callback to call before each command
    pub fn set_background_event(&mut self, callback: Option<BackgroundEventCallback>) {
        self.background_event = callback;
    }

    /// Set the callback to call before each command
    #[must_use]
    pub fn background_event_mut(&mut self) -> &mut Option<BackgroundEventCallback> {
        &mut self.background_event
    }

    /// Set the callback to call before each command
    pub fn set_pre_command(&mut self, callback: Option<PreCommandCallback<'a>>) {
        self.pre_command = callback;
    }

    /// Set the callback to call before each command
    #[must_use]
    pub fn pre_command_mut(&mut self) -> &mut Option<PreCommandCallback<'a>> {
        &mut self.pre_command
    }

    /// Set the callback to call on each command
    pub fn set_on_command(&mut self, callback: Option<OnCommandCallback<'a>>) {
        self.on_command = callback;
    }

    /// Set the callback to call on each command
    pub fn on_command_mut(&mut self) -> &mut Option<OnCommandCallback<'a>> {
        &mut self.on_command
    }

    /// Get access to the builtins
    #[must_use]
    pub const fn builtins(&self) -> &BuiltinMap<'a> { &self.builtins }

    /// Get mutable access to the builtins
    ///
    /// Warning: Previously defined functions will rely on previous versions of the
    /// builtins, even if they are redefined. It is strongly advised to avoid mutating
    /// the builtins while the shell is running
    #[must_use]
    pub fn
(&mut self) -> &mut BuiltinMap<'a> { &mut self.builtins } /// Access to the shell options #[must_use] pub const fn opts(&self) -> &Options { &self.opts } /// Mutable access to the shell options #[must_use] pub fn opts_mut(&mut self) -> &mut Options { &mut self.opts } /// Access to the variables #[must_use] pub const fn variables(&self) -> &Variables { &self.variables } /// Mutable access to the variables #[must_use] pub fn variables_mut(&mut self) -> &mut Variables { &mut self.variables } /// Access to the variables #[must_use] pub fn background_jobs(&self) -> impl Deref<Target = Vec<BackgroundProcess>> + '_ { self.background.lock().expect("Could not lock the mutex") } /// Mutable access to the variables pub fn background_jobs_mut(&mut self) -> impl DerefMut<Target = Vec<BackgroundProcess>> + '_ { self.background.lock().expect("Could not lock the mutex") } /// Get a function if it exists pub fn get_func<T: AsRef<str>>(&self, f: T) -> Option<Rc<Function>> { if let Some(Value::Function(function)) = self.variables().get(f.as_ref()) { Some(function.clone()) } else { None } } /// Get the last command's return code and/or the code for the error pub fn set_previous_status(&mut self, status: Status) { self.previous_status = status; } /// Get the last command's return code and/or the code for the error #[must_use] pub const fn previous_status(&self) -> Status { self.previous_status } fn assign(&mut self, key: &Key<'_>, value: Value<Rc<Function>>) -> Result<(), String> { match (&key.kind, &value) { (Primitive::Indexed(ref index_name, ref index_kind), Value::Str(_)) => { let index = value_check(self, index_name, index_kind) .map_err(|why| format!("{}: {}", key.name, why))?; match index { Value::Str(index) => { let lhs = self .variables .get_mut(key.name) .ok_or_else(|| "index value does not exist".to_string())?; match lhs { Value::HashMap(hmap) => { let _ = hmap.insert(index, value); Ok(()) } Value::BTreeMap(bmap) => { let _ = bmap.insert(index, value); Ok(()) } Value::Array(array) => { let index_num = index.parse::<usize>().map_err(|_| { format!("index variable is not a numeric value: `{}`", index) })?; if let Some(var) = array.get_mut(index_num) { *var = value; } Ok(()) } Value::Str(_) => Err("cannot assign to an index of a string".into()), _ => Ok(()), } } Value::Array(_) => Err("index variable cannot be an array".into()), Value::HashMap(_) => Err("index variable cannot be a hmap".into()), Value::BTreeMap(_) => Err("index variable cannot be a bmap".into()), _ => Ok(()), } } (_, Value::Str(_)) | (_, Value::Array(_)) | (Primitive::HashMap(_), Value::HashMap(_)) | (Primitive::BTreeMap(_), Value::BTreeMap(_)) => { self.variables.set(key.name, value); Ok(()) } _ => Ok(()), } } }
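A minimal usage sketch of the `Shell` API above, assuming this module is the root of an
embeddable shell library. Only items declared in this module are used; the wrapper
function, the assumption that `echo` resolves to a builtin or external command at run
time, and the trailing-newline command termination are illustrative, not verified.

// Sketch only: exercises Shell::new, set_pre_command, and execute_command as
// declared above. Error handling is deliberately minimal.
fn shell_usage_sketch() -> Result<(), IonError> {
    let mut shell = Shell::new();
    // Log before each pipeline runs; `set_pre_command` takes a boxed closure
    // matching the `PreCommandCallback` alias.
    shell.set_pre_command(Some(Box::new(|_shell, _pipeline| {
        eprintln!("about to run a pipeline");
    })));
    // Any `std::io::Read` works as a command source, e.g. a byte slice.
    let status = shell.execute_command(&b"echo hello\n"[..])?;
    if !status.is_success() {
        eprintln!("command failed");
    }
    Ok(())
}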
//! The SGX root enclave
//!
//! ## Authors
//!
//! The Veracruz Development Team.
//!
//! ## Licensing and copyright notice
//!
//! See the `LICENSE_MIT.markdown` file in the Veracruz root directory for
//! information on licensing and copyright.

#![no_std]
#[macro_use]
extern crate sgx_tstd as std;

use lazy_static::lazy_static;
use sgx_tdh::{SgxDhInitiator, SgxDhMsg3};
use sgx_types;
use sgx_types::{
    sgx_create_report, sgx_dh_msg1_t, sgx_dh_msg2_t, sgx_dh_msg3_t,
    sgx_dh_session_enclave_identity_t, sgx_ec256_public_t, sgx_key_128bit_t,
    sgx_ra_context_t, sgx_ra_init, sgx_status_t, sgx_target_info_t,
};
use std::{collections::HashMap, mem, sync::atomic::{AtomicU64, Ordering}};

use ring::{rand::SystemRandom, signature::EcdsaKeyPair};
use veracruz_utils::csr;

lazy_static! {
    static ref SESSION_ID: AtomicU64 = AtomicU64::new(1);
    static ref INITIATOR_HASH: std::sync::SgxMutex<HashMap<u64, SgxDhInitiator>> =
        std::sync::SgxMutex::new(HashMap::new());
    static ref PRIVATE_KEY: std::sync::SgxMutex<Option<std::vec::Vec<u8>>> =
        std::sync::SgxMutex::new(None);
    static ref CERT_CHAIN: std::sync::SgxMutex<Option<(std::vec::Vec<u8>, std::vec::Vec<u8>)>> =
        std::sync::SgxMutex::new(None);
}

/// Return codes for this enclave's entry points.
pub enum SgxRootEnclave {
    Success = 0x00,
    Msg3RawError = 0x01,
    ProcMsg3Error = 0x02,
    CsrVerifyFail = 0x03,
    CsrToCertFail = 0x04,
    LockFail = 0x05,
    HashError = 0x06,
    PKCS8Error = 0x07,
    StateError = 0x08,
    PrivateKeyNotPopulated = 0x09,
}

#[no_mangle]
pub extern "C" fn get_firmware_version_len(p_fwv_len: &mut usize) -> sgx_status_t {
    let version = env!("CARGO_PKG_VERSION");
    *p_fwv_len = version.len();
    sgx_status_t::SGX_SUCCESS
}

#[no_mangle]
pub extern "C" fn get_firmware_version(
    p_firmware_version_buf: *mut u8,
    fv_buf_size: usize,
) -> sgx_status_t {
    let version = env!("CARGO_PKG_VERSION");
    assert!(version.len() <= fv_buf_size);
    let version_buf_slice =
        unsafe { std::slice::from_raw_parts_mut(p_firmware_version_buf, fv_buf_size) };
    // Copy only `version.len()` bytes: `clone_from_slice` panics unless both
    // slices have the same length, and the caller's buffer may be larger.
    version_buf_slice[..version.len()].clone_from_slice(version.as_bytes());
    sgx_status_t::SGX_SUCCESS
}

#[no_mangle]
pub extern "C" fn init_remote_attestation_enc(
    pub_key_buf: *const u8,
    pub_key_size: usize,
    p_context: *mut sgx_ra_context_t,
) -> sgx_status_t {
    assert!(pub_key_size != 0);
    assert!(!pub_key_buf.is_null());
    let pub_key_vec = unsafe { std::slice::from_raw_parts(pub_key_buf, pub_key_size) };

    let pub_key = sgx_ec256_public_t {
        gx: from_slice(&pub_key_vec[0..32]),
        gy: from_slice(&pub_key_vec[32..64]),
    };
    let mut context: sgx_ra_context_t = 0;

    assert!(pub_key_vec.len() > 0);
    let ret = unsafe {
        sgx_ra_init(
            &pub_key as *const sgx_ec256_public_t,
            0,
            &mut context as *mut sgx_ra_context_t,
        )
    };

    if ret != sgx_status_t::SGX_SUCCESS {
        return ret;
    }

    unsafe {
        *p_context = context;
    }

    return ret;
}

/// Retrieve or generate the private key as a Vec<u8>
fn get_private_key() -> Result<std::vec::Vec<u8>, SgxRootEnclave> {
    let mut private_key_guard = match PRIVATE_KEY.lock() {
        Err(_) => return Err(SgxRootEnclave::LockFail),
        Ok(guard) => guard,
    };
    let pkcs8_bytes = match &*private_key_guard {
        Some(bytes) => bytes.clone(),
        None => {
            // ECDSA prime256r1 generation.
            let pkcs8_bytes = EcdsaKeyPair::generate_pkcs8(
                &ring::signature::ECDSA_P256_SHA256_FIXED_SIGNING,
                &SystemRandom::new(),
            )
            .map_err(|_| SgxRootEnclave::PKCS8Error)?;
            *private_key_guard = Some(pkcs8_bytes.as_ref().to_vec());
            pkcs8_bytes.as_ref().to_vec()
        }
    };
    return Ok(pkcs8_bytes);
}

#[no_mangle]
pub extern "C" fn sgx_get_collateral_report(
    p_pubkey_challenge: *const u8,
    pubkey_challenge_size: usize,
    p_target_info: *const sgx_target_info_t,
    report: *mut sgx_types::sgx_report_t,
    csr_buffer: *mut u8,
    csr_buf_size: usize,
    p_csr_size: *mut usize,
) -> sgx_status_t {
    let pubkey_challenge_vec =
        unsafe { std::slice::from_raw_parts(p_pubkey_challenge, pubkey_challenge_size) };
    let mut report_data = sgx_types::sgx_report_data_t::default();
    // place the challenge in the report
    report_data.d[0..pubkey_challenge_size].copy_from_slice(&pubkey_challenge_vec);

    let private_key_ring = {
        let private_key_vec = match get_private_key() {
            Ok(vec) => vec,
            Err(_) => return sgx_status_t::SGX_ERROR_UNEXPECTED,
        };
        match EcdsaKeyPair::from_pkcs8(
            &ring::signature::ECDSA_P256_SHA256_ASN1_SIGNING,
            &private_key_vec,
        ) {
            Ok(pkr) => pkr,
            Err(_) => return sgx_status_t::SGX_ERROR_UNEXPECTED,
        }
    };

    // generate the certificate signing request
    let csr_vec = match csr::generate_csr(&csr::ROOT_ENCLAVE_CSR_TEMPLATE, &private_key_ring) {
        Ok(csr) => csr,
        Err(_) => return sgx_status_t::SGX_ERROR_UNEXPECTED,
    };

    // place the hash of the csr in the report; the 32-byte SHA-256 digest fills
    // bytes [pubkey_challenge_size..48], so the challenge is expected to be 16 bytes
    let collateral_hash = ring::digest::digest(&ring::digest::SHA256, &csr_vec);
    report_data.d[pubkey_challenge_size..48].copy_from_slice(collateral_hash.as_ref());

    let ret = unsafe { sgx_create_report(p_target_info, &report_data, report) };
    assert!(ret == sgx_types::sgx_status_t::SGX_SUCCESS);

    // place the csr where it needs to be
    if csr_vec.len() > csr_buf_size {
        assert!(false);
    } else {
        let csr_buf_slice = unsafe { std::slice::from_raw_parts_mut(csr_buffer, csr_vec.len()) };
        csr_buf_slice.clone_from_slice(&csr_vec);
        unsafe { *p_csr_size = csr_vec.len() };
    }

    sgx_status_t::SGX_SUCCESS
}

#[no_mangle]
pub extern "C" fn sgx_send_cert_chain(
    root_cert: *const u8,
    root_cert_size: usize,
    enclave_cert: *const u8,
    enclave_cert_size: usize,
) -> sgx_status_t {
    let root_cert_slice = unsafe { std::slice::from_raw_parts(root_cert, root_cert_size) };
    let enclave_cert_slice =
        unsafe { std::slice::from_raw_parts(enclave_cert, enclave_cert_size) };

    let mut cert_chain_guard = match CERT_CHAIN.lock() {
        Err(_) => return sgx_status_t::SGX_ERROR_UNEXPECTED,
        Ok(guard) => guard,
    };
    match &*cert_chain_guard {
        Some(_) => {
            panic!("Unhandled. CERT_CHAIN is not None.");
        }
        None => {
            *cert_chain_guard = Some((enclave_cert_slice.to_vec(), root_cert_slice.to_vec()));
        }
    }
    return sgx_status_t::SGX_SUCCESS;
}

#[no_mangle]
pub extern "C" fn start_local_attest_enc(
    msg1: &sgx_dh_msg1_t,
    msg2: &mut sgx_dh_msg2_t,
    sgx_root_enclave_session_id: &mut u64,
) -> sgx_status_t {
    let mut initiator = SgxDhInitiator::init_session();

    let status = initiator.proc_msg1(msg1, msg2);
    assert!(!status.is_err());
    let session_id = SESSION_ID.fetch_add(1, Ordering::SeqCst);

    {
        let mut initiator_hash = match INITIATOR_HASH.lock() {
            Err(_) => return sgx_status_t::SGX_ERROR_UNEXPECTED,
            Ok(guard) => guard,
        };
        initiator_hash.insert(session_id, initiator);
    }
    *sgx_root_enclave_session_id = session_id;

    sgx_status_t::SGX_SUCCESS
}

// Hard-coded offsets into the fixed-layout DER CSR produced by `csr::generate_csr`.
const CSR_BODY_LOCATION: (usize, usize) = (4, 4 + 218);
const CSR_PUBKEY_LOCATION: (usize, usize) = (129 + 26, 220);

fn verify_csr(csr: &[u8]) -> Result<bool, std::string::String> {
    let pubkey_bytes = &csr[CSR_PUBKEY_LOCATION.0..CSR_PUBKEY_LOCATION.1];
    let public_key = ring::signature::UnparsedPublicKey::new(
        &ring::signature::ECDSA_P256_SHA256_ASN1,
        pubkey_bytes,
    );

    let csr_body = &csr[CSR_BODY_LOCATION.0..CSR_BODY_LOCATION.1];
    let csr_signature = &csr[237..];
    let verify_result = public_key.verify(&csr_body, &csr_signature);
    if verify_result.is_err() {
        return Err(format!("verify_csr failed:{:?}", verify_result));
    } else {
        return Ok(true);
    }
}

#[no_mangle]
pub extern "C" fn finish_local_attest_enc(
    dh_msg3_raw: &mut sgx_dh_msg3_t,
    csr: *const u8,
    csr_size: usize,
    sgx_root_enclave_session_id: u64,
    p_cert_buf: *mut u8,
    cert_buf_size: usize,
    p_cert_size: *mut usize,
    cert_lengths: *mut u32,
    cert_lengths_size: usize,
) -> SgxRootEnclave {
    let dh_msg3_raw_len =
        mem::size_of::<sgx_dh_msg3_t>() as u32 + dh_msg3_raw.msg3_body.additional_prop_length;
    let dh_msg3 = unsafe { SgxDhMsg3::from_raw_dh_msg3_t(dh_msg3_raw, dh_msg3_raw_len) };
    assert!(!dh_msg3.is_none());
    let dh_msg3 = match dh_msg3 {
        Some(msg) => msg,
        None => {
            return SgxRootEnclave::Msg3RawError;
        }
    };
    let mut initiator = {
        let mut initiator_hash = match INITIATOR_HASH.lock() {
            Err(_) => return SgxRootEnclave::LockFail,
            Ok(guard) => guard,
        };
        initiator_hash.remove(&sgx_root_enclave_session_id).unwrap()
    };

    let mut dh_aek: sgx_key_128bit_t = sgx_key_128bit_t::default(); // Session Key, we won't use this
    let mut responder_identity = sgx_dh_session_enclave_identity_t::default();

    let status = initiator.proc_msg3(&dh_msg3, &mut dh_aek, &mut responder_identity);
    if status.is_err() {
        return SgxRootEnclave::ProcMsg3Error;
    }

    // now that the msg3 is authenticated, we can generate the cert from the csr
    let csr_slice = unsafe { std::slice::from_raw_parts(csr, csr_size) };
    match verify_csr(&csr_slice) {
        Ok(status) => match status {
            true => (), // Do nothing
            false => {
                println!("CSR did not verify successfully");
                return SgxRootEnclave::CsrVerifyFail;
            }
        },
        Err(err) => {
            println!("CSR did not verify:{:?}. Returning error", err);
            return SgxRootEnclave::CsrVerifyFail;
        }
    }

    // generate cert from csr, signed by PRIVATE_KEY
    let private_key = {
        let private_key_vec = match get_private_key() {
            Ok(key) => key,
            Err(_) => return SgxRootEnclave::PrivateKeyNotPopulated,
        };
        match EcdsaKeyPair::from_pkcs8(
            &ring::signature::ECDSA_P256_SHA256_ASN1_SIGNING,
            &private_key_vec,
        ) {
            Ok(key) => key,
            Err(_) => return SgxRootEnclave::PKCS8Error,
        }
    };

    let mut compute_enclave_cert = match csr::convert_csr_to_cert(
        &csr_slice,
        &csr::COMPUTE_ENCLAVE_CERT_TEMPLATE,
        &responder_identity.mr_enclave.m,
        &private_key,
    ) {
        Ok(bytes) => bytes,
        Err(err) => {
            println!("Failed to convert csr to cert:{:?}", err);
            return SgxRootEnclave::CsrToCertFail;
        }
    };

    let (mut root_enclave_cert, mut root_cert) = {
        let cert_chain_guard = match CERT_CHAIN.lock() {
            Err(_) => return SgxRootEnclave::LockFail,
            Ok(guard) => guard,
        };
        match &*cert_chain_guard {
            Some((re_cert, r_cert)) => (re_cert.clone(), r_cert.clone()),
            None => {
                panic!("CERT_CHAIN is not populated");
            }
        }
    };

    if cert_buf_size < (compute_enclave_cert.len() + root_enclave_cert.len() + root_cert.len()) {
        assert!(false);
    }
    let cert_buf_slice = unsafe {
        std::slice::from_raw_parts_mut(
            p_cert_buf,
            compute_enclave_cert.len() + root_enclave_cert.len() + root_cert.len(),
        )
    };
    unsafe {
        *p_cert_size = compute_enclave_cert.len() + root_enclave_cert.len() + root_cert.len()
    };
    let cert_lengths_slice = unsafe {
        std::slice::from_raw_parts_mut(
            cert_lengths,
            cert_lengths_size / std::mem::size_of::<u32>(),
        )
    };

    // create a buffer to aggregate the certificates
    let mut temp_cert_buf: std::vec::Vec<u8> = std::vec::Vec::new();
    let mut temp_cert_lengths: std::vec::Vec<u32> = std::vec::Vec::new();

    // add the compute_enclave_cert to the return buffer
    temp_cert_lengths.push(compute_enclave_cert.len() as u32);
    temp_cert_buf.append(&mut compute_enclave_cert);

    // add the root_enclave cert to the temp buffer
    temp_cert_lengths.push(root_enclave_cert.len() as u32);
    temp_cert_buf.append(&mut root_enclave_cert);

    // add the root cert to the temp buffer
    temp_cert_lengths.push(root_cert.len() as u32);
    temp_cert_buf.append(&mut root_cert);

    // Copy the temporary certificate buffer contents to the destination buffer
    cert_buf_slice.clone_from_slice(&temp_cert_buf);
    cert_lengths_slice.clone_from_slice(&temp_cert_lengths);

    return SgxRootEnclave::Success;
}

fn from_slice(bytes: &[u8]) -> [u8; 32] {
    let mut array = [0; 32];
    let bytes = &bytes[..array.len()]; // panics if not enough data
    for index in 0..32 {
        array[index] = bytes[index];
    }
    array
}
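A host-side sketch (not part of the enclave) of unpacking the output of
`finish_local_attest_enc` above: the enclave writes three `u32` lengths into
`cert_lengths` and the compute-enclave, root-enclave, and root certificates back to
back into `cert_buf`. The function name and signature here are illustrative only.

// Split the packed certificate chain back into its three DER certificates.
fn split_cert_chain<'a>(cert_buf: &'a [u8], cert_lengths: &[u32; 3]) -> [&'a [u8]; 3] {
    let (l0, l1, l2) =
        (cert_lengths[0] as usize, cert_lengths[1] as usize, cert_lengths[2] as usize);
    let (compute, rest) = cert_buf.split_at(l0);
    let (root_enclave, root) = rest.split_at(l1);
    // The three lengths must account for the entire buffer.
    assert_eq!(root.len(), l2);
    [compute, root_enclave, root]
}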
let dh_msg3_raw_len = mem::size_of::<sgx_dh_msg3_t>() as u32 + dh_msg3_raw.msg3_body.additional_prop_length; let dh_msg3 = unsafe { SgxDhMsg3::from_raw_dh_msg3_t(dh_msg3_raw, dh_msg3_raw_len) }; assert!(!dh_msg3.is_none()); let dh_msg3 = match dh_msg3 { Some(msg) => msg, None => { return SgxRootEnclave::Msg3RawError; } }; let mut initiator = { let mut initiator_hash = match INITIATOR_HASH.lock() { Err(_) => return SgxRootEnclave::LockFail, Ok(guard) => guard, }; initiator_hash.remove(&sgx_root_enclave_session_id).unwrap() };
identifier_body
lib.rs
//! The SGX root enclave //! //! ## Authors //! //! The Veracruz Development Team. //! //! ## Licensing and copyright notice //! //! See the `LICENSE_MIT.markdown` file in the Veracruz root directory for //! information on licensing and copyright. #![no_std] #[macro_use] extern crate sgx_tstd as std; use lazy_static::lazy_static; use sgx_tdh::{SgxDhInitiator, SgxDhMsg3}; use sgx_types; use sgx_types::{ sgx_create_report, sgx_dh_msg1_t, sgx_dh_msg2_t, sgx_dh_msg3_t, sgx_dh_session_enclave_identity_t, sgx_ec256_public_t, sgx_key_128bit_t, sgx_ra_context_t, sgx_ra_init, sgx_status_t, sgx_target_info_t, }; use std::{collections::HashMap, mem, sync::atomic::{AtomicU64, Ordering}}; use ring::{rand::SystemRandom, signature::EcdsaKeyPair}; use veracruz_utils::csr; lazy_static! { static ref SESSION_ID: AtomicU64 = AtomicU64::new(1); static ref INITIATOR_HASH: std::sync::SgxMutex<HashMap<u64, SgxDhInitiator>> = std::sync::SgxMutex::new(HashMap::new()); static ref PRIVATE_KEY: std::sync::SgxMutex<Option<std::vec::Vec<u8>>> = std::sync::SgxMutex::new(None); static ref CERT_CHAIN: std::sync::SgxMutex<Option<(std::vec::Vec<u8>, std::vec::Vec<u8>)>> = std::sync::SgxMutex::new(None); } pub enum SgxRootEnclave { Success = 0x00, Msg3RawError = 0x01, ProcMsg3Error = 0x02, CsrVerifyFail = 0x03, CsrToCertFail = 0x04, LockFail = 0x05, HashError = 0x06, PKCS8Error = 0x07, StateError = 0x08, PrivateKeyNotPopulated = 0x09, } #[no_mangle] pub extern "C" fn g
p_fwv_len: &mut usize) -> sgx_status_t { let version = env!("CARGO_PKG_VERSION"); *p_fwv_len = version.len(); sgx_status_t::SGX_SUCCESS } #[no_mangle] pub extern "C" fn get_firmware_version( p_firmware_version_buf: *mut u8, fv_buf_size: usize, ) -> sgx_status_t { let version = env!("CARGO_PKG_VERSION"); assert!(version.len() <= fv_buf_size); let version_buf_slice = unsafe { std::slice::from_raw_parts_mut(p_firmware_version_buf, fv_buf_size) }; version_buf_slice.clone_from_slice(&version.as_bytes()); sgx_status_t::SGX_SUCCESS } #[no_mangle] pub extern "C" fn init_remote_attestation_enc( pub_key_buf: *const u8, pub_key_size: usize, p_context: *mut sgx_ra_context_t, ) -> sgx_status_t { assert!(pub_key_size!= 0); assert!(!pub_key_buf.is_null()); let pub_key_vec = unsafe { std::slice::from_raw_parts(pub_key_buf, pub_key_size) }; let pub_key = sgx_ec256_public_t { gx: from_slice(&pub_key_vec[0..32]), gy: from_slice(&pub_key_vec[32..64]), }; let mut context: sgx_ra_context_t = 0; assert!(pub_key_vec.len() > 0); let ret = unsafe { sgx_ra_init( &pub_key as *const sgx_ec256_public_t, 0, &mut context as *mut sgx_ra_context_t, ) }; if ret!= sgx_status_t::SGX_SUCCESS { return ret; } unsafe { *p_context = context; } return ret; } /// Retrieve or generate the private key as a Vec<u8> fn get_private_key() -> Result<std::vec::Vec<u8>, SgxRootEnclave> { let mut private_key_guard = match PRIVATE_KEY.lock() { Err(_) => return Err(SgxRootEnclave::LockFail), Ok(guard) => guard, }; let pkcs8_bytes = match &*private_key_guard { Some(bytes) => { bytes.clone() } None => { // ECDSA prime256r1 generation. let pkcs8_bytes = EcdsaKeyPair::generate_pkcs8( &ring::signature::ECDSA_P256_SHA256_FIXED_SIGNING, &SystemRandom::new(),) .map_err(|_| SgxRootEnclave::PKCS8Error)?; *private_key_guard = Some(pkcs8_bytes.as_ref().to_vec()); pkcs8_bytes.as_ref().to_vec() } }; return Ok(pkcs8_bytes); } #[no_mangle] pub extern "C" fn sgx_get_collateral_report( p_pubkey_challenge: *const u8, pubkey_challenge_size: usize, p_target_info: *const sgx_target_info_t, report: *mut sgx_types::sgx_report_t, csr_buffer: *mut u8, csr_buf_size: usize, p_csr_size: *mut usize, ) -> sgx_status_t { let pubkey_challenge_vec = unsafe { std::slice::from_raw_parts(p_pubkey_challenge, pubkey_challenge_size) }; let mut report_data = sgx_types::sgx_report_data_t::default(); // place the challenge in the report report_data.d[0..pubkey_challenge_size].copy_from_slice(&pubkey_challenge_vec); let private_key_ring = { let private_key_vec = match get_private_key() { Ok(vec) => vec, Err(_) => return sgx_status_t::SGX_ERROR_UNEXPECTED, }; match EcdsaKeyPair::from_pkcs8(&ring::signature::ECDSA_P256_SHA256_ASN1_SIGNING, &private_key_vec) { Ok(pkr) => pkr, Err(_) => return sgx_status_t::SGX_ERROR_UNEXPECTED, } }; // generate the certificate signing request let csr_vec = match csr::generate_csr(&csr::ROOT_ENCLAVE_CSR_TEMPLATE, &private_key_ring) { Ok(csr) => csr, Err(_) => return sgx_status_t::SGX_ERROR_UNEXPECTED, }; // // place the hash of the csr in the report let collateral_hash = ring::digest::digest(&ring::digest::SHA256, &csr_vec); report_data.d[pubkey_challenge_size..48].copy_from_slice(collateral_hash.as_ref()); let ret = unsafe { sgx_create_report(p_target_info, &report_data, report) }; assert!(ret == sgx_types::sgx_status_t::SGX_SUCCESS); // place the csr where it needs to be if csr_vec.len() > csr_buf_size { assert!(false); } else { let csr_buf_slice = unsafe { std::slice::from_raw_parts_mut(csr_buffer, csr_vec.len()) }; 
csr_buf_slice.clone_from_slice(&csr_vec); unsafe { *p_csr_size = csr_vec.len() }; } sgx_status_t::SGX_SUCCESS } #[no_mangle] pub extern "C" fn sgx_send_cert_chain( root_cert: *const u8, root_cert_size: usize, enclave_cert: *const u8, enclave_cert_size: usize, ) -> sgx_status_t { let root_cert_slice = unsafe { std::slice::from_raw_parts(root_cert, root_cert_size) }; let enclave_cert_slice = unsafe { std::slice::from_raw_parts(enclave_cert, enclave_cert_size) }; let mut cert_chain_guard = match CERT_CHAIN.lock() { Err(_) => return sgx_status_t::SGX_ERROR_UNEXPECTED, Ok(guard) => guard, }; match &*cert_chain_guard { Some(_) => { panic!("Unhandled. CERT_CHAIN is not None."); } None => { *cert_chain_guard = Some((enclave_cert_slice.to_vec(), root_cert_slice.to_vec())); } } return sgx_status_t::SGX_SUCCESS; } #[no_mangle] pub extern "C" fn start_local_attest_enc( msg1: &sgx_dh_msg1_t, msg2: &mut sgx_dh_msg2_t, sgx_root_enclave_session_id: &mut u64, ) -> sgx_status_t { let mut initiator = SgxDhInitiator::init_session(); let status = initiator.proc_msg1(msg1, msg2); assert!(!status.is_err()); let session_id = SESSION_ID.fetch_add(1, Ordering::SeqCst); { let mut initiator_hash = match INITIATOR_HASH.lock() { Err(_) => return sgx_status_t::SGX_ERROR_UNEXPECTED, Ok(guard) => guard, }; initiator_hash.insert(session_id, initiator); } *sgx_root_enclave_session_id = session_id; sgx_status_t::SGX_SUCCESS } const CSR_BODY_LOCATION: (usize, usize) = (4, 4 + 218); const CSR_PUBKEY_LOCATION: (usize, usize) = (129 + 26, 220); fn verify_csr(csr: &[u8]) -> Result<bool, std::string::String> { let pubkey_bytes = &csr[CSR_PUBKEY_LOCATION.0..CSR_PUBKEY_LOCATION.1]; let public_key = ring::signature::UnparsedPublicKey::new(&ring::signature::ECDSA_P256_SHA256_ASN1, pubkey_bytes); let csr_body = &csr[CSR_BODY_LOCATION.0..CSR_BODY_LOCATION.1]; let csr_signature = &csr[237..]; let verify_result = public_key.verify(&csr_body, &csr_signature); if verify_result.is_err() { return Err(format!("verify_csr failed:{:?}", verify_result)); } else { return Ok(true); } } #[no_mangle] pub extern "C" fn finish_local_attest_enc( dh_msg3_raw: &mut sgx_dh_msg3_t, csr: *const u8, csr_size: usize, sgx_root_enclave_session_id: u64, p_cert_buf: *mut u8, cert_buf_size: usize, p_cert_size: *mut usize, cert_lengths: *mut u32, cert_lengths_size: usize, ) -> SgxRootEnclave { let dh_msg3_raw_len = mem::size_of::<sgx_dh_msg3_t>() as u32 + dh_msg3_raw.msg3_body.additional_prop_length; let dh_msg3 = unsafe { SgxDhMsg3::from_raw_dh_msg3_t(dh_msg3_raw, dh_msg3_raw_len) }; assert!(!dh_msg3.is_none()); let dh_msg3 = match dh_msg3 { Some(msg) => msg, None => { return SgxRootEnclave::Msg3RawError; } }; let mut initiator = { let mut initiator_hash = match INITIATOR_HASH.lock() { Err(_) => return SgxRootEnclave::LockFail, Ok(guard) => guard, }; initiator_hash.remove(&sgx_root_enclave_session_id).unwrap() }; let mut dh_aek: sgx_key_128bit_t = sgx_key_128bit_t::default(); // Session Key, we won't use this let mut responder_identity = sgx_dh_session_enclave_identity_t::default(); let status = initiator.proc_msg3(&dh_msg3, &mut dh_aek, &mut responder_identity); if status.is_err() { return SgxRootEnclave::ProcMsg3Error; } // now that the msg3 is authenticated, we can generate the cert from the csr let csr_slice = unsafe { std::slice::from_raw_parts(csr, csr_size) }; match verify_csr(&csr_slice) { Ok(status) => match status { true => (), // Do nothing false => { println!("CSR Did not verify successfully"); return SgxRootEnclave::CsrVerifyFail; }, }, Err(err) => { 
println!("CSR did not verify:{:?}. Returning error", err); return SgxRootEnclave::CsrVerifyFail; }, } //generate cert from csr, signed by PRIVATE_KEY let private_key = { let private_key_vec = match get_private_key() { Ok(key) => key, Err(_) => return SgxRootEnclave::PrivateKeyNotPopulated, }; match EcdsaKeyPair::from_pkcs8(&ring::signature::ECDSA_P256_SHA256_ASN1_SIGNING, &private_key_vec) { Ok(key) => key, Err(_) => return SgxRootEnclave::PKCS8Error, } }; let mut compute_enclave_cert = match csr::convert_csr_to_cert(&csr_slice, &csr::COMPUTE_ENCLAVE_CERT_TEMPLATE, &responder_identity.mr_enclave.m, &private_key) { Ok(bytes) => bytes, Err(err) => { println!("Failed to convert csr to cert:{:?}", err); return SgxRootEnclave::CsrToCertFail; }, }; let (mut root_enclave_cert, mut root_cert) = { let cert_chain_guard = match CERT_CHAIN.lock() { Err(_) => return SgxRootEnclave::LockFail, Ok(guard) => guard, }; match &*cert_chain_guard { Some((re_cert, r_cert)) => { (re_cert.clone(), r_cert.clone()) } None => { panic!("CERT_CHAIN is not populated"); }, } }; if cert_buf_size < (compute_enclave_cert.len() + root_enclave_cert.len() + root_cert.len()) { assert!(false); } let cert_buf_slice = unsafe { std::slice::from_raw_parts_mut(p_cert_buf, compute_enclave_cert.len() + root_enclave_cert.len() + root_cert.len()) }; unsafe { *p_cert_size = compute_enclave_cert.len() + root_enclave_cert.len() + root_cert.len() }; let cert_lengths_slice = unsafe { std::slice::from_raw_parts_mut(cert_lengths, cert_lengths_size/std::mem::size_of::<u32>()) }; // create a buffer to aggregate the certificates let mut temp_cert_buf: std::vec::Vec<u8> = std::vec::Vec::new(); let mut temp_cert_lengths: std::vec::Vec<u32> = std::vec::Vec::new(); // add the compute_enclave_cert to the return buffer temp_cert_lengths.push(compute_enclave_cert.len() as u32); temp_cert_buf.append(&mut compute_enclave_cert); // add the root_enclave cert to the temp buffer temp_cert_lengths.push(root_enclave_cert.len() as u32); temp_cert_buf.append(&mut root_enclave_cert); // add the root cert to the temp buffer temp_cert_lengths.push(root_cert.len() as u32); temp_cert_buf.append(&mut root_cert); // Copy the temporary certificate buffer contents to the destination buffer cert_buf_slice.clone_from_slice(&temp_cert_buf); cert_lengths_slice.clone_from_slice(&temp_cert_lengths); return SgxRootEnclave::Success; } fn from_slice(bytes: &[u8]) -> [u8; 32] { let mut array = [0; 32]; let bytes = &bytes[..array.len()]; // panics if not enough data for index in 0..32 { array[index] = bytes[index]; } array }
et_firmware_version_len(
identifier_name
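// A minimal sketch of an idiomatic alternative to the manual indexed copy loop
// in `from_slice` above, assuming only `core`; the name `from_slice_via_try_into`
// is hypothetical and not part of the enclave's API.
use core::convert::TryInto;

fn from_slice_via_try_into(bytes: &[u8]) -> [u8; 32] {
    // Like the original helper, this panics if fewer than 32 bytes are supplied.
    bytes[..32].try_into().expect("need at least 32 bytes")
}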
lib.rs
//! The SGX root enclave //! //! ## Authors //! //! The Veracruz Development Team. //! //! ## Licensing and copyright notice //! //! See the `LICENSE_MIT.markdown` file in the Veracruz root directory for //! information on licensing and copyright. #![no_std] #[macro_use] extern crate sgx_tstd as std; use lazy_static::lazy_static; use sgx_tdh::{SgxDhInitiator, SgxDhMsg3}; use sgx_types; use sgx_types::{ sgx_create_report, sgx_dh_msg1_t, sgx_dh_msg2_t, sgx_dh_msg3_t, sgx_dh_session_enclave_identity_t, sgx_ec256_public_t, sgx_key_128bit_t, sgx_ra_context_t, sgx_ra_init, sgx_status_t, sgx_target_info_t, }; use std::{collections::HashMap, mem, sync::atomic::{AtomicU64, Ordering}}; use ring::{rand::SystemRandom, signature::EcdsaKeyPair}; use veracruz_utils::csr; lazy_static! { static ref SESSION_ID: AtomicU64 = AtomicU64::new(1); static ref INITIATOR_HASH: std::sync::SgxMutex<HashMap<u64, SgxDhInitiator>> = std::sync::SgxMutex::new(HashMap::new()); static ref PRIVATE_KEY: std::sync::SgxMutex<Option<std::vec::Vec<u8>>> = std::sync::SgxMutex::new(None); static ref CERT_CHAIN: std::sync::SgxMutex<Option<(std::vec::Vec<u8>, std::vec::Vec<u8>)>> = std::sync::SgxMutex::new(None); } pub enum SgxRootEnclave { Success = 0x00, Msg3RawError = 0x01, ProcMsg3Error = 0x02, CsrVerifyFail = 0x03, CsrToCertFail = 0x04, LockFail = 0x05, HashError = 0x06, PKCS8Error = 0x07, StateError = 0x08, PrivateKeyNotPopulated = 0x09, } #[no_mangle] pub extern "C" fn get_firmware_version_len(p_fwv_len: &mut usize) -> sgx_status_t { let version = env!("CARGO_PKG_VERSION"); *p_fwv_len = version.len(); sgx_status_t::SGX_SUCCESS } #[no_mangle] pub extern "C" fn get_firmware_version( p_firmware_version_buf: *mut u8, fv_buf_size: usize, ) -> sgx_status_t { let version = env!("CARGO_PKG_VERSION"); assert!(version.len() <= fv_buf_size); let version_buf_slice = unsafe { std::slice::from_raw_parts_mut(p_firmware_version_buf, fv_buf_size) }; version_buf_slice.clone_from_slice(&version.as_bytes()); sgx_status_t::SGX_SUCCESS } #[no_mangle] pub extern "C" fn init_remote_attestation_enc( pub_key_buf: *const u8, pub_key_size: usize, p_context: *mut sgx_ra_context_t, ) -> sgx_status_t { assert!(pub_key_size!= 0); assert!(!pub_key_buf.is_null()); let pub_key_vec = unsafe { std::slice::from_raw_parts(pub_key_buf, pub_key_size) }; let pub_key = sgx_ec256_public_t { gx: from_slice(&pub_key_vec[0..32]), gy: from_slice(&pub_key_vec[32..64]), }; let mut context: sgx_ra_context_t = 0; assert!(pub_key_vec.len() > 0); let ret = unsafe { sgx_ra_init( &pub_key as *const sgx_ec256_public_t, 0, &mut context as *mut sgx_ra_context_t, ) }; if ret!= sgx_status_t::SGX_SUCCESS { return ret; } unsafe { *p_context = context; } return ret; } /// Retrieve or generate the private key as a Vec<u8> fn get_private_key() -> Result<std::vec::Vec<u8>, SgxRootEnclave> { let mut private_key_guard = match PRIVATE_KEY.lock() { Err(_) => return Err(SgxRootEnclave::LockFail), Ok(guard) => guard, }; let pkcs8_bytes = match &*private_key_guard { Some(bytes) => { bytes.clone() } None => { // ECDSA prime256r1 generation. let pkcs8_bytes = EcdsaKeyPair::generate_pkcs8( &ring::signature::ECDSA_P256_SHA256_FIXED_SIGNING, &SystemRandom::new(),) .map_err(|_| SgxRootEnclave::PKCS8Error)?;
} #[no_mangle] pub extern "C" fn sgx_get_collateral_report( p_pubkey_challenge: *const u8, pubkey_challenge_size: usize, p_target_info: *const sgx_target_info_t, report: *mut sgx_types::sgx_report_t, csr_buffer: *mut u8, csr_buf_size: usize, p_csr_size: *mut usize, ) -> sgx_status_t { let pubkey_challenge_vec = unsafe { std::slice::from_raw_parts(p_pubkey_challenge, pubkey_challenge_size) }; let mut report_data = sgx_types::sgx_report_data_t::default(); // place the challenge in the report report_data.d[0..pubkey_challenge_size].copy_from_slice(&pubkey_challenge_vec); let private_key_ring = { let private_key_vec = match get_private_key() { Ok(vec) => vec, Err(_) => return sgx_status_t::SGX_ERROR_UNEXPECTED, }; match EcdsaKeyPair::from_pkcs8(&ring::signature::ECDSA_P256_SHA256_ASN1_SIGNING, &private_key_vec) { Ok(pkr) => pkr, Err(_) => return sgx_status_t::SGX_ERROR_UNEXPECTED, } }; // generate the certificate signing request let csr_vec = match csr::generate_csr(&csr::ROOT_ENCLAVE_CSR_TEMPLATE, &private_key_ring) { Ok(csr) => csr, Err(_) => return sgx_status_t::SGX_ERROR_UNEXPECTED, }; // // place the hash of the csr in the report let collateral_hash = ring::digest::digest(&ring::digest::SHA256, &csr_vec); report_data.d[pubkey_challenge_size..48].copy_from_slice(collateral_hash.as_ref()); let ret = unsafe { sgx_create_report(p_target_info, &report_data, report) }; assert!(ret == sgx_types::sgx_status_t::SGX_SUCCESS); // place the csr where it needs to be if csr_vec.len() > csr_buf_size { assert!(false); } else { let csr_buf_slice = unsafe { std::slice::from_raw_parts_mut(csr_buffer, csr_vec.len()) }; csr_buf_slice.clone_from_slice(&csr_vec); unsafe { *p_csr_size = csr_vec.len() }; } sgx_status_t::SGX_SUCCESS } #[no_mangle] pub extern "C" fn sgx_send_cert_chain( root_cert: *const u8, root_cert_size: usize, enclave_cert: *const u8, enclave_cert_size: usize, ) -> sgx_status_t { let root_cert_slice = unsafe { std::slice::from_raw_parts(root_cert, root_cert_size) }; let enclave_cert_slice = unsafe { std::slice::from_raw_parts(enclave_cert, enclave_cert_size) }; let mut cert_chain_guard = match CERT_CHAIN.lock() { Err(_) => return sgx_status_t::SGX_ERROR_UNEXPECTED, Ok(guard) => guard, }; match &*cert_chain_guard { Some(_) => { panic!("Unhandled. 
CERT_CHAIN is not None."); } None => { *cert_chain_guard = Some((enclave_cert_slice.to_vec(), root_cert_slice.to_vec())); } } return sgx_status_t::SGX_SUCCESS; } #[no_mangle] pub extern "C" fn start_local_attest_enc( msg1: &sgx_dh_msg1_t, msg2: &mut sgx_dh_msg2_t, sgx_root_enclave_session_id: &mut u64, ) -> sgx_status_t { let mut initiator = SgxDhInitiator::init_session(); let status = initiator.proc_msg1(msg1, msg2); assert!(!status.is_err()); let session_id = SESSION_ID.fetch_add(1, Ordering::SeqCst); { let mut initiator_hash = match INITIATOR_HASH.lock() { Err(_) => return sgx_status_t::SGX_ERROR_UNEXPECTED, Ok(guard) => guard, }; initiator_hash.insert(session_id, initiator); } *sgx_root_enclave_session_id = session_id; sgx_status_t::SGX_SUCCESS } const CSR_BODY_LOCATION: (usize, usize) = (4, 4 + 218); const CSR_PUBKEY_LOCATION: (usize, usize) = (129 + 26, 220); fn verify_csr(csr: &[u8]) -> Result<bool, std::string::String> { let pubkey_bytes = &csr[CSR_PUBKEY_LOCATION.0..CSR_PUBKEY_LOCATION.1]; let public_key = ring::signature::UnparsedPublicKey::new(&ring::signature::ECDSA_P256_SHA256_ASN1, pubkey_bytes); let csr_body = &csr[CSR_BODY_LOCATION.0..CSR_BODY_LOCATION.1]; let csr_signature = &csr[237..]; let verify_result = public_key.verify(&csr_body, &csr_signature); if verify_result.is_err() { return Err(format!("verify_csr failed:{:?}", verify_result)); } else { return Ok(true); } } #[no_mangle] pub extern "C" fn finish_local_attest_enc( dh_msg3_raw: &mut sgx_dh_msg3_t, csr: *const u8, csr_size: usize, sgx_root_enclave_session_id: u64, p_cert_buf: *mut u8, cert_buf_size: usize, p_cert_size: *mut usize, cert_lengths: *mut u32, cert_lengths_size: usize, ) -> SgxRootEnclave { let dh_msg3_raw_len = mem::size_of::<sgx_dh_msg3_t>() as u32 + dh_msg3_raw.msg3_body.additional_prop_length; let dh_msg3 = unsafe { SgxDhMsg3::from_raw_dh_msg3_t(dh_msg3_raw, dh_msg3_raw_len) }; assert!(!dh_msg3.is_none()); let dh_msg3 = match dh_msg3 { Some(msg) => msg, None => { return SgxRootEnclave::Msg3RawError; } }; let mut initiator = { let mut initiator_hash = match INITIATOR_HASH.lock() { Err(_) => return SgxRootEnclave::LockFail, Ok(guard) => guard, }; initiator_hash.remove(&sgx_root_enclave_session_id).unwrap() }; let mut dh_aek: sgx_key_128bit_t = sgx_key_128bit_t::default(); // Session Key, we won't use this let mut responder_identity = sgx_dh_session_enclave_identity_t::default(); let status = initiator.proc_msg3(&dh_msg3, &mut dh_aek, &mut responder_identity); if status.is_err() { return SgxRootEnclave::ProcMsg3Error; } // now that the msg3 is authenticated, we can generate the cert from the csr let csr_slice = unsafe { std::slice::from_raw_parts(csr, csr_size) }; match verify_csr(&csr_slice) { Ok(status) => match status { true => (), // Do nothing false => { println!("CSR Did not verify successfully"); return SgxRootEnclave::CsrVerifyFail; }, }, Err(err) => { println!("CSR did not verify:{:?}. 
Returning error", err); return SgxRootEnclave::CsrVerifyFail; }, } //generate cert from csr, signed by PRIVATE_KEY let private_key = { let private_key_vec = match get_private_key() { Ok(key) => key, Err(_) => return SgxRootEnclave::PrivateKeyNotPopulated, }; match EcdsaKeyPair::from_pkcs8(&ring::signature::ECDSA_P256_SHA256_ASN1_SIGNING, &private_key_vec) { Ok(key) => key, Err(_) => return SgxRootEnclave::PKCS8Error, } }; let mut compute_enclave_cert = match csr::convert_csr_to_cert(&csr_slice, &csr::COMPUTE_ENCLAVE_CERT_TEMPLATE, &responder_identity.mr_enclave.m, &private_key) { Ok(bytes) => bytes, Err(err) => { println!("Failed to convert csr to cert:{:?}", err); return SgxRootEnclave::CsrToCertFail; }, }; let (mut root_enclave_cert, mut root_cert) = { let cert_chain_guard = match CERT_CHAIN.lock() { Err(_) => return SgxRootEnclave::LockFail, Ok(guard) => guard, }; match &*cert_chain_guard { Some((re_cert, r_cert)) => { (re_cert.clone(), r_cert.clone()) } None => { panic!("CERT_CHAIN is not populated"); }, } }; if cert_buf_size < (compute_enclave_cert.len() + root_enclave_cert.len() + root_cert.len()) { assert!(false); } let cert_buf_slice = unsafe { std::slice::from_raw_parts_mut(p_cert_buf, compute_enclave_cert.len() + root_enclave_cert.len() + root_cert.len()) }; unsafe { *p_cert_size = compute_enclave_cert.len() + root_enclave_cert.len() + root_cert.len() }; let cert_lengths_slice = unsafe { std::slice::from_raw_parts_mut(cert_lengths, cert_lengths_size/std::mem::size_of::<u32>()) }; // create a buffer to aggregate the certificates let mut temp_cert_buf: std::vec::Vec<u8> = std::vec::Vec::new(); let mut temp_cert_lengths: std::vec::Vec<u32> = std::vec::Vec::new(); // add the compute_enclave_cert to the return buffer temp_cert_lengths.push(compute_enclave_cert.len() as u32); temp_cert_buf.append(&mut compute_enclave_cert); // add the root_enclave cert to the temp buffer temp_cert_lengths.push(root_enclave_cert.len() as u32); temp_cert_buf.append(&mut root_enclave_cert); // add the root cert to the temp buffer temp_cert_lengths.push(root_cert.len() as u32); temp_cert_buf.append(&mut root_cert); // Copy the temporary certificate buffer contents to the destination buffer cert_buf_slice.clone_from_slice(&temp_cert_buf); cert_lengths_slice.clone_from_slice(&temp_cert_lengths); return SgxRootEnclave::Success; } fn from_slice(bytes: &[u8]) -> [u8; 32] { let mut array = [0; 32]; let bytes = &bytes[..array.len()]; // panics if not enough data for index in 0..32 { array[index] = bytes[index]; } array }
*private_key_guard = Some(pkcs8_bytes.as_ref().to_vec()); pkcs8_bytes.as_ref().to_vec() } }; return Ok(pkcs8_bytes);
random_line_split
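// `get_private_key` above is a get-or-generate cache: a mutex-guarded Option
// holds the PKCS#8 bytes, generated on first use and cloned on every later call.
// A minimal std-only sketch of that pattern, with `std::sync::Mutex` standing in
// for the enclave's `SgxMutex` and placeholder bytes in place of real ECDSA key
// generation:
use std::sync::Mutex;

fn get_or_generate(slot: &Mutex<Option<Vec<u8>>>) -> Result<Vec<u8>, &'static str> {
    let mut guard = slot.lock().map_err(|_| "lock poisoned")?;
    match &*guard {
        Some(bytes) => Ok(bytes.clone()), // already generated: hand out a copy
        None => {
            let fresh = vec![0_u8; 138];  // placeholder for EcdsaKeyPair::generate_pkcs8
            *guard = Some(fresh.clone()); // cache for subsequent callers
            Ok(fresh)
        }
    }
}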
main.rs
#![allow(clippy::needless_return)] #![feature(portable_simd)] use core_simd::Simd; use core::convert::TryInto; use srng::SRng; use simd_aes::SimdAes; const DEFAULT_SEED: Simd<u8, 16> = Simd::from_array([ 178, 201, 95, 240, 40, 41, 143, 216, 2, 209, 178, 114, 232, 4, 176, 188, ]); #[allow(non_snake_case)] fn ComputeGlyphHash(data: &[u8]) -> Simd<u8, 16> { let zero = Simd::splat(0); let mut hash = Simd::<u64, 2>::from_array([data.len() as u64, 0]).to_ne_bytes(); hash ^= DEFAULT_SEED; let mut chunks = data.chunks_exact(16); for chunk in chunks.by_ref() { let chunk: &[u8; 16] = chunk.try_into().unwrap(); let value = Simd::from_array(*chunk); hash ^= value; hash = hash.aes_dec(zero); hash = hash.aes_dec(zero); hash = hash.aes_dec(zero); hash = hash.aes_dec(zero); } let remainder = chunks.remainder(); let mut temp = [0_u8; 16]; temp[..remainder.len()].copy_from_slice(remainder); let value = Simd::from_array(temp); hash ^= value; hash = hash.aes_dec(zero); hash = hash.aes_dec(zero); hash = hash.aes_dec(zero); hash = hash.aes_dec(zero); return hash; } #[allow(dead_code)] fn inv_aes_dec(mut data: Simd<u8, 16>, key: Simd<u8, 16>) -> Simd<u8, 16> { data ^= key; let zero = Simd::splat(0); data = data.aes_dec_last(zero).aes_enc(zero); return data.aes_enc_last(zero); } fn inv_aes_decx4(mut hash: Simd<u8, 16>) -> Simd<u8, 16> { let zero = Simd::splat(0); hash = hash.aes_dec_last(zero); hash = hash.aes_enc(zero); hash = hash.aes_enc(zero); hash = hash.aes_enc(zero); hash = hash.aes_enc(zero); hash = hash.aes_enc_last(zero); return hash; } fn single_prefix(count: usize, target_hash: Simd<u8, 16>) -> Simd<u8, 16> { // The first stage looks like this: // Hash ^ Seed = dec^4(Count ^ Seed ^ Chunk) // To get the chunk, we need to reverse these: // dec^-4(Hash ^ Seed) = Count ^ Seed ^ Chunk // Chunk = dec^4(Hash ^ Seed) ^ Count ^ Seed // To create a one-prefix initialization, we want: // Hash = Count // Count = Count + 16 let mut hash = target_hash; hash = inv_aes_decx4(hash); let prefix_init = Simd::<u64, 2>::from_array([count as u64 + 16, 0]).to_ne_bytes(); hash ^= prefix_init; hash ^= DEFAULT_SEED; return hash; } fn preimage_prefix_hash(mut hash: Simd<u8, 16>, data: &[u8]) -> Simd<u8, 16> { let chunks = data.len() / 16; let tail = &data[chunks*16..]; let mut tail_buf = [0_u8; 16]; tail_buf[..tail.len()].copy_from_slice(tail); let value = Simd::from_array(tail_buf); hash = inv_aes_decx4(hash); hash ^= value; for chunk in data.chunks_exact(16).rev() { let chunk: &[u8; 16] = chunk.try_into().unwrap(); let value = Simd::from_array(*chunk); hash = inv_aes_decx4(hash); hash ^= value; } return hash; } fn invert_block(mut hash: Simd<u8, 16>, chunk: &[u8]) -> Simd<u8, 16> { let chunk: &[u8; 16] = chunk.try_into().unwrap(); let value = Simd::from_array(*chunk); hash = inv_aes_decx4(hash); return hash ^ value; } fn invert_last(suffix: &[u8], mut hash: Simd<u8, 16>) -> Simd<u8, 16> { let mut tail_buf = [0_u8; 16]; tail_buf[..suffix.len()].copy_from_slice(suffix); let value = Simd::from_array(tail_buf); hash = inv_aes_decx4(hash); hash ^= value; hash = inv_aes_decx4(hash); return hash; } fn concat(prefix: Simd<u8, 16>, target: &[u8]) -> Vec<u8> { let mut image = prefix.to_array().to_vec(); image.extend_from_slice(target); image } fn prefix_collision_attack(message: &[u8])
fn chosen_prefix(prefix: &[u8]) { let zero = Simd::splat(0); let mut message = prefix.to_vec(); let remainder = 16 - (message.len() % 16); message.extend((0..remainder).map(|_| b'A')); message.extend((0..16).map(|_| 0)); let hash = ComputeGlyphHash(&message); let pre_current = invert_last(&[], hash); let pre_target = invert_last(&[], zero); let last = message.len() - 16; let suffix = pre_current ^ pre_target; message[last..].copy_from_slice(&suffix.to_array()); println!("Demonstrating chosen prefix attack"); println!("prefix: {:x?}", prefix); println!("forgery: {:x?}", message); println!("hash: {:x?}", ComputeGlyphHash(&message)); println!(); } fn preimage_attack(suffix: &[u8]) { println!("Demonstrating preimage attack"); println!("suffix: {:x?}", suffix); let target_hash = Simd::splat(0); println!("goal hash: {:x?}", target_hash); let prefix_hash = preimage_prefix_hash(target_hash, suffix); let preimage_prefix = single_prefix(suffix.len(), prefix_hash); println!("prefix: {:x?}", preimage_prefix); let message = concat(preimage_prefix, suffix); println!("message: {:x?}", message); println!("hash: {:x?}", ComputeGlyphHash(&message)); } fn padding_attack() { println!("Demonstrating padding attack"); println!(r#"message: "", hash: {:x?}"#, ComputeGlyphHash(b"")); println!(r#"message: "\x01", hash: {:x?}"#, ComputeGlyphHash(b"\x01")); println!(r#"message: "A", hash: {:x?}"#, ComputeGlyphHash(b"A")); println!(r#"message: "B\x00", hash: {:x?}"#, ComputeGlyphHash(b"B\x00")); println!(r#"message: "BAAAAAAAAAAAAAAA", hash: {:x?}"#, ComputeGlyphHash(b"BAAAAAAAAAAAAAAA")); println!(r#"message: "CAAAAAAAAAAAAAAA\x00", hash: {:x?}"#, ComputeGlyphHash(b"CAAAAAAAAAAAAAAA\x00")); println!(); } fn invert_attack(message: &[u8]) { println!("Demonstrating invert attack, invert a hash up to 15 bytes"); println!("Note: due to padding attack, there are actually more messages"); println!("plaintext: {:x?}", message); let mut hash = ComputeGlyphHash(message); println!("hash: {:x?}", hash); hash = inv_aes_decx4(hash); hash ^= DEFAULT_SEED; let mut buffer = hash.to_array(); let len = buffer.iter().rposition(|&chr| chr!= 0).map_or(0, |x| x + 1); if len == 16 { println!("the plaintext mus be shorter than 16 bytes, cannot invert"); return; } buffer[0] ^= len as u8; let recovered = &buffer[..len]; println!("recovered: {:x?}", recovered); println!("hash: {:x?}", ComputeGlyphHash(recovered)); println!(); } pub fn check_alphanum(bytes: Simd<u8, 16>) -> bool { // check if the characters are outside of '0'..'z' range if (bytes - Simd::splat(b'0')).lanes_gt(Simd::splat(b'z' - b'0')).any() { return false; } // check if the characters are in of '9'+1..'A'-1 range if (bytes - Simd::splat(b'9' + 1)).lanes_lt(Simd::splat(b'A' - (b'9' + 1))).any() { return false; } // check if the characters are in of 'Z'+1..'a'-1 range if (bytes - Simd::splat(b'Z' + 1)).lanes_lt(Simd::splat(b'a' - (b'Z' + 1))).any() { return false; } return true; } use core::sync::atomic::{AtomicBool, Ordering}; static FOUND: AtomicBool = AtomicBool::new(false); fn find_ascii_zeros(suffix: &[u8], worker: u64) { const ATTACK_BYTES: usize = 6; let mut target_hash = Simd::<u8, 16>::splat(0); let mut bsuffix = suffix; let suffix_len = 16 - ATTACK_BYTES; let mut whole_block = false; if suffix.len() >= suffix_len { target_hash = preimage_prefix_hash(target_hash, &suffix[suffix_len..]); bsuffix = &suffix[..suffix_len]; whole_block = true; } let mut controlled = [0u8; 16]; let total_len = ATTACK_BYTES + suffix.len(); let controlled_bytes = total_len.min(16); let controlled 
= &mut controlled[..controlled_bytes]; controlled[ATTACK_BYTES..].copy_from_slice(bsuffix); let seed = Simd::from_array([ 17820195240, 4041143216, 22093178114, 2324176188, ]); let mut rng = SRng::new(seed * Simd::splat(worker + 1)); let start = std::time::Instant::now(); for ii in 0_u64.. { if FOUND.load(Ordering::Relaxed) { return; } let prefix = rng.random_alphanum(); controlled[..6].copy_from_slice(&prefix[..6]); let prefix = { let prefix_hash = if whole_block { invert_block(target_hash, controlled) } else { preimage_prefix_hash(target_hash, controlled) }; single_prefix(total_len, prefix_hash) }; if check_alphanum(prefix) { FOUND.store(true, Ordering::Relaxed); let mut buffer = prefix.to_array().to_vec(); buffer.extend_from_slice(&controlled[..6]); buffer.extend_from_slice(suffix); let elapsed = start.elapsed(); let mhs = (ii as f64) / 1e6 / elapsed.as_secs_f64(); eprintln!("found prefix in {}it {:?} {:3.3}MH/s/core", ii, elapsed, mhs); eprintln!("hash: {:x?}", ComputeGlyphHash(&buffer)); println!("{}", core::str::from_utf8(&buffer).unwrap()); break; } } } const MESSAGE: &[&[u8]] = &[ b" Hello Casey! I hope this message finds you well.", b" Please ignore those 22 random chars to the left for now.", b" The work you've done on refterm is admirable. There are", b" not enough performance conscious programmers around, and", b" we need a demonstration of what is achievable. However,", b" I would like to address the claim that the hash function", b" used in refterm is 'cryptographically secure'. There is", b" a very specific meaning attached to those words, namely:", b" 1) it is hard to create a message for a given hash value", b" 2) it is hard to produce two messages with the same hash", b" If you check, the following strings have the same hash:", b" xvD7FsaUdGy9UyjalZlFEU, 0XXPpB0wpVszsvSxgsn0su,", b" IGNwdjol0dxLflcnfW7vsI, jcTHx0zBJbW2tdiX157RSz.", b" In fact, every line in the message yields the exact same", b" hash value. That is 0x00000000000000000000000000000000.", b" I believe this was a clear enough demonstration that the", b" hash function `ComputeGlyphHash` isn't cryptographically", b" secure, and that an attacker can corrupt the glyph cache", b" by printing glyphs with the same hash. The main problem", b" with this hash function is that all operations consuming", b" bytes are invertible. Which means an attacker could run", b" the hash function in reverse, consuming the message from", b" behind, and calculate the message to get the given hash.", b" The hash is also weak to a padding attack. For example,", br#" two strings "A" and "B\x00" yield the same hash, because"#, b" the padding is constant, so zero byte in the end doens't", b" matter, and the first byte is `xor`ed with input length.", b" If you'd like to, you can read this blog post explaining", b" these attacks in detail and how to avoid them using well", b" known methods: https://m1el.github.io/refterm-hash", b" Best regards, -- Igor", ]; fn main() { padding_attack(); invert_attack(b"Qwerty123"); prefix_collision_attack(b"hello"); chosen_prefix(b"hello"); preimage_attack(b"hello"); const THREADS: u64 = 16; for msg in MESSAGE { FOUND.store(false, Ordering::Relaxed); let threads = (0..THREADS) .map(|worker| std::thread::spawn(move || find_ascii_zeros(msg, worker))) .collect::<Vec<_>>(); for thread in threads { thread.join().unwrap(); } }; }
{ let mut target_hash = Simd::<u64, 2>::from_array([message.len() as u64, 0]).to_ne_bytes(); target_hash ^= DEFAULT_SEED; let prefix = single_prefix(message.len(), target_hash); println!("Demonstrating prefix attack"); println!("message: {:x?}", message); println!("hash: {:x?}", ComputeGlyphHash(message)); println!("prefix: {:x?}", prefix); let forgery = concat(prefix, message); println!("forgery: {:x?}", forgery); println!("hash: {:x?}", ComputeGlyphHash(&forgery)); println!(); }
identifier_body
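// Every inversion above leans on one property: `inv_aes_decx4` undoes the four
// `aes_dec(zero)` rounds the hash applies per block, which is what lets the
// attacks unwind the hash block by block. A round-trip check as a test sketch,
// assuming the same `core_simd`/`simd_aes` crates used by this file:
#[cfg(test)]
mod inversion_roundtrip {
    use super::*;

    #[test]
    fn inv_aes_decx4_undoes_four_dec_rounds() {
        let zero = Simd::splat(0);
        let original = Simd::from_array([0xA5_u8; 16]);
        let mut state = original;
        for _ in 0..4 {
            state = state.aes_dec(zero); // forward: one hashing round
        }
        assert_eq!(inv_aes_decx4(state), original); // backward: fully undone
    }
}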
main.rs
#![allow(clippy::needless_return)] #![feature(portable_simd)] use core_simd::Simd; use core::convert::TryInto; use srng::SRng; use simd_aes::SimdAes; const DEFAULT_SEED: Simd<u8, 16> = Simd::from_array([ 178, 201, 95, 240, 40, 41, 143, 216, 2, 209, 178, 114, 232, 4, 176, 188, ]); #[allow(non_snake_case)] fn ComputeGlyphHash(data: &[u8]) -> Simd<u8, 16> { let zero = Simd::splat(0); let mut hash = Simd::<u64, 2>::from_array([data.len() as u64, 0]).to_ne_bytes(); hash ^= DEFAULT_SEED; let mut chunks = data.chunks_exact(16); for chunk in chunks.by_ref() { let chunk: &[u8; 16] = chunk.try_into().unwrap(); let value = Simd::from_array(*chunk); hash ^= value; hash = hash.aes_dec(zero); hash = hash.aes_dec(zero); hash = hash.aes_dec(zero); hash = hash.aes_dec(zero); } let remainder = chunks.remainder(); let mut temp = [0_u8; 16]; temp[..remainder.len()].copy_from_slice(remainder); let value = Simd::from_array(temp); hash ^= value; hash = hash.aes_dec(zero); hash = hash.aes_dec(zero); hash = hash.aes_dec(zero); hash = hash.aes_dec(zero); return hash; } #[allow(dead_code)] fn inv_aes_dec(mut data: Simd<u8, 16>, key: Simd<u8, 16>) -> Simd<u8, 16> { data ^= key; let zero = Simd::splat(0); data = data.aes_dec_last(zero).aes_enc(zero); return data.aes_enc_last(zero); } fn inv_aes_decx4(mut hash: Simd<u8, 16>) -> Simd<u8, 16> { let zero = Simd::splat(0); hash = hash.aes_dec_last(zero); hash = hash.aes_enc(zero); hash = hash.aes_enc(zero); hash = hash.aes_enc(zero); hash = hash.aes_enc(zero); hash = hash.aes_enc_last(zero); return hash; } fn single_prefix(count: usize, target_hash: Simd<u8, 16>) -> Simd<u8, 16> { // The first stage looks like this: // Hash ^ Seed = dec^4(Count ^ Seed ^ Chunk) // To get the chunk, we need to reverse these: // dec^-4(Hash ^ Seed) = Count ^ Seed ^ Chunk // Chunk = dec^4(Hash ^ Seed) ^ Count ^ Seed // To create a one-prefix initialization, we want: // Hash = Count // Count = Count + 16 let mut hash = target_hash; hash = inv_aes_decx4(hash); let prefix_init = Simd::<u64, 2>::from_array([count as u64 + 16, 0]).to_ne_bytes(); hash ^= prefix_init; hash ^= DEFAULT_SEED; return hash; } fn preimage_prefix_hash(mut hash: Simd<u8, 16>, data: &[u8]) -> Simd<u8, 16> { let chunks = data.len() / 16; let tail = &data[chunks*16..]; let mut tail_buf = [0_u8; 16]; tail_buf[..tail.len()].copy_from_slice(tail); let value = Simd::from_array(tail_buf); hash = inv_aes_decx4(hash); hash ^= value; for chunk in data.chunks_exact(16).rev() { let chunk: &[u8; 16] = chunk.try_into().unwrap(); let value = Simd::from_array(*chunk); hash = inv_aes_decx4(hash); hash ^= value; } return hash; } fn invert_block(mut hash: Simd<u8, 16>, chunk: &[u8]) -> Simd<u8, 16> { let chunk: &[u8; 16] = chunk.try_into().unwrap(); let value = Simd::from_array(*chunk); hash = inv_aes_decx4(hash); return hash ^ value; } fn invert_last(suffix: &[u8], mut hash: Simd<u8, 16>) -> Simd<u8, 16> { let mut tail_buf = [0_u8; 16]; tail_buf[..suffix.len()].copy_from_slice(suffix); let value = Simd::from_array(tail_buf); hash = inv_aes_decx4(hash); hash ^= value; hash = inv_aes_decx4(hash); return hash; } fn
(prefix: Simd<u8, 16>, target: &[u8]) -> Vec<u8> { let mut image = prefix.to_array().to_vec(); image.extend_from_slice(target); image } fn prefix_collision_attack(message: &[u8]) { let mut target_hash = Simd::<u64, 2>::from_array([message.len() as u64, 0]).to_ne_bytes(); target_hash ^= DEFAULT_SEED; let prefix = single_prefix(message.len(), target_hash); println!("Demonstrating prefix attack"); println!("message: {:x?}", message); println!("hash: {:x?}", ComputeGlyphHash(b"hello")); println!("prefix: {:x?}", prefix); let forgery = concat(prefix, message); println!("forgery: {:x?}", forgery); println!("hash: {:x?}", ComputeGlyphHash(&forgery)); println!(); } fn chosen_prefix(prefix: &[u8]) { let zero = Simd::splat(0); let mut message = prefix.to_vec(); let remainder = 16 - (message.len() % 16); message.extend((0..remainder).map(|_| b'A')); message.extend((0..16).map(|_| 0)); let hash = ComputeGlyphHash(&message); let pre_current = invert_last(&[], hash); let pre_target = invert_last(&[], zero); let last = message.len() - 16; let suffix = pre_current ^ pre_target; message[last..].copy_from_slice(&suffix.to_array()); println!("Demonstrating chosen prefix attack"); println!("prefix: {:x?}", prefix); println!("forgery: {:x?}", message); println!("hash: {:x?}", ComputeGlyphHash(&message)); println!(); } fn preimage_attack(suffix: &[u8]) { println!("Demonstrating preimage attack"); println!("suffix: {:x?}", suffix); let target_hash = Simd::splat(0); println!("goal hash: {:x?}", target_hash); let prefix_hash = preimage_prefix_hash(target_hash, suffix); let preimage_prefix = single_prefix(suffix.len(), prefix_hash); println!("prefix: {:x?}", preimage_prefix); let message = concat(preimage_prefix, suffix); println!("message: {:x?}", message); println!("hash: {:x?}", ComputeGlyphHash(&message)); } fn padding_attack() { println!("Demonstrating padding attack"); println!(r#"message: "", hash: {:x?}"#, ComputeGlyphHash(b"")); println!(r#"message: "\x01", hash: {:x?}"#, ComputeGlyphHash(b"\x01")); println!(r#"message: "A", hash: {:x?}"#, ComputeGlyphHash(b"A")); println!(r#"message: "B\x00", hash: {:x?}"#, ComputeGlyphHash(b"B\x00")); println!(r#"message: "BAAAAAAAAAAAAAAA", hash: {:x?}"#, ComputeGlyphHash(b"BAAAAAAAAAAAAAAA")); println!(r#"message: "CAAAAAAAAAAAAAAA\x00", hash: {:x?}"#, ComputeGlyphHash(b"CAAAAAAAAAAAAAAA\x00")); println!(); } fn invert_attack(message: &[u8]) { println!("Demonstrating invert attack, invert a hash up to 15 bytes"); println!("Note: due to padding attack, there are actually more messages"); println!("plaintext: {:x?}", message); let mut hash = ComputeGlyphHash(message); println!("hash: {:x?}", hash); hash = inv_aes_decx4(hash); hash ^= DEFAULT_SEED; let mut buffer = hash.to_array(); let len = buffer.iter().rposition(|&chr| chr!= 0).map_or(0, |x| x + 1); if len == 16 { println!("the plaintext mus be shorter than 16 bytes, cannot invert"); return; } buffer[0] ^= len as u8; let recovered = &buffer[..len]; println!("recovered: {:x?}", recovered); println!("hash: {:x?}", ComputeGlyphHash(recovered)); println!(); } pub fn check_alphanum(bytes: Simd<u8, 16>) -> bool { // check if the characters are outside of '0'..'z' range if (bytes - Simd::splat(b'0')).lanes_gt(Simd::splat(b'z' - b'0')).any() { return false; } // check if the characters are in of '9'+1..'A'-1 range if (bytes - Simd::splat(b'9' + 1)).lanes_lt(Simd::splat(b'A' - (b'9' + 1))).any() { return false; } // check if the characters are in of 'Z'+1..'a'-1 range if (bytes - Simd::splat(b'Z' + 1)).lanes_lt(Simd::splat(b'a' 
- (b'Z' + 1))).any() { return false; } return true; } use core::sync::atomic::{AtomicBool, Ordering}; static FOUND: AtomicBool = AtomicBool::new(false); fn find_ascii_zeros(suffix: &[u8], worker: u64) { const ATTACK_BYTES: usize = 6; let mut target_hash = Simd::<u8, 16>::splat(0); let mut bsuffix = suffix; let suffix_len = 16 - ATTACK_BYTES; let mut whole_block = false; if suffix.len() >= suffix_len { target_hash = preimage_prefix_hash(target_hash, &suffix[suffix_len..]); bsuffix = &suffix[..suffix_len]; whole_block = true; } let mut controlled = [0u8; 16]; let total_len = ATTACK_BYTES + suffix.len(); let controlled_bytes = total_len.min(16); let controlled = &mut controlled[..controlled_bytes]; controlled[ATTACK_BYTES..].copy_from_slice(bsuffix); let seed = Simd::from_array([ 17820195240, 4041143216, 22093178114, 2324176188, ]); let mut rng = SRng::new(seed * Simd::splat(worker + 1)); let start = std::time::Instant::now(); for ii in 0_u64.. { if FOUND.load(Ordering::Relaxed) { return; } let prefix = rng.random_alphanum(); controlled[..6].copy_from_slice(&prefix[..6]); let prefix = { let prefix_hash = if whole_block { invert_block(target_hash, controlled) } else { preimage_prefix_hash(target_hash, controlled) }; single_prefix(total_len, prefix_hash) }; if check_alphanum(prefix) { FOUND.store(true, Ordering::Relaxed); let mut buffer = prefix.to_array().to_vec(); buffer.extend_from_slice(&controlled[..6]); buffer.extend_from_slice(suffix); let elapsed = start.elapsed(); let mhs = (ii as f64) / 1e6 / elapsed.as_secs_f64(); eprintln!("found prefix in {}it {:?} {:3.3}MH/s/core", ii, elapsed, mhs); eprintln!("hash: {:x?}", ComputeGlyphHash(&buffer)); println!("{}", core::str::from_utf8(&buffer).unwrap()); break; } } } const MESSAGE: &[&[u8]] = &[ b" Hello Casey! I hope this message finds you well.", b" Please ignore those 22 random chars to the left for now.", b" The work you've done on refterm is admirable. There are", b" not enough performance conscious programmers around, and", b" we need a demonstration of what is achievable. However,", b" I would like to address the claim that the hash function", b" used in refterm is 'cryptographically secure'. There is", b" a very specific meaning attached to those words, namely:", b" 1) it is hard to create a message for a given hash value", b" 2) it is hard to produce two messages with the same hash", b" If you check, the following strings have the same hash:", b" xvD7FsaUdGy9UyjalZlFEU, 0XXPpB0wpVszsvSxgsn0su,", b" IGNwdjol0dxLflcnfW7vsI, jcTHx0zBJbW2tdiX157RSz.", b" In fact, every line in the message yields the exact same", b" hash value. That is 0x00000000000000000000000000000000.", b" I believe this was a clear enough demonstration that the", b" hash function `ComputeGlyphHash` isn't cryptographically", b" secure, and that an attacker can corrupt the glyph cache", b" by printing glyphs with the same hash. The main problem", b" with this hash function is that all operations consuming", b" bytes are invertible. Which means an attacker could run", b" the hash function in reverse, consuming the message from", b" behind, and calculate the message to get the given hash.", b" The hash is also weak to a padding attack. 
For example,", br#" two strings "A" and "B\x00" yield the same hash, because"#, b" the padding is constant, so zero byte in the end doens't", b" matter, and the first byte is `xor`ed with input length.", b" If you'd like to, you can read this blog post explaining", b" these attacks in detail and how to avoid them using well", b" known methods: https://m1el.github.io/refterm-hash", b" Best regards, -- Igor", ]; fn main() { padding_attack(); invert_attack(b"Qwerty123"); prefix_collision_attack(b"hello"); chosen_prefix(b"hello"); preimage_attack(b"hello"); const THREADS: u64 = 16; for msg in MESSAGE { FOUND.store(false, Ordering::Relaxed); let threads = (0..THREADS) .map(|worker| std::thread::spawn(move || find_ascii_zeros(msg, worker))) .collect::<Vec<_>>(); for thread in threads { thread.join().unwrap(); } }; }
concat
identifier_name
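// `check_alphanum` above rejects non-alphanumeric lanes with three wrapped-
// subtraction range checks: after `c.wrapping_sub(lo)`, a single unsigned
// comparison against `hi - lo` tests `lo <= c && c <= hi`. The same trick per
// byte, as a scalar sketch (the name `is_alphanum_byte` is illustrative):
fn is_alphanum_byte(c: u8) -> bool {
    // outside the '0'..='z' range entirely?
    if c.wrapping_sub(b'0') > b'z' - b'0' {
        return false;
    }
    // inside the ':'..='@' gap between digits and uppercase letters?
    if c.wrapping_sub(b'9' + 1) < b'A' - (b'9' + 1) {
        return false;
    }
    // inside the '['..='`' gap between uppercase and lowercase letters?
    if c.wrapping_sub(b'Z' + 1) < b'a' - (b'Z' + 1) {
        return false;
    }
    true
}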
main.rs
#![allow(clippy::needless_return)] #![feature(portable_simd)] use core_simd::Simd; use core::convert::TryInto; use srng::SRng; use simd_aes::SimdAes; const DEFAULT_SEED: Simd<u8, 16> = Simd::from_array([ 178, 201, 95, 240, 40, 41, 143, 216, 2, 209, 178, 114, 232, 4, 176, 188, ]); #[allow(non_snake_case)] fn ComputeGlyphHash(data: &[u8]) -> Simd<u8, 16> { let zero = Simd::splat(0); let mut hash = Simd::<u64, 2>::from_array([data.len() as u64, 0]).to_ne_bytes(); hash ^= DEFAULT_SEED; let mut chunks = data.chunks_exact(16); for chunk in chunks.by_ref() { let chunk: &[u8; 16] = chunk.try_into().unwrap(); let value = Simd::from_array(*chunk); hash ^= value; hash = hash.aes_dec(zero); hash = hash.aes_dec(zero); hash = hash.aes_dec(zero); hash = hash.aes_dec(zero); } let remainder = chunks.remainder(); let mut temp = [0_u8; 16]; temp[..remainder.len()].copy_from_slice(remainder); let value = Simd::from_array(temp); hash ^= value; hash = hash.aes_dec(zero); hash = hash.aes_dec(zero); hash = hash.aes_dec(zero); hash = hash.aes_dec(zero); return hash; } #[allow(dead_code)] fn inv_aes_dec(mut data: Simd<u8, 16>, key: Simd<u8, 16>) -> Simd<u8, 16> { data ^= key; let zero = Simd::splat(0); data = data.aes_dec_last(zero).aes_enc(zero); return data.aes_enc_last(zero); } fn inv_aes_decx4(mut hash: Simd<u8, 16>) -> Simd<u8, 16> { let zero = Simd::splat(0); hash = hash.aes_dec_last(zero); hash = hash.aes_enc(zero); hash = hash.aes_enc(zero); hash = hash.aes_enc(zero); hash = hash.aes_enc(zero); hash = hash.aes_enc_last(zero); return hash; } fn single_prefix(count: usize, target_hash: Simd<u8, 16>) -> Simd<u8, 16> { // The first stage looks like this: // Hash ^ Seed = dec^4(Count ^ Seed ^ Chunk) // To get the chunk, we need to reverse these: // dec^-4(Hash ^ Seed) = Count ^ Seed ^ Chunk // Chunk = dec^4(Hash ^ Seed) ^ Count ^ Seed // To create a one-prefix initialization, we want: // Hash = Count // Count = Count + 16 let mut hash = target_hash; hash = inv_aes_decx4(hash); let prefix_init = Simd::<u64, 2>::from_array([count as u64 + 16, 0]).to_ne_bytes(); hash ^= prefix_init; hash ^= DEFAULT_SEED; return hash; } fn preimage_prefix_hash(mut hash: Simd<u8, 16>, data: &[u8]) -> Simd<u8, 16> { let chunks = data.len() / 16; let tail = &data[chunks*16..]; let mut tail_buf = [0_u8; 16]; tail_buf[..tail.len()].copy_from_slice(tail); let value = Simd::from_array(tail_buf); hash = inv_aes_decx4(hash); hash ^= value; for chunk in data.chunks_exact(16).rev() { let chunk: &[u8; 16] = chunk.try_into().unwrap(); let value = Simd::from_array(*chunk); hash = inv_aes_decx4(hash); hash ^= value; } return hash; } fn invert_block(mut hash: Simd<u8, 16>, chunk: &[u8]) -> Simd<u8, 16> { let chunk: &[u8; 16] = chunk.try_into().unwrap(); let value = Simd::from_array(*chunk); hash = inv_aes_decx4(hash); return hash ^ value; } fn invert_last(suffix: &[u8], mut hash: Simd<u8, 16>) -> Simd<u8, 16> { let mut tail_buf = [0_u8; 16]; tail_buf[..suffix.len()].copy_from_slice(suffix); let value = Simd::from_array(tail_buf); hash = inv_aes_decx4(hash); hash ^= value; hash = inv_aes_decx4(hash); return hash; } fn concat(prefix: Simd<u8, 16>, target: &[u8]) -> Vec<u8> { let mut image = prefix.to_array().to_vec(); image.extend_from_slice(target); image } fn prefix_collision_attack(message: &[u8]) { let mut target_hash = Simd::<u64, 2>::from_array([message.len() as u64, 0]).to_ne_bytes(); target_hash ^= DEFAULT_SEED; let prefix = single_prefix(message.len(), target_hash); println!("Demonstrating prefix attack"); println!("message: {:x?}", 
message); println!("hash: {:x?}", ComputeGlyphHash(b"hello")); println!("prefix: {:x?}", prefix); let forgery = concat(prefix, message); println!("forgery: {:x?}", forgery); println!("hash: {:x?}", ComputeGlyphHash(&forgery)); println!(); } fn chosen_prefix(prefix: &[u8]) { let zero = Simd::splat(0); let mut message = prefix.to_vec(); let remainder = 16 - (message.len() % 16); message.extend((0..remainder).map(|_| b'A')); message.extend((0..16).map(|_| 0)); let hash = ComputeGlyphHash(&message); let pre_current = invert_last(&[], hash); let pre_target = invert_last(&[], zero); let last = message.len() - 16; let suffix = pre_current ^ pre_target; message[last..].copy_from_slice(&suffix.to_array()); println!("Demonstrating chosen prefix attack"); println!("prefix: {:x?}", prefix); println!("forgery: {:x?}", message); println!("hash: {:x?}", ComputeGlyphHash(&message)); println!(); } fn preimage_attack(suffix: &[u8]) { println!("Demonstrating preimage attack"); println!("suffix: {:x?}", suffix); let target_hash = Simd::splat(0); println!("goal hash: {:x?}", target_hash); let prefix_hash = preimage_prefix_hash(target_hash, suffix); let preimage_prefix = single_prefix(suffix.len(), prefix_hash); println!("prefix: {:x?}", preimage_prefix); let message = concat(preimage_prefix, suffix); println!("message: {:x?}", message); println!("hash: {:x?}", ComputeGlyphHash(&message)); } fn padding_attack() { println!("Demonstrating padding attack"); println!(r#"message: "", hash: {:x?}"#, ComputeGlyphHash(b"")); println!(r#"message: "\x01", hash: {:x?}"#, ComputeGlyphHash(b"\x01")); println!(r#"message: "A", hash: {:x?}"#, ComputeGlyphHash(b"A")); println!(r#"message: "B\x00", hash: {:x?}"#, ComputeGlyphHash(b"B\x00")); println!(r#"message: "BAAAAAAAAAAAAAAA", hash: {:x?}"#, ComputeGlyphHash(b"BAAAAAAAAAAAAAAA")); println!(r#"message: "CAAAAAAAAAAAAAAA\x00", hash: {:x?}"#, ComputeGlyphHash(b"CAAAAAAAAAAAAAAA\x00")); println!(); } fn invert_attack(message: &[u8]) { println!("Demonstrating invert attack, invert a hash up to 15 bytes"); println!("Note: due to padding attack, there are actually more messages"); println!("plaintext: {:x?}", message); let mut hash = ComputeGlyphHash(message); println!("hash: {:x?}", hash); hash = inv_aes_decx4(hash); hash ^= DEFAULT_SEED; let mut buffer = hash.to_array(); let len = buffer.iter().rposition(|&chr| chr!= 0).map_or(0, |x| x + 1); if len == 16 { println!("the plaintext mus be shorter than 16 bytes, cannot invert"); return; } buffer[0] ^= len as u8; let recovered = &buffer[..len]; println!("recovered: {:x?}", recovered); println!("hash: {:x?}", ComputeGlyphHash(recovered)); println!(); } pub fn check_alphanum(bytes: Simd<u8, 16>) -> bool { // check if the characters are outside of '0'..'z' range if (bytes - Simd::splat(b'0')).lanes_gt(Simd::splat(b'z' - b'0')).any() { return false; } // check if the characters are in of '9'+1..'A'-1 range if (bytes - Simd::splat(b'9' + 1)).lanes_lt(Simd::splat(b'A' - (b'9' + 1))).any() { return false; } // check if the characters are in of 'Z'+1..'a'-1 range if (bytes - Simd::splat(b'Z' + 1)).lanes_lt(Simd::splat(b'a' - (b'Z' + 1))).any() { return false; } return true; } use core::sync::atomic::{AtomicBool, Ordering}; static FOUND: AtomicBool = AtomicBool::new(false); fn find_ascii_zeros(suffix: &[u8], worker: u64) { const ATTACK_BYTES: usize = 6; let mut target_hash = Simd::<u8, 16>::splat(0); let mut bsuffix = suffix; let suffix_len = 16 - ATTACK_BYTES; let mut whole_block = false; if suffix.len() >= suffix_len { target_hash = 
preimage_prefix_hash(target_hash, &suffix[suffix_len..]); bsuffix = &suffix[..suffix_len]; whole_block = true; } let mut controlled = [0u8; 16]; let total_len = ATTACK_BYTES + suffix.len(); let controlled_bytes = total_len.min(16); let controlled = &mut controlled[..controlled_bytes]; controlled[ATTACK_BYTES..].copy_from_slice(bsuffix); let seed = Simd::from_array([ 17820195240, 4041143216, 22093178114, 2324176188, ]); let mut rng = SRng::new(seed * Simd::splat(worker + 1)); let start = std::time::Instant::now(); for ii in 0_u64.. { if FOUND.load(Ordering::Relaxed) { return; } let prefix = rng.random_alphanum(); controlled[..6].copy_from_slice(&prefix[..6]); let prefix = { let prefix_hash = if whole_block { invert_block(target_hash, controlled) } else { preimage_prefix_hash(target_hash, controlled) }; single_prefix(total_len, prefix_hash) }; if check_alphanum(prefix) { FOUND.store(true, Ordering::Relaxed); let mut buffer = prefix.to_array().to_vec(); buffer.extend_from_slice(&controlled[..6]); buffer.extend_from_slice(suffix); let elapsed = start.elapsed(); let mhs = (ii as f64) / 1e6 / elapsed.as_secs_f64(); eprintln!("found prefix in {}it {:?} {:3.3}MH/s/core", ii, elapsed, mhs); eprintln!("hash: {:x?}", ComputeGlyphHash(&buffer)); println!("{}", core::str::from_utf8(&buffer).unwrap()); break; } } } const MESSAGE: &[&[u8]] = &[ b" Hello Casey! I hope this message finds you well.", b" Please ignore those 22 random chars to the left for now.", b" The work you've done on refterm is admirable. There are", b" not enough performance conscious programmers around, and", b" we need a demonstration of what is achievable. However,", b" I would like to address the claim that the hash function", b" used in refterm is 'cryptographically secure'. There is", b" a very specific meaning attached to those words, namely:", b" 1) it is hard to create a message for a given hash value", b" 2) it is hard to produce two messages with the same hash", b" If you check, the following strings have the same hash:", b" xvD7FsaUdGy9UyjalZlFEU, 0XXPpB0wpVszsvSxgsn0su,", b" IGNwdjol0dxLflcnfW7vsI, jcTHx0zBJbW2tdiX157RSz.", b" In fact, every line in the message yields the exact same", b" hash value. That is 0x00000000000000000000000000000000.", b" I believe this was a clear enough demonstration that the", b" hash function `ComputeGlyphHash` isn't cryptographically", b" secure, and that an attacker can corrupt the glyph cache", b" by printing glyphs with the same hash. The main problem", b" with this hash function is that all operations consuming", b" bytes are invertible. Which means an attacker could run", b" the hash function in reverse, consuming the message from", b" behind, and calculate the message to get the given hash.", b" The hash is also weak to a padding attack. For example,",
b" known methods: https://m1el.github.io/refterm-hash", b" Best regards, -- Igor", ]; fn main() { padding_attack(); invert_attack(b"Qwerty123"); prefix_collision_attack(b"hello"); chosen_prefix(b"hello"); preimage_attack(b"hello"); const THREADS: u64 = 16; for msg in MESSAGE { FOUND.store(false, Ordering::Relaxed); let threads = (0..THREADS) .map(|worker| std::thread::spawn(move || find_ascii_zeros(msg, worker))) .collect::<Vec<_>>(); for thread in threads { thread.join().unwrap(); } }; }
br#" two strings "A" and "B\x00" yield the same hash, because"#, b" the padding is constant, so zero byte in the end doens't", b" matter, and the first byte is `xor`ed with input length.", b" If you'd like to, you can read this blog post explaining", b" these attacks in detail and how to avoid them using well",
random_line_split
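// Why `padding_attack` above finds "A" and "B\x00" colliding (sketch, assuming a
// little-endian target so the low byte of the length lands in byte 0): the tail
// is zero-padded to a full 16-byte block before the final rounds, and the message
// length is XORed into byte 0 of the initial state. Going from length 1 to
// length 2 is an XOR by 0x03, and b'A' ^ 0x03 == b'B', so the combined value
// (length byte ^ first tail byte) is identical for both messages and the hashes match.
fn padding_collision_holds() -> bool {
    let (len_a, first_a) = (1_u8, b'A'); // message "A"
    let (len_b, first_b) = (2_u8, b'B'); // message "B\x00"
    // All remaining bytes of the two zero-padded tail blocks are equal, so the
    // pre-round states could only differ in this one byte -- and they do not:
    (len_a ^ first_a) == (len_b ^ first_b)
}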
main.rs
#![allow(clippy::needless_return)] #![feature(portable_simd)] use core_simd::Simd; use core::convert::TryInto; use srng::SRng; use simd_aes::SimdAes; const DEFAULT_SEED: Simd<u8, 16> = Simd::from_array([ 178, 201, 95, 240, 40, 41, 143, 216, 2, 209, 178, 114, 232, 4, 176, 188, ]); #[allow(non_snake_case)] fn ComputeGlyphHash(data: &[u8]) -> Simd<u8, 16> { let zero = Simd::splat(0); let mut hash = Simd::<u64, 2>::from_array([data.len() as u64, 0]).to_ne_bytes(); hash ^= DEFAULT_SEED; let mut chunks = data.chunks_exact(16); for chunk in chunks.by_ref() { let chunk: &[u8; 16] = chunk.try_into().unwrap(); let value = Simd::from_array(*chunk); hash ^= value; hash = hash.aes_dec(zero); hash = hash.aes_dec(zero); hash = hash.aes_dec(zero); hash = hash.aes_dec(zero); } let remainder = chunks.remainder(); let mut temp = [0_u8; 16]; temp[..remainder.len()].copy_from_slice(remainder); let value = Simd::from_array(temp); hash ^= value; hash = hash.aes_dec(zero); hash = hash.aes_dec(zero); hash = hash.aes_dec(zero); hash = hash.aes_dec(zero); return hash; } #[allow(dead_code)] fn inv_aes_dec(mut data: Simd<u8, 16>, key: Simd<u8, 16>) -> Simd<u8, 16> { data ^= key; let zero = Simd::splat(0); data = data.aes_dec_last(zero).aes_enc(zero); return data.aes_enc_last(zero); } fn inv_aes_decx4(mut hash: Simd<u8, 16>) -> Simd<u8, 16> { let zero = Simd::splat(0); hash = hash.aes_dec_last(zero); hash = hash.aes_enc(zero); hash = hash.aes_enc(zero); hash = hash.aes_enc(zero); hash = hash.aes_enc(zero); hash = hash.aes_enc_last(zero); return hash; } fn single_prefix(count: usize, target_hash: Simd<u8, 16>) -> Simd<u8, 16> { // The first stage looks like this: // Hash ^ Seed = dec^4(Count ^ Seed ^ Chunk) // To get the chunk, we need to reverse these: // dec^-4(Hash ^ Seed) = Count ^ Seed ^ Chunk // Chunk = dec^4(Hash ^ Seed) ^ Count ^ Seed // To create a one-prefix initialization, we want: // Hash = Count // Count = Count + 16 let mut hash = target_hash; hash = inv_aes_decx4(hash); let prefix_init = Simd::<u64, 2>::from_array([count as u64 + 16, 0]).to_ne_bytes(); hash ^= prefix_init; hash ^= DEFAULT_SEED; return hash; } fn preimage_prefix_hash(mut hash: Simd<u8, 16>, data: &[u8]) -> Simd<u8, 16> { let chunks = data.len() / 16; let tail = &data[chunks*16..]; let mut tail_buf = [0_u8; 16]; tail_buf[..tail.len()].copy_from_slice(tail); let value = Simd::from_array(tail_buf); hash = inv_aes_decx4(hash); hash ^= value; for chunk in data.chunks_exact(16).rev() { let chunk: &[u8; 16] = chunk.try_into().unwrap(); let value = Simd::from_array(*chunk); hash = inv_aes_decx4(hash); hash ^= value; } return hash; } fn invert_block(mut hash: Simd<u8, 16>, chunk: &[u8]) -> Simd<u8, 16> { let chunk: &[u8; 16] = chunk.try_into().unwrap(); let value = Simd::from_array(*chunk); hash = inv_aes_decx4(hash); return hash ^ value; } fn invert_last(suffix: &[u8], mut hash: Simd<u8, 16>) -> Simd<u8, 16> { let mut tail_buf = [0_u8; 16]; tail_buf[..suffix.len()].copy_from_slice(suffix); let value = Simd::from_array(tail_buf); hash = inv_aes_decx4(hash); hash ^= value; hash = inv_aes_decx4(hash); return hash; } fn concat(prefix: Simd<u8, 16>, target: &[u8]) -> Vec<u8> { let mut image = prefix.to_array().to_vec(); image.extend_from_slice(target); image } fn prefix_collision_attack(message: &[u8]) { let mut target_hash = Simd::<u64, 2>::from_array([message.len() as u64, 0]).to_ne_bytes(); target_hash ^= DEFAULT_SEED; let prefix = single_prefix(message.len(), target_hash); println!("Demonstrating prefix attack"); println!("message: {:x?}", 
message); println!("hash: {:x?}", ComputeGlyphHash(b"hello")); println!("prefix: {:x?}", prefix); let forgery = concat(prefix, message); println!("forgery: {:x?}", forgery); println!("hash: {:x?}", ComputeGlyphHash(&forgery)); println!(); } fn chosen_prefix(prefix: &[u8]) { let zero = Simd::splat(0); let mut message = prefix.to_vec(); let remainder = 16 - (message.len() % 16); message.extend((0..remainder).map(|_| b'A')); message.extend((0..16).map(|_| 0)); let hash = ComputeGlyphHash(&message); let pre_current = invert_last(&[], hash); let pre_target = invert_last(&[], zero); let last = message.len() - 16; let suffix = pre_current ^ pre_target; message[last..].copy_from_slice(&suffix.to_array()); println!("Demonstrating chosen prefix attack"); println!("prefix: {:x?}", prefix); println!("forgery: {:x?}", message); println!("hash: {:x?}", ComputeGlyphHash(&message)); println!(); } fn preimage_attack(suffix: &[u8]) { println!("Demonstrating preimage attack"); println!("suffix: {:x?}", suffix); let target_hash = Simd::splat(0); println!("goal hash: {:x?}", target_hash); let prefix_hash = preimage_prefix_hash(target_hash, suffix); let preimage_prefix = single_prefix(suffix.len(), prefix_hash); println!("prefix: {:x?}", preimage_prefix); let message = concat(preimage_prefix, suffix); println!("message: {:x?}", message); println!("hash: {:x?}", ComputeGlyphHash(&message)); } fn padding_attack() { println!("Demonstrating padding attack"); println!(r#"message: "", hash: {:x?}"#, ComputeGlyphHash(b"")); println!(r#"message: "\x01", hash: {:x?}"#, ComputeGlyphHash(b"\x01")); println!(r#"message: "A", hash: {:x?}"#, ComputeGlyphHash(b"A")); println!(r#"message: "B\x00", hash: {:x?}"#, ComputeGlyphHash(b"B\x00")); println!(r#"message: "BAAAAAAAAAAAAAAA", hash: {:x?}"#, ComputeGlyphHash(b"BAAAAAAAAAAAAAAA")); println!(r#"message: "CAAAAAAAAAAAAAAA\x00", hash: {:x?}"#, ComputeGlyphHash(b"CAAAAAAAAAAAAAAA\x00")); println!(); } fn invert_attack(message: &[u8]) { println!("Demonstrating invert attack, invert a hash up to 15 bytes"); println!("Note: due to padding attack, there are actually more messages"); println!("plaintext: {:x?}", message); let mut hash = ComputeGlyphHash(message); println!("hash: {:x?}", hash); hash = inv_aes_decx4(hash); hash ^= DEFAULT_SEED; let mut buffer = hash.to_array(); let len = buffer.iter().rposition(|&chr| chr!= 0).map_or(0, |x| x + 1); if len == 16
buffer[0] ^= len as u8; let recovered = &buffer[..len]; println!("recovered: {:x?}", recovered); println!("hash: {:x?}", ComputeGlyphHash(recovered)); println!(); } pub fn check_alphanum(bytes: Simd<u8, 16>) -> bool { // check if the characters are outside of '0'..'z' range if (bytes - Simd::splat(b'0')).lanes_gt(Simd::splat(b'z' - b'0')).any() { return false; } // check if the characters are in of '9'+1..'A'-1 range if (bytes - Simd::splat(b'9' + 1)).lanes_lt(Simd::splat(b'A' - (b'9' + 1))).any() { return false; } // check if the characters are in of 'Z'+1..'a'-1 range if (bytes - Simd::splat(b'Z' + 1)).lanes_lt(Simd::splat(b'a' - (b'Z' + 1))).any() { return false; } return true; } use core::sync::atomic::{AtomicBool, Ordering}; static FOUND: AtomicBool = AtomicBool::new(false); fn find_ascii_zeros(suffix: &[u8], worker: u64) { const ATTACK_BYTES: usize = 6; let mut target_hash = Simd::<u8, 16>::splat(0); let mut bsuffix = suffix; let suffix_len = 16 - ATTACK_BYTES; let mut whole_block = false; if suffix.len() >= suffix_len { target_hash = preimage_prefix_hash(target_hash, &suffix[suffix_len..]); bsuffix = &suffix[..suffix_len]; whole_block = true; } let mut controlled = [0u8; 16]; let total_len = ATTACK_BYTES + suffix.len(); let controlled_bytes = total_len.min(16); let controlled = &mut controlled[..controlled_bytes]; controlled[ATTACK_BYTES..].copy_from_slice(bsuffix); let seed = Simd::from_array([ 17820195240, 4041143216, 22093178114, 2324176188, ]); let mut rng = SRng::new(seed * Simd::splat(worker + 1)); let start = std::time::Instant::now(); for ii in 0_u64.. { if FOUND.load(Ordering::Relaxed) { return; } let prefix = rng.random_alphanum(); controlled[..6].copy_from_slice(&prefix[..6]); let prefix = { let prefix_hash = if whole_block { invert_block(target_hash, controlled) } else { preimage_prefix_hash(target_hash, controlled) }; single_prefix(total_len, prefix_hash) }; if check_alphanum(prefix) { FOUND.store(true, Ordering::Relaxed); let mut buffer = prefix.to_array().to_vec(); buffer.extend_from_slice(&controlled[..6]); buffer.extend_from_slice(suffix); let elapsed = start.elapsed(); let mhs = (ii as f64) / 1e6 / elapsed.as_secs_f64(); eprintln!("found prefix in {}it {:?} {:3.3}MH/s/core", ii, elapsed, mhs); eprintln!("hash: {:x?}", ComputeGlyphHash(&buffer)); println!("{}", core::str::from_utf8(&buffer).unwrap()); break; } } } const MESSAGE: &[&[u8]] = &[ b" Hello Casey! I hope this message finds you well.", b" Please ignore those 22 random chars to the left for now.", b" The work you've done on refterm is admirable. There are", b" not enough performance conscious programmers around, and", b" we need a demonstration of what is achievable. However,", b" I would like to address the claim that the hash function", b" used in refterm is 'cryptographically secure'. There is", b" a very specific meaning attached to those words, namely:", b" 1) it is hard to create a message for a given hash value", b" 2) it is hard to produce two messages with the same hash", b" If you check, the following strings have the same hash:", b" xvD7FsaUdGy9UyjalZlFEU, 0XXPpB0wpVszsvSxgsn0su,", b" IGNwdjol0dxLflcnfW7vsI, jcTHx0zBJbW2tdiX157RSz.", b" In fact, every line in the message yields the exact same", b" hash value. That is 0x00000000000000000000000000000000.", b" I believe this was a clear enough demonstration that the", b" hash function `ComputeGlyphHash` isn't cryptographically", b" secure, and that an attacker can corrupt the glyph cache", b" by printing glyphs with the same hash. 
The main problem", b" with this hash function is that all operations consuming", b" bytes are invertible. Which means an attacker could run", b" the hash function in reverse, consuming the message from", b" behind, and calculate the message to get the given hash.", b" The hash is also weak to a padding attack. For example,", br#" two strings "A" and "B\x00" yield the same hash, because"#, b" the padding is constant, so zero byte in the end doens't", b" matter, and the first byte is `xor`ed with input length.", b" If you'd like to, you can read this blog post explaining", b" these attacks in detail and how to avoid them using well", b" known methods: https://m1el.github.io/refterm-hash", b" Best regards, -- Igor", ]; fn main() { padding_attack(); invert_attack(b"Qwerty123"); prefix_collision_attack(b"hello"); chosen_prefix(b"hello"); preimage_attack(b"hello"); const THREADS: u64 = 16; for msg in MESSAGE { FOUND.store(false, Ordering::Relaxed); let threads = (0..THREADS) .map(|worker| std::thread::spawn(move || find_ascii_zeros(msg, worker))) .collect::<Vec<_>>(); for thread in threads { thread.join().unwrap(); } }; }
{ println!("the plaintext mus be shorter than 16 bytes, cannot invert"); return; }
conditional_block
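// `find_ascii_zeros` above coordinates its workers through the shared FOUND flag:
// every thread polls it with Relaxed loads, and the first one to succeed stores
// `true` so the rest return early. A minimal sketch of that cooperative-
// cancellation pattern; the modulo predicate is a made-up stand-in for
// "candidate prefix is fully alphanumeric".
use std::sync::atomic::{AtomicBool, Ordering};

static DONE: AtomicBool = AtomicBool::new(false);

fn search_worker(worker: u64) {
    for i in 0_u64.. {
        if DONE.load(Ordering::Relaxed) {
            return; // another worker already found a result
        }
        if i.wrapping_mul(worker + 1) % 10_000_000 == 42 { // stand-in predicate
            DONE.store(true, Ordering::Relaxed); // tell the other workers to stop
            return;
        }
    }
}

fn run_search() {
    DONE.store(false, Ordering::Relaxed); // reset between searches, as main() does
    let handles: Vec<_> = (0..4_u64)
        .map(|w| std::thread::spawn(move || search_worker(w)))
        .collect();
    for handle in handles {
        handle.join().unwrap();
    }
}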
lib.rs
//! The crate serves as bindings to the official (outdated) //! [dota2 webapi](https://dev.dota2.com/forum/dota-2/spectating/replays/webapi/60177-things-you-should-know-before-starting?t=58317). //! The crate has been made so you can make calls directly and get a result back in a struct. //! //! Read the full list of API (outdated) calls [here](https://wiki.teamfortress.com/wiki/WebAPI#Dota_2). //! //! Use [xpaw](https://steamapi.xpaw.me/#) for the latest. //! //! The webapi terms are the same as the official ones except they are all in lowercase, e.g. `GetGameItems` is now `get_game_items()`. //! //! You also need a key that you can get [here](http://steamcommunity.com/dev/apikey). //! > Originally posted by Zoid at [forum](https://dev.dota2.com/forum/dota-2/spectating/replays/webapi/60177-things-you-should-know-before-starting?t=58317) When you go to http://steamcommunity.com/dev/apikey the "domain" //! > field is just a note. It's not actually used for anything and is just a helpful field so you can //! > tell us what your website is. You can just put your name in for now. Once you get a key, its what //! > uniquely identifies you when accessing our WebAPI calls. //! //! In your `main.rs`, or anywhere you intend to use the library, first create a non-mutable string holding //! your token; there are no calls without the token. //! ```rust //! //main.rs //! use dota2_webapi_bindings::Dota2Api; //! static DOTA2_KEY: &str = "0123456789"; //example token //! //! fn main() { //! let mut dota = Dota2Api::new(String::from(DOTA2_KEY)); //! // we use `set` to configure the URL first //! dota.set_heroes().itemized_only(true).language("zh_zh"); //! // you can also write the above as just `dota.set_heroes();` or `dota.set_heroes().itemized_only(true);` //! // or just `dota.set_heroes().language("zh_zh");` or `dota.set_heroes().language("zh_zh").itemized_only(true);` //! // our builder-like functions take care of optional parameters //! //! // and finally `get` to retrieve our struct //! let data = dota.get_heroes().expect("something went wrong, ez mid"); //! } //! //! ``` //! //! ##### Available calls : //! * IEconDOTA2_570 //! * GetGameItems //! * GetHeroes //! * GetRarities //! * GetTournamentPrizePool //! * IDOTA2Match_205790 //! * GetLeagueListing //! * IDOTA2Match_570 //! * GetLiveLeagueGames //! * GetTopLiveGame //! //! **Note:** Try using `language()` with everything, just put in any string; it seems to give a more readable name //! and description for some reason. I have not set up a default because sometimes that might not be your intention. #[macro_use] extern crate serde_derive; extern crate hyper; extern crate serde_json; pub mod dota; use hyper::status::StatusCode; use hyper::Client; use std::io::Read; use crate::dota::{ get_game_items::*, get_heroes::*, get_league_listing::*, get_live_league_games::*, get_rarities::*, get_top_live_game::*, get_tournament_prize_pool::*, }; /// language macro for easy implementation in the various builder structs /// /// The language to retrieve results in (default is en_us) (see http://en.wikipedia.org/wiki/ISO_639-1 for /// the language codes (first two characters) and http://en.wikipedia.org/wiki/List_of_ISO_639-1_codes for /// the country codes (last two characters)) /// /// language (Optional) (string) : The language to provide output in. /// /// **Note:** Try using `language()` with everything, just put in any string; it seems to give a more readable name /// and description for some reason macro_rules!
language { () => { pub fn language(&mut self, param_value: &str) -> &mut Self { self.url.push_str(&*format!("language={}&", param_value)); self } }; } /// A `set!` macro to get our `set` functions macro_rules! set { ($func: ident, $builder: ident, $build: ident) => { pub fn $func(&mut self) -> &mut $build { self.$builder = $build::build(&*self.key); &mut self.$builder } }; } /// A `get!` macro to get our `get` functions macro_rules! get { ($func: ident, $return_type: ident, $builder: ident, $result: ident) => { pub fn $func(&mut self) -> Result<$return_type, Error> { let response = self.get(&*self.$builder.url.clone())?; let data_result: $result = serde_json::from_str(response.as_str())?; let data = data_result.result; Ok(data) } }; } /// A `builder!` macro to reduce boilerplate macro_rules! builder { ($builder: ident, $url: expr) => { #[derive(Debug, Default)] pub struct $builder { url: String, } impl $builder { fn build(key: &str) -> Self { Self { url: format!($url, key), } } } }; } /// The different types of errors we can receive, either while fetching data or while unpacking JSON #[derive(Debug)] pub enum Error { Http(hyper::Error), Json(serde_json::Error), Forbidden(&'static str), Message(String), } impl From<hyper::Error> for Error { fn from(e: hyper::Error) -> Error
} impl From<serde_json::Error> for Error { fn from(e: serde_json::Error) -> Error { Error::Json(e) } } /// The main `Dota2Api` of your library works by saving the state of all the invoked URLs (you only call the ones you need) #[derive(Debug, Default)] pub struct Dota2Api { http_client: Client, pub key: String, get_heroes_builder: GetHeroesBuilder, get_game_items_builder: GetGameItemsBuilder, get_rarities_builder: GetRaritiesBuilder, get_tournament_prize_pool_builder: GetTournamentPrizePoolBuilder, get_league_listing_builder: GetLeagueListingBuilder, get_live_league_games_builder: GetLiveLeagueGamesBuilder, get_top_live_game_builder: GetTopLiveGameBuilder, } impl Dota2Api { pub fn new(key: String) -> Self { Dota2Api { http_client: Client::new(), key, ..Default::default() } } set!(set_heroes, get_heroes_builder, GetHeroesBuilder); // use `set` before `get` get!(get_heroes, GetHeroes, get_heroes_builder, GetHeroesResult); set!(set_game_items, get_game_items_builder, GetGameItemsBuilder); // use `set` before `get` get!( get_game_items, GetGameItems, get_game_items_builder, GetGameItemsResult ); set!(set_rarities, get_rarities_builder, GetRaritiesBuilder); // use `set` before `get` get!( get_rarities, GetRarities, get_rarities_builder, GetRaritiesResult ); set!( set_tournament_prize_pool, get_tournament_prize_pool_builder, GetTournamentPrizePoolBuilder ); // use `set` before `get` get!( get_tournament_prize_pool, GetTournamentPrizePool, get_tournament_prize_pool_builder, GetTournamentPrizePoolResult ); set!( set_league_listing, get_league_listing_builder, GetLeagueListingBuilder ); // use `set` before `get` get!( get_league_listing, GetLeagueListing, get_league_listing_builder, GetLeagueListingResult ); set!( set_live_league_games, get_live_league_games_builder, GetLiveLeagueGamesBuilder ); // use `set` before `get` get!( get_live_league_games, GetLiveLeagueGames, get_live_league_games_builder, GetLiveLeagueGamesResult ); set!( set_top_live_game, get_top_live_game_builder, GetTopLiveGameBuilder ); // use `set` before `get` pub fn get_top_live_game(&mut self) -> Result<GetTopLiveGame, Error> { let response = self.get(&*self.get_top_live_game_builder.url.clone())?; let data_result: GetTopLiveGame = serde_json::from_str(response.as_str())?; let data = data_result; Ok(data) } /// our get function to actually get the data from the api fn get(&mut self, url: &str) -> Result<String, Error> { let mut response = self.http_client.get(url).send()?; let mut temp = String::new(); if response.status == StatusCode::Forbidden { return Err(Error::Forbidden( "Access is denied. Retrying will not help. Please check your API key.", )); } let _ = response.read_to_string(&mut temp); Ok(temp) } } //============================================================================== //IEconDOTA2_570 //============================================================================== builder!( GetHeroesBuilder, "http://api.steampowered.com/IEconDOTA2_570/GetHeroes/v1/?key={}&" ); impl GetHeroesBuilder { /// itemizedonly (Optional) (bool) : Return a list of itemized heroes only. 
pub fn itemized_only(&mut self, param_value: bool) -> &mut Self { self.url .push_str(&*format!("itemizedonly={}&", param_value)); self } language!(); } builder!( GetGameItemsBuilder, "http://api.steampowered.com/IEconDOTA2_570/GetGameItems/v1/?key={}&" ); impl GetGameItemsBuilder { language!(); } builder!( GetRaritiesBuilder, "http://api.steampowered.com/IEconDOTA2_570/GetRarities/v1/?key={}&" ); impl GetRaritiesBuilder { language!(); } builder!( GetTournamentPrizePoolBuilder, "http://api.steampowered.com/IEconDOTA2_570/GetTournamentPrizePool/v1/?key={}&" ); impl GetTournamentPrizePoolBuilder { /// leagueid (Optional) (int) : The ID of the league to get the prize pool of. pub fn league_id(&mut self, param_value: usize) -> &mut Self { self.url.push_str(&*format!("leagueid={}&", param_value)); self } language!(); } //============================================================================== //IDOTA2Match_205790 //============================================================================== builder!( GetLeagueListingBuilder, "http://api.steampowered.com/IDOTA2Match_205790/GetLeagueListing/v1/?key={}&" ); impl GetLeagueListingBuilder { language!(); } //============================================================================== //IDOTA2Match_570 //============================================================================== builder!( GetLiveLeagueGamesBuilder, "http://api.steampowered.com/IDOTA2Match_570/GetLiveLeagueGames/v1/?key={}&" ); impl GetLiveLeagueGamesBuilder { language!(); /// Only show matches of the specified league id pub fn league_id(&mut self, param_value: usize) -> &mut Self { self.url.push_str(&*format!("league_id={}&", param_value)); self } /// Only show matches of the specified match id pub fn match_id(&mut self, param_value: usize) -> &mut Self { self.url.push_str(&*format!("match_id={}&", param_value)); self } } builder!( GetTopLiveGameBuilder, "http://api.steampowered.com/IDOTA2Match_570/GetTopLiveGame/v1/?key={}&" ); impl GetTopLiveGameBuilder { language!(); /// Which partner's games to use pub fn partner(&mut self, param_value: usize) -> &mut Self { self.url.push_str(&*format!("partner={}&", param_value)); self } }
{ Error::Http(e) }
identifier_body
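For reference, here is a hedged sketch of what the `set!`/`get!` pair above expands to for the heroes endpoint. This is a hand-written paraphrase of the macro output, not compiler expansion, using only names that appear in the source:

```rust
impl Dota2Api {
    // `set!(set_heroes, get_heroes_builder, GetHeroesBuilder)`:
    // rebuild the builder from the key, then hand back a mutable
    // reference so optional parameters can be chained onto the URL
    pub fn set_heroes(&mut self) -> &mut GetHeroesBuilder {
        self.get_heroes_builder = GetHeroesBuilder::build(&*self.key);
        &mut self.get_heroes_builder
    }

    // `get!(get_heroes, GetHeroes, get_heroes_builder, GetHeroesResult)`:
    // fetch the prepared URL, deserialize the JSON envelope, and
    // unwrap its `result` field
    pub fn get_heroes(&mut self) -> Result<GetHeroes, Error> {
        let response = self.get(&*self.get_heroes_builder.url.clone())?;
        let data_result: GetHeroesResult = serde_json::from_str(response.as_str())?;
        Ok(data_result.result)
    }
}
```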
lib.rs
//! This crate serves as bindings to the official (outdated) //! [dota2 webapi](https://dev.dota2.com/forum/dota-2/spectating/replays/webapi/60177-things-you-should-know-before-starting?t=58317) //! The crate has been made so you can make calls directly and get a result back in a struct. //! //! Read the full list of API (outdated) calls [here](https://wiki.teamfortress.com/wiki/WebAPI#Dota_2). //! //! Use [xpaw](https://steamapi.xpaw.me/#) for the latest. //! //! The webapi terms are the same as the official ones except they are all in lowercase, e.g. `GetGameItems` is now `get_game_items()`. //! //! You also need a key that you can get [here](http://steamcommunity.com/dev/apikey). //! > Originally posted by Zoid at [forum](https://dev.dota2.com/forum/dota-2/spectating/replays/webapi/60177-things-you-should-know-before-starting?t=58317) When you go to http://steamcommunity.com/dev/apikey the "domain" //! > field is just a note. It's not actually used for anything and is just a helpful field so you can //! > tell us what your website is. You can just put your name in for now. Once you get a key, its what //! > uniquely identifies you when accessing our WebAPI calls. //! //! In your `main.rs`, or anywhere you intend to use the library, create an immutable string holding //! your token and pass it in first; there are no calls without the token. //! ```rust //! //main.rs //! use dota2_webapi_bindings::Dota2Api; //! static DOTA2_KEY: &str = "0123456789"; //example token //! //! fn main() { //! let mut dota = Dota2Api::new(String::from(DOTA2_KEY)); //! // we use `set` to configure the URL first //! dota.set_heroes().itemized_only(true).language("zh_zh"); //! // you can also write the above as just `dota.set_heroes();` or `dota.set_heroes().itemized_only(true);` //! // or just `dota.set_heroes().language("zh_zh");` or `dota.set_heroes().language("zh_zh").itemized_only(true);` //! // our builder-like functions take care of optional parameters //! //! // and finally `get` to retrieve our struct //! let data = dota.get_heroes().expect("something went wrong, ez mid"); //! } //! //! ``` //! //! ##### Available calls : //! * IEconDOTA2_570 //! * GetGameItems //! * GetHeroes //! * GetRarities //! * GetTournamentPrizePool //! * IDOTA2Match_205790 //! * GetLeagueListing //! * IDOTA2Match_570 //! * GetLiveLeagueGames //! * GetTopLiveGame //! //! **Note:** Try using `language()` with everything; just put in any string. It seems to give more readable names //! and descriptions for some reason. No default has been set up because sometimes that might not be your intention. #[macro_use] extern crate serde_derive; extern crate hyper; extern crate serde_json; pub mod dota; use hyper::status::StatusCode; use hyper::Client; use std::io::Read; use crate::dota::{ get_game_items::*, get_heroes::*, get_league_listing::*, get_live_league_games::*, get_rarities::*, get_top_live_game::*, get_tournament_prize_pool::*, }; /// language macro for easy implementation in various builder structs /// /// The language to retrieve results in (default is en_us) (see http://en.wikipedia.org/wiki/ISO_639-1 for /// the language codes (first two characters) and http://en.wikipedia.org/wiki/List_of_ISO_639-1_codes for /// the country codes (last two characters)) /// /// language (Optional) (string) : The language to provide output in. /// /// **Note:** Try using `language()` with everything; just put in any string. It seems to give more readable names /// and descriptions for some reason macro_rules! 
language { () => { pub fn language(&mut self, param_value: &str) -> &mut Self { self.url.push_str(&*format!("language={}&", param_value)); self } }; } /// A `set!` macro to get our `set` functions macro_rules! set { ($func: ident, $builder: ident, $build: ident) => {
self.$builder = $build::build(&*self.key); &mut self.$builder } }; } /// A `get!` macro to get our `get` functions macro_rules! get { ($func: ident, $return_type: ident, $builder: ident, $result: ident) => { pub fn $func(&mut self) -> Result<$return_type, Error> { let response = self.get(&*self.$builder.url.clone())?; let data_result: $result = serde_json::from_str(response.as_str())?; let data = data_result.result; Ok(data) } }; } /// A `builder!` macro to reduce boilerplate macro_rules! builder { ($builder: ident, $url: expr) => { #[derive(Debug, Default)] pub struct $builder { url: String, } impl $builder { fn build(key: &str) -> Self { Self { url: format!($url, key), } } } }; } /// The different types of errors we can receive, either while fetching data or while unpacking JSON #[derive(Debug)] pub enum Error { Http(hyper::Error), Json(serde_json::Error), Forbidden(&'static str), Message(String), } impl From<hyper::Error> for Error { fn from(e: hyper::Error) -> Error { Error::Http(e) } } impl From<serde_json::Error> for Error { fn from(e: serde_json::Error) -> Error { Error::Json(e) } } /// The main `Dota2Api` of your library works by saving the state of all the invoked URLs (you only call the ones you need) #[derive(Debug, Default)] pub struct Dota2Api { http_client: Client, pub key: String, get_heroes_builder: GetHeroesBuilder, get_game_items_builder: GetGameItemsBuilder, get_rarities_builder: GetRaritiesBuilder, get_tournament_prize_pool_builder: GetTournamentPrizePoolBuilder, get_league_listing_builder: GetLeagueListingBuilder, get_live_league_games_builder: GetLiveLeagueGamesBuilder, get_top_live_game_builder: GetTopLiveGameBuilder, } impl Dota2Api { pub fn new(key: String) -> Self { Dota2Api { http_client: Client::new(), key, ..Default::default() } } set!(set_heroes, get_heroes_builder, GetHeroesBuilder); // use `set` before `get` get!(get_heroes, GetHeroes, get_heroes_builder, GetHeroesResult); set!(set_game_items, get_game_items_builder, GetGameItemsBuilder); // use `set` before `get` get!( get_game_items, GetGameItems, get_game_items_builder, GetGameItemsResult ); set!(set_rarities, get_rarities_builder, GetRaritiesBuilder); // use `set` before `get` get!( get_rarities, GetRarities, get_rarities_builder, GetRaritiesResult ); set!( set_tournament_prize_pool, get_tournament_prize_pool_builder, GetTournamentPrizePoolBuilder ); // use `set` before `get` get!( get_tournament_prize_pool, GetTournamentPrizePool, get_tournament_prize_pool_builder, GetTournamentPrizePoolResult ); set!( set_league_listing, get_league_listing_builder, GetLeagueListingBuilder ); // use `set` before `get` get!( get_league_listing, GetLeagueListing, get_league_listing_builder, GetLeagueListingResult ); set!( set_live_league_games, get_live_league_games_builder, GetLiveLeagueGamesBuilder ); // use `set` before `get` get!( get_live_league_games, GetLiveLeagueGames, get_live_league_games_builder, GetLiveLeagueGamesResult ); set!( set_top_live_game, get_top_live_game_builder, GetTopLiveGameBuilder ); // use `set` before `get` pub fn get_top_live_game(&mut self) -> Result<GetTopLiveGame, Error> { let response = 
self.get(&*self.get_top_live_game_builder.url.clone())?; let data_result: GetTopLiveGame = serde_json::from_str(response.as_str())?; let data = data_result; Ok(data) } /// our get function to actually get the data from the api fn get(&mut self, url: &str) -> Result<String, Error> { let mut response = self.http_client.get(url).send()?; let mut temp = String::new(); if response.status == StatusCode::Forbidden { return Err(Error::Forbidden( "Access is denied. Retrying will not help. Please check your API key.", )); } let _ = response.read_to_string(&mut temp); Ok(temp) } } //============================================================================== //IEconDOTA2_570 //============================================================================== builder!( GetHeroesBuilder, "http://api.steampowered.com/IEconDOTA2_570/GetHeroes/v1/?key={}&" ); impl GetHeroesBuilder { /// itemizedonly (Optional) (bool) : Return a list of itemized heroes only. pub fn itemized_only(&mut self, param_value: bool) -> &mut Self { self.url .push_str(&*format!("itemizedonly={}&", param_value)); self } language!(); } builder!( GetGameItemsBuilder, "http://api.steampowered.com/IEconDOTA2_570/GetGameItems/v1/?key={}&" ); impl GetGameItemsBuilder { language!(); } builder!( GetRaritiesBuilder, "http://api.steampowered.com/IEconDOTA2_570/GetRarities/v1/?key={}&" ); impl GetRaritiesBuilder { language!(); } builder!( GetTournamentPrizePoolBuilder, "http://api.steampowered.com/IEconDOTA2_570/GetTournamentPrizePool/v1/?key={}&" ); impl GetTournamentPrizePoolBuilder { /// leagueid (Optional) (int) : The ID of the league to get the prize pool of. pub fn league_id(&mut self, param_value: usize) -> &mut Self { self.url.push_str(&*format!("leagueid={}&", param_value)); self } language!(); } //============================================================================== //IDOTA2Match_205790 //============================================================================== builder!( GetLeagueListingBuilder, "http://api.steampowered.com/IDOTA2Match_205790/GetLeagueListing/v1/?key={}&" ); impl GetLeagueListingBuilder { language!(); } //============================================================================== //IDOTA2Match_570 //============================================================================== builder!( GetLiveLeagueGamesBuilder, "http://api.steampowered.com/IDOTA2Match_570/GetLiveLeagueGames/v1/?key={}&" ); impl GetLiveLeagueGamesBuilder { language!(); /// Only show matches of the specified league id pub fn league_id(&mut self, param_value: usize) -> &mut Self { self.url.push_str(&*format!("league_id={}&", param_value)); self } /// Only show matches of the specified match id pub fn match_id(&mut self, param_value: usize) -> &mut Self { self.url.push_str(&*format!("match_id={}&", param_value)); self } } builder!( GetTopLiveGameBuilder, "http://api.steampowered.com/IDOTA2Match_570/GetTopLiveGame/v1/?key={}&" ); impl GetTopLiveGameBuilder { language!(); /// Which partner's games to use pub fn partner(&mut self, param_value: usize) -> &mut Self { self.url.push_str(&*format!("partner={}&", param_value)); self } }
pub fn $func(&mut self) -> &mut $build {
random_line_split
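The `builder!` macro and the parameter methods assemble the query string by plain concatenation: the base URL already ends in `&`, and each method appends another `key=value&` pair, so parameters can be chained in any order. A minimal, runnable sketch of the URL that `set_heroes().itemized_only(true).language("zh_zh")` would produce (string literals copied from the source; the key is a placeholder):

```rust
fn main() {
    let key = "0123456789"; // placeholder key
    // what `GetHeroesBuilder::build(key)` produces
    let mut url = format!(
        "http://api.steampowered.com/IEconDOTA2_570/GetHeroes/v1/?key={}&",
        key
    );
    // what `.itemized_only(true)` and `.language("zh_zh")` append
    url.push_str(&format!("itemizedonly={}&", true));
    url.push_str(&format!("language={}&", "zh_zh"));
    assert_eq!(
        url,
        "http://api.steampowered.com/IEconDOTA2_570/GetHeroes/v1/?key=0123456789&itemizedonly=true&language=zh_zh&"
    );
}
```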
lib.rs
//! This crate serves as bindings to the official (outdated) //! [dota2 webapi](https://dev.dota2.com/forum/dota-2/spectating/replays/webapi/60177-things-you-should-know-before-starting?t=58317) //! The crate has been made so you can make calls directly and get a result back in a struct. //! //! Read the full list of API (outdated) calls [here](https://wiki.teamfortress.com/wiki/WebAPI#Dota_2). //! //! Use [xpaw](https://steamapi.xpaw.me/#) for the latest. //! //! The webapi terms are the same as the official ones except they are all in lowercase, e.g. `GetGameItems` is now `get_game_items()`. //! //! You also need a key that you can get [here](http://steamcommunity.com/dev/apikey). //! > Originally posted by Zoid at [forum](https://dev.dota2.com/forum/dota-2/spectating/replays/webapi/60177-things-you-should-know-before-starting?t=58317) When you go to http://steamcommunity.com/dev/apikey the "domain" //! > field is just a note. It's not actually used for anything and is just a helpful field so you can //! > tell us what your website is. You can just put your name in for now. Once you get a key, its what //! > uniquely identifies you when accessing our WebAPI calls. //! //! In your `main.rs`, or anywhere you intend to use the library, create an immutable string holding //! your token and pass it in first; there are no calls without the token. //! ```rust //! //main.rs //! use dota2_webapi_bindings::Dota2Api; //! static DOTA2_KEY: &str = "0123456789"; //example token //! //! fn main() { //! let mut dota = Dota2Api::new(String::from(DOTA2_KEY)); //! // we use `set` to configure the URL first //! dota.set_heroes().itemized_only(true).language("zh_zh"); //! // you can also write the above as just `dota.set_heroes();` or `dota.set_heroes().itemized_only(true);` //! // or just `dota.set_heroes().language("zh_zh");` or `dota.set_heroes().language("zh_zh").itemized_only(true);` //! // our builder-like functions take care of optional parameters //! //! // and finally `get` to retrieve our struct //! let data = dota.get_heroes().expect("something went wrong, ez mid"); //! } //! //! ``` //! //! ##### Available calls : //! * IEconDOTA2_570 //! * GetGameItems //! * GetHeroes //! * GetRarities //! * GetTournamentPrizePool //! * IDOTA2Match_205790 //! * GetLeagueListing //! * IDOTA2Match_570 //! * GetLiveLeagueGames //! * GetTopLiveGame //! //! **Note:** Try using `language()` with everything; just put in any string. It seems to give more readable names //! and descriptions for some reason. No default has been set up because sometimes that might not be your intention. #[macro_use] extern crate serde_derive; extern crate hyper; extern crate serde_json; pub mod dota; use hyper::status::StatusCode; use hyper::Client; use std::io::Read; use crate::dota::{ get_game_items::*, get_heroes::*, get_league_listing::*, get_live_league_games::*, get_rarities::*, get_top_live_game::*, get_tournament_prize_pool::*, }; /// language macro for easy implementation in various builder structs /// /// The language to retrieve results in (default is en_us) (see http://en.wikipedia.org/wiki/ISO_639-1 for /// the language codes (first two characters) and http://en.wikipedia.org/wiki/List_of_ISO_639-1_codes for /// the country codes (last two characters)) /// /// language (Optional) (string) : The language to provide output in. /// /// **Note:** Try using `language()` with everything; just put in any string. It seems to give more readable names /// and descriptions for some reason macro_rules! 
language { () => { pub fn language(&mut self, param_value: &str) -> &mut Self { self.url.push_str(&*format!("language={}&", param_value)); self } }; } /// A `set!` macro to get our `set` functions macro_rules! set { ($func: ident, $builder: ident, $build: ident) => { pub fn $func(&mut self) -> &mut $build { self.$builder = $build::build(&*self.key); &mut self.$builder } }; } /// A `get!` macro to get our `get` functions macro_rules! get { ($func: ident, $return_type: ident, $builder: ident, $result: ident) => { pub fn $func(&mut self) -> Result<$return_type, Error> { let response = self.get(&*self.$builder.url.clone())?; let data_result: $result = serde_json::from_str(response.as_str())?; let data = data_result.result; Ok(data) } }; } /// A `builder!` macro to reduce boilerplate macro_rules! builder { ($builder: ident, $url: expr) => { #[derive(Debug, Default)] pub struct $builder { url: String, } impl $builder { fn build(key: &str) -> Self { Self { url: format!($url, key), } } } }; } /// The different types of errors we can receive, either while fetching data or while unpacking JSON #[derive(Debug)] pub enum Error { Http(hyper::Error), Json(serde_json::Error), Forbidden(&'static str), Message(String), } impl From<hyper::Error> for Error { fn from(e: hyper::Error) -> Error { Error::Http(e) } } impl From<serde_json::Error> for Error { fn from(e: serde_json::Error) -> Error { Error::Json(e) } } /// The main `Dota2Api` of your library works by saving the state of all the invoked URLs (you only call the ones you need) 
#[derive(Debug, Default)] pub struct Dota2Api { http_client: Client, pub key: String, get_heroes_builder: GetHeroesBuilder, get_game_items_builder: GetGameItemsBuilder, get_rarities_builder: GetRaritiesBuilder, get_tournament_prize_pool_builder: GetTournamentPrizePoolBuilder, get_league_listing_builder: GetLeagueListingBuilder, get_live_league_games_builder: GetLiveLeagueGamesBuilder, get_top_live_game_builder: GetTopLiveGameBuilder, } impl Dota2Api { pub fn new(key: String) -> Self { Dota2Api { http_client: Client::new(), key, ..Default::default() } } set!(set_heroes, get_heroes_builder, GetHeroesBuilder); // use `set` before `get` get!(get_heroes, GetHeroes, get_heroes_builder, GetHeroesResult); set!(set_game_items, get_game_items_builder, GetGameItemsBuilder); // use `set` before `get` get!( get_game_items, GetGameItems, get_game_items_builder, GetGameItemsResult ); set!(set_rarities, get_rarities_builder, GetRaritiesBuilder); // use `set` before `get` get!( get_rarities, GetRarities, get_rarities_builder, GetRaritiesResult ); set!( set_tournament_prize_pool, get_tournament_prize_pool_builder, GetTournamentPrizePoolBuilder ); // use `set` before `get` get!( get_tournament_prize_pool, GetTournamentPrizePool, get_tournament_prize_pool_builder, GetTournamentPrizePoolResult ); set!( set_league_listing, get_league_listing_builder, GetLeagueListingBuilder ); // use `set` before `get` get!( get_league_listing, GetLeagueListing, get_league_listing_builder, GetLeagueListingResult ); set!( set_live_league_games, get_live_league_games_builder, GetLiveLeagueGamesBuilder ); // use `set` before `get` get!( get_live_league_games, GetLiveLeagueGames, get_live_league_games_builder, GetLiveLeagueGamesResult ); set!( set_top_live_game, get_top_live_game_builder, GetTopLiveGameBuilder ); // use `set` before `get` pub fn get_top_live_game(&mut self) -> Result<GetTopLiveGame, Error> { let response = self.get(&*self.get_top_live_game_builder.url.clone())?; let data_result: GetTopLiveGame = serde_json::from_str(response.as_str())?; let data = data_result; Ok(data) } /// our get function to actually get the data from the api fn get(&mut self, url: &str) -> Result<String, Error> { let mut response = self.http_client.get(url).send()?; let mut temp = String::new(); if response.status == StatusCode::Forbidden { return Err(Error::Forbidden( "Access is denied. Retrying will not help. Please check your API key.", )); } let _ = response.read_to_string(&mut temp); Ok(temp) } } //============================================================================== //IEconDOTA2_570 //============================================================================== builder!( GetHeroesBuilder, "http://api.steampowered.com/IEconDOTA2_570/GetHeroes/v1/?key={}&" ); impl GetHeroesBuilder { /// itemizedonly (Optional) (bool) : Return a list of itemized heroes only. pub fn itemized_only(&mut self, param_value: bool) -> &mut Self { self.url .push_str(&*format!("itemizedonly={}&", param_value)); self } language!(); } builder!( GetGameItemsBuilder, "http://api.steampowered.com/IEconDOTA2_570/GetGameItems/v1/?key={}&" ); impl GetGameItemsBuilder { language!(); } builder!( GetRaritiesBuilder, "http://api.steampowered.com/IEconDOTA2_570/GetRarities/v1/?key={}&" ); impl GetRaritiesBuilder { language!(); } builder!( GetTournamentPrizePoolBuilder, "http://api.steampowered.com/IEconDOTA2_570/GetTournamentPrizePool/v1/?key={}&" ); impl GetTournamentPrizePoolBuilder { /// leagueid (Optional) (int) : The ID of the league to get the prize pool of. 
pub fn league_id(&mut self, param_value: usize) -> &mut Self { self.url.push_str(&*format!("leagueid={}&", param_value)); self } language!(); } //============================================================================== //IDOTA2Match_205790 //============================================================================== builder!( GetLeagueListingBuilder, "http://api.steampowered.com/IDOTA2Match_205790/GetLeagueListing/v1/?key={}&" ); impl GetLeagueListingBuilder { language!(); } //============================================================================== //IDOTA2Match_570 //============================================================================== builder!( GetLiveLeagueGamesBuilder, "http://api.steampowered.com/IDOTA2Match_570/GetLiveLeagueGames/v1/?key={}&" ); impl GetLiveLeagueGamesBuilder { language!(); /// Only show matches of the specified league id pub fn league_id(&mut self, param_value: usize) -> &mut Self { self.url.push_str(&*format!("league_id={}&", param_value)); self } /// Only show matches of the specified match id pub fn
(&mut self, param_value: usize) -> &mut Self { self.url.push_str(&*format!("match_id={}&", param_value)); self } } builder!( GetTopLiveGameBuilder, "http://api.steampowered.com/IDOTA2Match_570/GetTopLiveGame/v1/?key={}&" ); impl GetTopLiveGameBuilder { language!(); /// Which partner's games to use pub fn partner(&mut self, param_value: usize) -> &mut Self { self.url.push_str(&*format!("partner={}&", param_value)); self } }
match_id
identifier_name
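Since this record's split lands inside `GetLiveLeagueGamesBuilder`, a short usage sketch of its filters may help. The key and league ID below are placeholders, and the response is ignored rather than printed because the field layout of `GetLiveLeagueGames` is defined elsewhere in `dota::get_live_league_games`:

```rust
use dota2_webapi_bindings::Dota2Api;

fn main() {
    let mut dota = Dota2Api::new(String::from("0123456789")); // placeholder key
    // narrow the call to one league; `match_id` works the same way
    dota.set_live_league_games().league_id(1234).language("en_us");
    match dota.get_live_league_games() {
        Ok(_games) => println!("got live league games"),
        Err(e) => eprintln!("request failed: {:?}", e),
    }
}
```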
lib.rs
//! This crate serves as bindings to the official (outdated) //! [dota2 webapi](https://dev.dota2.com/forum/dota-2/spectating/replays/webapi/60177-things-you-should-know-before-starting?t=58317) //! The crate has been made so you can make calls directly and get a result back in a struct. //! //! Read the full list of API (outdated) calls [here](https://wiki.teamfortress.com/wiki/WebAPI#Dota_2). //! //! Use [xpaw](https://steamapi.xpaw.me/#) for the latest. //! //! The webapi terms are the same as the official ones except they are all in lowercase, e.g. `GetGameItems` is now `get_game_items()`. //! //! You also need a key that you can get [here](http://steamcommunity.com/dev/apikey). //! > Originally posted by Zoid at [forum](https://dev.dota2.com/forum/dota-2/spectating/replays/webapi/60177-things-you-should-know-before-starting?t=58317) When you go to http://steamcommunity.com/dev/apikey the "domain" //! > field is just a note. It's not actually used for anything and is just a helpful field so you can //! > tell us what your website is. You can just put your name in for now. Once you get a key, its what //! > uniquely identifies you when accessing our WebAPI calls. //! //! In your `main.rs`, or anywhere you intend to use the library, create an immutable string holding //! your token and pass it in first; there are no calls without the token. //! ```rust //! //main.rs //! use dota2_webapi_bindings::Dota2Api; //! static DOTA2_KEY: &str = "0123456789"; //example token //! //! fn main() { //! let mut dota = Dota2Api::new(String::from(DOTA2_KEY)); //! // we use `set` to configure the URL first //! dota.set_heroes().itemized_only(true).language("zh_zh"); //! // you can also write the above as just `dota.set_heroes();` or `dota.set_heroes().itemized_only(true);` //! // or just `dota.set_heroes().language("zh_zh");` or `dota.set_heroes().language("zh_zh").itemized_only(true);` //! // our builder-like functions take care of optional parameters //! //! // and finally `get` to retrieve our struct //! let data = dota.get_heroes().expect("something went wrong, ez mid"); //! } //! //! ``` //! //! ##### Available calls : //! * IEconDOTA2_570 //! * GetGameItems //! * GetHeroes //! * GetRarities //! * GetTournamentPrizePool //! * IDOTA2Match_205790 //! * GetLeagueListing //! * IDOTA2Match_570 //! * GetLiveLeagueGames //! * GetTopLiveGame //! //! **Note:** Try using `language()` with everything; just put in any string. It seems to give more readable names //! and descriptions for some reason. No default has been set up because sometimes that might not be your intention. #[macro_use] extern crate serde_derive; extern crate hyper; extern crate serde_json; pub mod dota; use hyper::status::StatusCode; use hyper::Client; use std::io::Read; use crate::dota::{ get_game_items::*, get_heroes::*, get_league_listing::*, get_live_league_games::*, get_rarities::*, get_top_live_game::*, get_tournament_prize_pool::*, }; /// language macro for easy implementation in various builder structs /// /// The language to retrieve results in (default is en_us) (see http://en.wikipedia.org/wiki/ISO_639-1 for /// the language codes (first two characters) and http://en.wikipedia.org/wiki/List_of_ISO_639-1_codes for /// the country codes (last two characters)) /// /// language (Optional) (string) : The language to provide output in. /// /// **Note:** Try using `language()` with everything; just put in any string. It seems to give more readable names /// and descriptions for some reason macro_rules! 
language { () => { pub fn language(&mut self, param_value: &str) -> &mut Self { self.url.push_str(&*format!("language={}&", param_value)); self } }; } /// A `set!` macro to get our `set` functions macro_rules! set { ($func: ident, $builder: ident, $build: ident) => { pub fn $func(&mut self) -> &mut $build { self.$builder = $build::build(&*self.key); &mut self.$builder } }; } /// A `get!` macro to get our `get` functions macro_rules! get { ($func: ident, $return_type: ident, $builder: ident, $result: ident) => { pub fn $func(&mut self) -> Result<$return_type, Error> { let response = self.get(&*self.$builder.url.clone())?; let data_result: $result = serde_json::from_str(response.as_str())?; let data = data_result.result; Ok(data) } }; } /// A `builder!` macro to reduce boilerplate macro_rules! builder { ($builder: ident, $url: expr) => { #[derive(Debug, Default)] pub struct $builder { url: String, } impl $builder { fn build(key: &str) -> Self { Self { url: format!($url, key), } } } }; } /// The different types of errors we can receive, either while fetching data or while unpacking JSON #[derive(Debug)] pub enum Error { Http(hyper::Error), Json(serde_json::Error), Forbidden(&'static str), Message(String), } impl From<hyper::Error> for Error { fn from(e: hyper::Error) -> Error { Error::Http(e) } } impl From<serde_json::Error> for Error { fn from(e: serde_json::Error) -> Error { Error::Json(e) } } /// The main `Dota2Api` of your library works by saving the state of all the invoked URLs (you only call the ones you need) 
#[derive(Debug, Default)] pub struct Dota2Api { http_client: Client, pub key: String, get_heroes_builder: GetHeroesBuilder, get_game_items_builder: GetGameItemsBuilder, get_rarities_builder: GetRaritiesBuilder, get_tournament_prize_pool_builder: GetTournamentPrizePoolBuilder, get_league_listing_builder: GetLeagueListingBuilder, get_live_league_games_builder: GetLiveLeagueGamesBuilder, get_top_live_game_builder: GetTopLiveGameBuilder, } impl Dota2Api { pub fn new(key: String) -> Self { Dota2Api { http_client: Client::new(), key, ..Default::default() } } set!(set_heroes, get_heroes_builder, GetHeroesBuilder); // use `set` before `get` get!(get_heroes, GetHeroes, get_heroes_builder, GetHeroesResult); set!(set_game_items, get_game_items_builder, GetGameItemsBuilder); // use `set` before `get` get!( get_game_items, GetGameItems, get_game_items_builder, GetGameItemsResult ); set!(set_rarities, get_rarities_builder, GetRaritiesBuilder); // use `set` before `get` get!( get_rarities, GetRarities, get_rarities_builder, GetRaritiesResult ); set!( set_tournament_prize_pool, get_tournament_prize_pool_builder, GetTournamentPrizePoolBuilder ); // use `set` before `get` get!( get_tournament_prize_pool, GetTournamentPrizePool, get_tournament_prize_pool_builder, GetTournamentPrizePoolResult ); set!( set_league_listing, get_league_listing_builder, GetLeagueListingBuilder ); // use `set` before `get` get!( get_league_listing, GetLeagueListing, get_league_listing_builder, GetLeagueListingResult ); set!( set_live_league_games, get_live_league_games_builder, GetLiveLeagueGamesBuilder ); // use `set` before `get` get!( get_live_league_games, GetLiveLeagueGames, get_live_league_games_builder, GetLiveLeagueGamesResult ); set!( set_top_live_game, get_top_live_game_builder, GetTopLiveGameBuilder ); // use `set` before `get` pub fn get_top_live_game(&mut self) -> Result<GetTopLiveGame, Error> { let response = self.get(&*self.get_top_live_game_builder.url.clone())?; let data_result: GetTopLiveGame = serde_json::from_str(response.as_str())?; let data = data_result; Ok(data) } /// our get function to actually get the data from the api fn get(&mut self, url: &str) -> Result<String, Error> { let mut response = self.http_client.get(url).send()?; let mut temp = String::new(); if response.status == StatusCode::Forbidden
let _ = response.read_to_string(&mut temp); Ok(temp) } } //============================================================================== //IEconDOTA2_570 //============================================================================== builder!( GetHeroesBuilder, "http://api.steampowered.com/IEconDOTA2_570/GetHeroes/v1/?key={}&" ); impl GetHeroesBuilder { /// itemizedonly (Optional) (bool) : Return a list of itemized heroes only. pub fn itemized_only(&mut self, param_value: bool) -> &mut Self { self.url .push_str(&*format!("itemizedonly={}&", param_value)); self } language!(); } builder!( GetGameItemsBuilder, "http://api.steampowered.com/IEconDOTA2_570/GetGameItems/v1/?key={}&" ); impl GetGameItemsBuilder { language!(); } builder!( GetRaritiesBuilder, "http://api.steampowered.com/IEconDOTA2_570/GetRarities/v1/?key={}&" ); impl GetRaritiesBuilder { language!(); } builder!( GetTournamentPrizePoolBuilder, "http://api.steampowered.com/IEconDOTA2_570/GetTournamentPrizePool/v1/?key={}&" ); impl GetTournamentPrizePoolBuilder { /// leagueid (Optional) (int) : The ID of the league to get the prize pool of. pub fn league_id(&mut self, param_value: usize) -> &mut Self { self.url.push_str(&*format!("leagueid={}&", param_value)); self } language!(); } //============================================================================== //IDOTA2Match_205790 //============================================================================== builder!( GetLeagueListingBuilder, "http://api.steampowered.com/IDOTA2Match_205790/GetLeagueListing/v1/?key={}&" ); impl GetLeagueListingBuilder { language!(); } //============================================================================== //IDOTA2Match_570 //============================================================================== builder!( GetLiveLeagueGamesBuilder, "http://api.steampowered.com/IDOTA2Match_570/GetLiveLeagueGames/v1/?key={}&" ); impl GetLiveLeagueGamesBuilder { language!(); /// Only show matches of the specified league id pub fn league_id(&mut self, param_value: usize) -> &mut Self { self.url.push_str(&*format!("league_id={}&", param_value)); self } /// Only show matches of the specified match id pub fn match_id(&mut self, param_value: usize) -> &mut Self { self.url.push_str(&*format!("match_id={}&", param_value)); self } } builder!( GetTopLiveGameBuilder, "http://api.steampowered.com/IDOTA2Match_570/GetTopLiveGame/v1/?key={}&" ); impl GetTopLiveGameBuilder { language!(); /// Which partner's games to use pub fn partner(&mut self, param_value: usize) -> &mut Self { self.url.push_str(&*format!("partner={}&", param_value)); self } }
{ return Err(Error::Forbidden( "Access is denied. Retrying will not help. Please check your API key.", )); }
conditional_block
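Every `get_*` call funnels failures into the `Error` enum above, so callers can tell transport failures, bad JSON, and a rejected key apart. A minimal sketch, with a placeholder key:

```rust
use dota2_webapi_bindings::{Dota2Api, Error};

fn main() {
    let mut dota = Dota2Api::new(String::from("0123456789")); // placeholder key
    dota.set_rarities(); // use `set` before `get`
    if let Err(err) = dota.get_rarities() {
        match err {
            Error::Http(e) => eprintln!("transport error: {}", e),
            Error::Json(e) => eprintln!("could not unpack JSON: {}", e),
            Error::Forbidden(msg) => eprintln!("forbidden: {}", msg),
            Error::Message(msg) => eprintln!("other error: {}", msg),
        }
    }
}
```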
lib.rs
//! Error handling layer for axum that supports extractors and async functions. //! //! This crate provides [`HandleErrorLayer`] which works similarly to //! [`axum::error_handling::HandleErrorLayer`] except that it supports //! extractors and async functions: //! //! ```rust //! use axum::{ //! Router, //! BoxError, //! response::IntoResponse, //! http::{StatusCode, Method, Uri}, //! routing::get, //! }; //! use tower::{ServiceBuilder, timeout::error::Elapsed}; //! use std::time::Duration; //! use axum_handle_error_extract::HandleErrorLayer; //! //! let app = Router::new() //! .route("/", get(|| async {})) //! .layer( //! ServiceBuilder::new() //! // timeouts produce errors, so we handle those with `handle_error` //! .layer(HandleErrorLayer::new(handle_error)) //! .timeout(Duration::from_secs(10)) //! ); //! //! // our handler can take 0 to 16 extractors and the final argument must //! // always be the error produced by the middleware //! async fn handle_error( //! method: Method, //! uri: Uri, //! error: BoxError, //! ) -> impl IntoResponse { //! if error.is::<Elapsed>() { //! ( //! StatusCode::REQUEST_TIMEOUT, //! format!("{} {} took too long", method, uri), //! ) //! } else { //! ( //! StatusCode::INTERNAL_SERVER_ERROR, //! format!("{} {} failed: {}", method, uri, error), //! ) //! } //! } //! # async { //! # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); //! # }; //! ``` //! //! Not running any extractors is also supported: //! //! ```rust //! use axum::{ //! Router, //! BoxError, //! response::IntoResponse, //! http::StatusCode, //! routing::get, //! }; //! use tower::{ServiceBuilder, timeout::error::Elapsed}; //! use std::time::Duration; //! use axum_handle_error_extract::HandleErrorLayer; //! //! let app = Router::new() //! .route("/", get(|| async {})) //! .layer( //! ServiceBuilder::new() //! .layer(HandleErrorLayer::new(handle_error)) //! .timeout(Duration::from_secs(10)) //! ); //! //! // this function just takes the error //! async fn handle_error(error: BoxError) -> impl IntoResponse { //! if error.is::<Elapsed>() { //! ( //! StatusCode::REQUEST_TIMEOUT, //! "Request timeout".to_string(), //! ) //! } else { //! ( //! StatusCode::INTERNAL_SERVER_ERROR, //! format!("Unhandled internal error: {}", error), //! ) //! } //! } //! # async { //! # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); //! # }; //! ``` //! //! See [`axum::error_handling`] for more details on axum's error handling model and //! [`axum::extract`] for more details on extractors. //! //! # The future //! //! In axum 0.4 this will replace the current [`axum::error_handling::HandleErrorLayer`]. #![warn( clippy::all, clippy::dbg_macro, clippy::todo, clippy::empty_enum, clippy::enum_glob_use, clippy::mem_forget, clippy::unused_self, clippy::filter_map_next, clippy::needless_continue, clippy::needless_borrow, clippy::match_wildcard_for_single_variants, clippy::if_let_mutex, clippy::mismatched_target_os, clippy::await_holding_lock, clippy::match_on_vec_items,
clippy::suboptimal_flops, clippy::lossy_float_literal, clippy::rest_pat_in_fully_bound_structs, clippy::fn_params_excessive_bools, clippy::exit, clippy::inefficient_to_string, clippy::linkedlist, clippy::macro_use_imports, clippy::option_option, clippy::verbose_file_reads, clippy::unnested_or_patterns, rust_2018_idioms, future_incompatible, nonstandard_style, missing_debug_implementations, missing_docs )] #![deny(unreachable_pub, private_in_public)] #![allow(elided_lifetimes_in_paths, clippy::type_complexity)] #![forbid(unsafe_code)] #![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(test, allow(clippy::float_cmp))] use axum::{ body::{box_body, BoxBody, Bytes, Full, HttpBody}, extract::{FromRequest, RequestParts}, http::{Request, Response, StatusCode}, response::IntoResponse, BoxError, }; use pin_project_lite::pin_project; use std::{ convert::Infallible, fmt, future::Future, marker::PhantomData, pin::Pin, task::{Context, Poll}, }; use tower::ServiceExt; use tower_layer::Layer; use tower_service::Service; /// [`Layer`] that applies [`HandleError`] which is a [`Service`] adapter /// that handles errors by converting them into responses. /// /// See [module docs](self) for more details on axum's error handling model. pub struct HandleErrorLayer<F, T> { f: F, _extractor: PhantomData<fn() -> T>, } impl<F, T> HandleErrorLayer<F, T> { /// Create a new `HandleErrorLayer`. pub fn new(f: F) -> Self { Self { f, _extractor: PhantomData, } } } impl<F, T> Clone for HandleErrorLayer<F, T> where F: Clone, { fn clone(&self) -> Self { Self { f: self.f.clone(), _extractor: PhantomData, } } } impl<F, E> fmt::Debug for HandleErrorLayer<F, E> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("HandleErrorLayer") .field("f", &format_args!("{}", std::any::type_name::<F>())) .finish() } } impl<S, F, T> Layer<S> for HandleErrorLayer<F, T> where F: Clone, { type Service = HandleError<S, F, T>; fn layer(&self, inner: S) -> Self::Service { HandleError::new(inner, self.f.clone()) } } /// A [`Service`] adapter that handles errors by converting them into responses. /// /// See [module docs](self) for more details on axum's error handling model. pub struct HandleError<S, F, T> { inner: S, f: F, _extractor: PhantomData<fn() -> T>, } impl<S, F, T> HandleError<S, F, T> { /// Create a new `HandleError`. 
pub fn new(inner: S, f: F) -> Self { Self { inner, f, _extractor: PhantomData, } } } impl<S, F, T> Clone for HandleError<S, F, T> where S: Clone, F: Clone, { fn clone(&self) -> Self { Self { inner: self.inner.clone(), f: self.f.clone(), _extractor: PhantomData, } } } impl<S, F, E> fmt::Debug for HandleError<S, F, E> where S: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("HandleError") .field("inner", &self.inner) .field("f", &format_args!("{}", std::any::type_name::<F>())) .finish() } } impl<S, F, ReqBody, ResBody, Fut, Res> Service<Request<ReqBody>> for HandleError<S, F, ()> where S: Service<Request<ReqBody>, Response = Response<ResBody>> + Clone + Send +'static, S::Error: Send, S::Future: Send, F: FnOnce(S::Error) -> Fut + Clone + Send +'static, Fut: Future<Output = Res> + Send, Res: IntoResponse, ReqBody: Send +'static, ResBody: HttpBody<Data = Bytes> + Send +'static, ResBody::Error: Into<BoxError>, { type Response = Response<BoxBody>; type Error = Infallible; type Future = ResponseFuture; fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { Poll::Ready(Ok(())) } fn call(&mut self, req: Request<ReqBody>) -> Self::Future { let f = self.f.clone(); let clone = self.inner.clone(); let inner = std::mem::replace(&mut self.inner, clone); let future = Box::pin(async move { match inner.oneshot(req).await { Ok(res) => Ok(res.map(box_body)), Err(err) => Ok(f(err).await.into_response().map(box_body)), } }); ResponseFuture { future } } } #[allow(unused_macros)] macro_rules! impl_service { ( $($ty:ident),* $(,)? ) => { impl<S, F, ReqBody, ResBody, Res, Fut, $($ty,)*> Service<Request<ReqBody>> for HandleError<S, F, ($($ty,)*)> where S: Service<Request<ReqBody>, Response = Response<ResBody>> + Clone + Send +'static, S::Error: Send, S::Future: Send, F: FnOnce($($ty),*, S::Error) -> Fut + Clone + Send +'static, Fut: Future<Output = Res> + Send, Res: IntoResponse, $( $ty: FromRequest<ReqBody> + Send,)* ReqBody: Send +'static, ResBody: HttpBody<Data = Bytes> + Send +'static, ResBody::Error: Into<BoxError>, { type Response = Response<BoxBody>; type Error = Infallible; type Future = ResponseFuture; fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { Poll::Ready(Ok(())) } #[allow(non_snake_case)] fn call(&mut self, req: Request<ReqBody>) -> Self::Future { let f = self.f.clone(); let clone = self.inner.clone(); let inner = std::mem::replace(&mut self.inner, clone); let future = Box::pin(async move { let mut req = RequestParts::new(req); $( let $ty = match $ty::from_request(&mut req).await { Ok(value) => value, Err(rejection) => return Ok(rejection.into_response().map(box_body)), }; )* let req = match req.try_into_request() { Ok(req) => req, Err(err) => { return Ok(Response::builder() .status(StatusCode::INTERNAL_SERVER_ERROR) .body(box_body(Full::from(err.to_string()))) .unwrap()); } }; match inner.oneshot(req).await { Ok(res) => Ok(res.map(box_body)), Err(err) => Ok(f($($ty),*, err).await.into_response().map(box_body)), } }); ResponseFuture { future } } } } } impl_service!(T1); impl_service!(T1, T2); impl_service!(T1, T2, T3); impl_service!(T1, T2, T3, T4); impl_service!(T1, T2, T3, T4, T5); impl_service!(T1, T2, T3, T4, T5, T6); impl_service!(T1, T2, T3, T4, T5, T6, T7); impl_service!(T1, T2, T3, T4, T5, T6, T7, T8); impl_service!(T1, T2, T3, T4, T5, T6, T7, T8, T9); impl_service!(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10); impl_service!(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11); impl_service!(T1, T2, T3, T4, 
T5, T6, T7, T8, T9, T10, T11, T12); impl_service!(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13); impl_service!(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14); impl_service!(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15); impl_service!(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16); pin_project! { /// Response future for [`HandleError`]. pub struct ResponseFuture { #[pin] future: Pin<Box<dyn Future<Output = Result<Response<BoxBody>, Infallible>> + Send +'static>>, } } impl Future for ResponseFuture { type Output = Result<Response<BoxBody>, Infallible>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { self.project().future.poll(cx) } } /// Extension trait to [`Service`] for handling errors by mapping them to /// responses. /// /// See [module docs](self) for more details on axum's error handling model. pub trait HandleErrorExt<B>: Service<Request<B>> + Sized { /// Apply a [`HandleError`] middleware. fn handle_error<F>(self, f: F) -> HandleError<Self, F, B> { HandleError::new(self, f) } } impl<B, S> HandleErrorExt<B> for S where S: Service<Request<B>> {}
clippy::imprecise_flops,
random_line_split
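One detail worth calling out from the examples above: `ServiceBuilder` applies layers top to bottom, so `HandleErrorLayer` must sit above the error-producing middleware to catch its errors, and because `HandleError`'s associated `Error` type is `Infallible`, the finished stack satisfies axum's requirement that services be infallible. A sketch restating the doc example with that ordering made explicit (the handler body and bind address are illustrative):

```rust
use axum::{http::StatusCode, routing::get, BoxError, Router};
use axum_handle_error_extract::HandleErrorLayer;
use std::time::Duration;
use tower::ServiceBuilder;

#[tokio::main]
async fn main() {
    let app = Router::new()
        .route("/", get(|| async {}))
        .layer(
            ServiceBuilder::new()
                // outermost: turns errors from the layers below into
                // responses, leaving the stack infallible as axum requires
                .layer(HandleErrorLayer::new(|error: BoxError| async move {
                    (StatusCode::INTERNAL_SERVER_ERROR, error.to_string())
                }))
                // produces timeout errors for the layer above to catch
                .timeout(Duration::from_secs(10)),
        );

    axum::Server::bind(&"0.0.0.0:3000".parse().unwrap())
        .serve(app.into_make_service())
        .await
        .unwrap();
}
```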
lib.rs
//! Error handling layer for axum that supports extractors and async functions. //! //! This crate provides [`HandleErrorLayer`] which works similarly to //! [`axum::error_handling::HandleErrorLayer`] except that it supports //! extractors and async functions: //! //! ```rust //! use axum::{ //! Router, //! BoxError, //! response::IntoResponse, //! http::{StatusCode, Method, Uri}, //! routing::get, //! }; //! use tower::{ServiceBuilder, timeout::error::Elapsed}; //! use std::time::Duration; //! use axum_handle_error_extract::HandleErrorLayer; //! //! let app = Router::new() //! .route("/", get(|| async {})) //! .layer( //! ServiceBuilder::new() //! // timeouts produce errors, so we handle those with `handle_error` //! .layer(HandleErrorLayer::new(handle_error)) //! .timeout(Duration::from_secs(10)) //! ); //! //! // our handler can take 0 to 16 extractors and the final argument must //! // always be the error produced by the middleware //! async fn handle_error( //! method: Method, //! uri: Uri, //! error: BoxError, //! ) -> impl IntoResponse { //! if error.is::<Elapsed>() { //! ( //! StatusCode::REQUEST_TIMEOUT, //! format!("{} {} took too long", method, uri), //! ) //! } else { //! ( //! StatusCode::INTERNAL_SERVER_ERROR, //! format!("{} {} failed: {}", method, uri, error), //! ) //! } //! } //! # async { //! # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); //! # }; //! ``` //! //! Not running any extractors is also supported: //! //! ```rust //! use axum::{ //! Router, //! BoxError, //! response::IntoResponse, //! http::StatusCode, //! routing::get, //! }; //! use tower::{ServiceBuilder, timeout::error::Elapsed}; //! use std::time::Duration; //! use axum_handle_error_extract::HandleErrorLayer; //! //! let app = Router::new() //! .route("/", get(|| async {})) //! .layer( //! ServiceBuilder::new() //! .layer(HandleErrorLayer::new(handle_error)) //! .timeout(Duration::from_secs(10)) //! ); //! //! // this function just takes the error //! async fn handle_error(error: BoxError) -> impl IntoResponse { //! if error.is::<Elapsed>() { //! ( //! StatusCode::REQUEST_TIMEOUT, //! "Request timeout".to_string(), //! ) //! } else { //! ( //! StatusCode::INTERNAL_SERVER_ERROR, //! format!("Unhandled internal error: {}", error), //! ) //! } //! } //! # async { //! # axum::Server::bind(&"".parse().unwrap()).serve(app.into_make_service()).await.unwrap(); //! # }; //! ``` //! //! See [`axum::error_handling`] for more details on axum's error handling model and //! [`axum::extract`] for more details on extractors. //! //! # The future //! //! In axum 0.4 this will replace the current [`axum::error_handling::HandleErrorLayer`]. 
#![warn( clippy::all, clippy::dbg_macro, clippy::todo, clippy::empty_enum, clippy::enum_glob_use, clippy::mem_forget, clippy::unused_self, clippy::filter_map_next, clippy::needless_continue, clippy::needless_borrow, clippy::match_wildcard_for_single_variants, clippy::if_let_mutex, clippy::mismatched_target_os, clippy::await_holding_lock, clippy::match_on_vec_items, clippy::imprecise_flops, clippy::suboptimal_flops, clippy::lossy_float_literal, clippy::rest_pat_in_fully_bound_structs, clippy::fn_params_excessive_bools, clippy::exit, clippy::inefficient_to_string, clippy::linkedlist, clippy::macro_use_imports, clippy::option_option, clippy::verbose_file_reads, clippy::unnested_or_patterns, rust_2018_idioms, future_incompatible, nonstandard_style, missing_debug_implementations, missing_docs )] #![deny(unreachable_pub, private_in_public)] #![allow(elided_lifetimes_in_paths, clippy::type_complexity)] #![forbid(unsafe_code)] #![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(test, allow(clippy::float_cmp))] use axum::{ body::{box_body, BoxBody, Bytes, Full, HttpBody}, extract::{FromRequest, RequestParts}, http::{Request, Response, StatusCode}, response::IntoResponse, BoxError, }; use pin_project_lite::pin_project; use std::{ convert::Infallible, fmt, future::Future, marker::PhantomData, pin::Pin, task::{Context, Poll}, }; use tower::ServiceExt; use tower_layer::Layer; use tower_service::Service; /// [`Layer`] that applies [`HandleError`] which is a [`Service`] adapter /// that handles errors by converting them into responses. /// /// See [module docs](self) for more details on axum's error handling model. pub struct HandleErrorLayer<F, T> { f: F, _extractor: PhantomData<fn() -> T>, } impl<F, T> HandleErrorLayer<F, T> { /// Create a new `HandleErrorLayer`. pub fn new(f: F) -> Self { Self { f, _extractor: PhantomData, } } } impl<F, T> Clone for HandleErrorLayer<F, T> where F: Clone, { fn
(&self) -> Self { Self { f: self.f.clone(), _extractor: PhantomData, } } } impl<F, E> fmt::Debug for HandleErrorLayer<F, E> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("HandleErrorLayer") .field("f", &format_args!("{}", std::any::type_name::<F>())) .finish() } } impl<S, F, T> Layer<S> for HandleErrorLayer<F, T> where F: Clone, { type Service = HandleError<S, F, T>; fn layer(&self, inner: S) -> Self::Service { HandleError::new(inner, self.f.clone()) } } /// A [`Service`] adapter that handles errors by converting them into responses. /// /// See [module docs](self) for more details on axum's error handling model. pub struct HandleError<S, F, T> { inner: S, f: F, _extractor: PhantomData<fn() -> T>, } impl<S, F, T> HandleError<S, F, T> { /// Create a new `HandleError`. pub fn new(inner: S, f: F) -> Self { Self { inner, f, _extractor: PhantomData, } } } impl<S, F, T> Clone for HandleError<S, F, T> where S: Clone, F: Clone, { fn clone(&self) -> Self { Self { inner: self.inner.clone(), f: self.f.clone(), _extractor: PhantomData, } } } impl<S, F, E> fmt::Debug for HandleError<S, F, E> where S: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("HandleError") .field("inner", &self.inner) .field("f", &format_args!("{}", std::any::type_name::<F>())) .finish() } } impl<S, F, ReqBody, ResBody, Fut, Res> Service<Request<ReqBody>> for HandleError<S, F, ()> where S: Service<Request<ReqBody>, Response = Response<ResBody>> + Clone + Send +'static, S::Error: Send, S::Future: Send, F: FnOnce(S::Error) -> Fut + Clone + Send +'static, Fut: Future<Output = Res> + Send, Res: IntoResponse, ReqBody: Send +'static, ResBody: HttpBody<Data = Bytes> + Send +'static, ResBody::Error: Into<BoxError>, { type Response = Response<BoxBody>; type Error = Infallible; type Future = ResponseFuture; fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { Poll::Ready(Ok(())) } fn call(&mut self, req: Request<ReqBody>) -> Self::Future { let f = self.f.clone(); let clone = self.inner.clone(); let inner = std::mem::replace(&mut self.inner, clone); let future = Box::pin(async move { match inner.oneshot(req).await { Ok(res) => Ok(res.map(box_body)), Err(err) => Ok(f(err).await.into_response().map(box_body)), } }); ResponseFuture { future } } } #[allow(unused_macros)] macro_rules! impl_service { ( $($ty:ident),* $(,)? 
) => { impl<S, F, ReqBody, ResBody, Res, Fut, $($ty,)*> Service<Request<ReqBody>> for HandleError<S, F, ($($ty,)*)> where S: Service<Request<ReqBody>, Response = Response<ResBody>> + Clone + Send +'static, S::Error: Send, S::Future: Send, F: FnOnce($($ty),*, S::Error) -> Fut + Clone + Send +'static, Fut: Future<Output = Res> + Send, Res: IntoResponse, $( $ty: FromRequest<ReqBody> + Send,)* ReqBody: Send +'static, ResBody: HttpBody<Data = Bytes> + Send +'static, ResBody::Error: Into<BoxError>, { type Response = Response<BoxBody>; type Error = Infallible; type Future = ResponseFuture; fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { Poll::Ready(Ok(())) } #[allow(non_snake_case)] fn call(&mut self, req: Request<ReqBody>) -> Self::Future { let f = self.f.clone(); let clone = self.inner.clone(); let inner = std::mem::replace(&mut self.inner, clone); let future = Box::pin(async move { let mut req = RequestParts::new(req); $( let $ty = match $ty::from_request(&mut req).await { Ok(value) => value, Err(rejection) => return Ok(rejection.into_response().map(box_body)), }; )* let req = match req.try_into_request() { Ok(req) => req, Err(err) => { return Ok(Response::builder() .status(StatusCode::INTERNAL_SERVER_ERROR) .body(box_body(Full::from(err.to_string()))) .unwrap()); } }; match inner.oneshot(req).await { Ok(res) => Ok(res.map(box_body)), Err(err) => Ok(f($($ty),*, err).await.into_response().map(box_body)), } }); ResponseFuture { future } } } } } impl_service!(T1); impl_service!(T1, T2); impl_service!(T1, T2, T3); impl_service!(T1, T2, T3, T4); impl_service!(T1, T2, T3, T4, T5); impl_service!(T1, T2, T3, T4, T5, T6); impl_service!(T1, T2, T3, T4, T5, T6, T7); impl_service!(T1, T2, T3, T4, T5, T6, T7, T8); impl_service!(T1, T2, T3, T4, T5, T6, T7, T8, T9); impl_service!(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10); impl_service!(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11); impl_service!(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12); impl_service!(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13); impl_service!(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14); impl_service!(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15); impl_service!(T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16); pin_project! { /// Response future for [`HandleError`]. pub struct ResponseFuture { #[pin] future: Pin<Box<dyn Future<Output = Result<Response<BoxBody>, Infallible>> + Send +'static>>, } } impl Future for ResponseFuture { type Output = Result<Response<BoxBody>, Infallible>; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { self.project().future.poll(cx) } } /// Extension trait to [`Service`] for handling errors by mapping them to /// responses. /// /// See [module docs](self) for more details on axum's error handling model. pub trait HandleErrorExt<B>: Service<Request<B>> + Sized { /// Apply a [`HandleError`] middleware. fn handle_error<F>(self, f: F) -> HandleError<Self, F, B> { HandleError::new(self, f) } } impl<B, S> HandleErrorExt<B> for S where S: Service<Request<B>> {}
clone
identifier_name
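A minimal usage sketch of the no-extractor adapter above (`HandleError<S, F, ()>`). It assumes the axum-0.x-era definitions in this file are in scope, plus the `http`, `hyper`, `tower`, and `tokio` crates; `FlakyError` is a hypothetical error type, and `(StatusCode, String)` implementing `IntoResponse` is assumed from axum of the same era.

```rust
use std::convert::Infallible;

use http::{Request, Response, StatusCode};
use hyper::Body;
use tower::{service_fn, Service, ServiceExt};

// Hypothetical error type; the adapter only needs it to be `Send`
// (plus `Display` so the closure below can stringify it).
#[derive(Debug)]
struct FlakyError;

impl std::fmt::Display for FlakyError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "inner service failed")
    }
}

#[tokio::main]
async fn main() {
    // An inner service whose error type is *not* `Infallible`.
    let fallible = service_fn(|_req: Request<Body>| async {
        Err::<Response<Body>, FlakyError>(FlakyError)
    });

    // Wrap it: every `Err` is handed to the closure and converted into a
    // response, so the outer service's error type becomes `Infallible`.
    let mut svc = HandleError::<_, _, ()>::new(fallible, |err: FlakyError| async move {
        (StatusCode::INTERNAL_SERVER_ERROR, err.to_string())
    });

    let res: Result<_, Infallible> = svc
        .ready()
        .await
        .unwrap()
        .call(Request::new(Body::empty()))
        .await;
    assert_eq!(res.unwrap().status(), StatusCode::INTERNAL_SERVER_ERROR);
}
```

The `HandleErrorExt::handle_error` method and `HandleErrorLayer` defined above perform the same wrapping at the call site and in a `ServiceBuilder` stack, respectively.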
manifest.rs
//! Reproducible package manifest data. pub use self::sources::Source; use std::collections::{BTreeMap, BTreeSet}; use std::fmt::{Display, Error as FmtError, Formatter, Result as FmtResult}; use std::str::FromStr; use serde::{Deserialize, Serialize}; use toml::de::Error as DeserializeError; use self::outputs::Outputs; use self::sources::Sources; use crate::hash::Hash; use crate::id::{ManifestId, OutputId}; use crate::name::Name; mod outputs; mod sources; /// The serializable `package` table in the manifest. #[derive(Clone, Debug, Eq, Hash, PartialEq, Deserialize, Serialize)] #[serde(rename_all = "kebab-case")] struct Package { name: Name, version: String, dependencies: BTreeSet<ManifestId>, build_dependencies: BTreeSet<ManifestId>, dev_dependencies: BTreeSet<ManifestId>, } /// A reproducible package manifest. #[derive(Clone, Debug, Eq, Hash, PartialEq, Deserialize, Serialize)] pub struct Manifest { package: Package, #[serde(default, skip_serializing_if = "BTreeMap::is_empty")] env: BTreeMap<String, String>, #[serde(rename = "output")] outputs: Outputs, #[serde(default, rename = "source", skip_serializing_if = "Sources::is_empty")] sources: Sources, } impl Manifest { /// Creates a `Manifest` with the given name, version, default output [`Hash`], and references. /// /// [`Hash`]:../struct.Hash.html pub fn
<T, U>(name: T, version: T, default_output_hash: T, refs: U) -> ManifestBuilder where T: AsRef<str>, U: IntoIterator<Item = OutputId>, { ManifestBuilder::new(name, version, default_output_hash, refs) } /// Computes the content-addressable ID of this manifest. /// /// # Example /// /// ``` /// # use deck_core::Manifest; /// # /// let manifest = Manifest::build("foo", "1.0.0", "fc3j3vub6kodu4jtfoakfs5xhumqi62m", None) /// .finish() /// .unwrap(); /// /// let id = manifest.compute_id(); /// assert_eq!(id, "[email protected]"); /// ``` #[inline] pub fn compute_id(&self) -> ManifestId { let name = self.package.name.clone(); let version = self.package.version.clone(); let hash = Hash::compute().input(&self.to_string()).finish(); ManifestId::new(name, version, hash) } /// Returns the name of the package. /// /// This string is guaranteed not to be empty. /// /// # Example /// /// ``` /// # use deck_core::Manifest; /// # /// let manifest = Manifest::build("foo", "1.0.0", "fc3j3vub6kodu4jtfoakfs5xhumqi62m", None) /// .finish() /// .unwrap(); /// /// let name = manifest.name(); /// assert_eq!(name, "foo"); /// ``` #[inline] pub fn name(&self) -> &str { self.package.name.as_str() } /// Returns the semantic version of the package. /// /// # Example /// /// ``` /// # use deck_core::Manifest; /// # /// let manifest = Manifest::build("foo", "1.0.0", "fc3j3vub6kodu4jtfoakfs5xhumqi62m", None) /// .finish() /// .unwrap(); /// /// let version = manifest.version(); /// assert_eq!(version, "1.0.0"); /// ``` #[inline] pub fn version(&self) -> &str { &self.package.version } /// Iterates over the package's runtime dependencies. #[inline] pub fn dependencies(&self) -> impl Iterator<Item = &ManifestId> { self.package.dependencies.iter() } /// Iterates over the package's build-time dependencies. #[inline] pub fn build_dependencies(&self) -> impl Iterator<Item = &ManifestId> { self.package.build_dependencies.iter() } /// Iterates over the package's optional testing dependencies. #[inline] pub fn dev_dependencies(&self) -> impl Iterator<Item = &ManifestId> { self.package.dev_dependencies.iter() } /// Iterates over the package builder's environment variables as key-value pairs. #[inline] pub fn env(&self) -> impl Iterator<Item = (&String, &String)> + '_ { self.env.iter() } /// Iterates over the package's build outputs. /// /// # Note /// /// Every package is guaranteed to produce at least one default output and zero or more additional /// outputs. When a manifest is built from source, all outputs are built together. #[inline] pub fn outputs(&self) -> impl Iterator<Item = OutputId> + '_ { let name = self.package.name.clone(); let ver = self.package.version.clone(); self.outputs.iter_with(name, ver) } /// Iterates over the package's sources. #[inline] pub fn sources(&self) -> impl Iterator<Item = &Source> { self.sources.iter() } } impl Display for Manifest { fn fmt(&self, fmt: &mut Formatter) -> FmtResult { toml::to_string(self) .map_err(|e| { println!("couldn't display self: {}", e); FmtError::default() }) .and_then(|s| write!(fmt, "{}", s)) } } impl FromStr for Manifest { type Err = DeserializeError; #[inline] fn from_str(s: &str) -> Result<Self, Self::Err> { toml::from_str(s) } } /// Builder for creating new `Manifest`s. #[derive(Clone, Debug)] pub struct ManifestBuilder { package: Result<Package, ()>, env: BTreeMap<String, String>, sources: Sources, outputs: Result<Outputs, ()>, } impl ManifestBuilder { /// Creates a `Manifest` with the given name, version, default output [`Hash`], and references. 
/// /// [`Hash`]:../struct.Hash.html pub fn new<T, U>(name: T, version: T, default_output_hash: T, refs: U) -> Self where T: AsRef<str>, U: IntoIterator<Item = OutputId>, { let package = name.as_ref().parse().map(|name| Package { name, version: version.as_ref().into(), dependencies: BTreeSet::new(), build_dependencies: BTreeSet::new(), dev_dependencies: BTreeSet::new(), }); let outputs = default_output_hash .as_ref() .parse() .map(|hash| Outputs::new(hash, refs)); ManifestBuilder { package, env: BTreeMap::new(), sources: Sources::new(), outputs, } } /// Adds a runtime dependency on `id`. pub fn dependency(mut self, id: ManifestId) -> Self { if let Ok(ref mut p) = self.package { p.dependencies.insert(id); } self } /// Adds a build dependency on `id`. /// /// # Laziness /// /// This kind of dependency is only downloaded when the package is being built from source. /// Otherwise, the dependency is ignored. Artifacts from build dependencies cannot be linked to /// at runtime. pub fn build_dependency(mut self, id: ManifestId) -> Self { if let Ok(ref mut p) = self.package { p.build_dependencies.insert(id); } self } /// Adds a test-only dependency on `id`. /// /// # Laziness /// /// This kind of dependency is only downloaded when the package is being built from source and /// running tests is enabled. Otherwise, the dependency is ignored. Artifacts from dev /// dependencies cannot be linked to at runtime, and they are never included in the final /// output. pub fn dev_dependency(mut self, id: ManifestId) -> Self { if let Ok(ref mut p) = self.package { p.dev_dependencies.insert(id); } self } /// Declares an additional build output directory produced by this manifest. /// /// Build output directories can accept other build outputs as refs, allowing them to be /// symlinked into the directory structure for runtime dependencies. /// /// By default, all manifests produce a single default output. This method allows for secondary /// "named" outputs to be added with supplementary content, e.g. `doc` for HTML documentation, /// `man` for man pages, `debug` for debug information, etc. pub fn output<T>(mut self, name: Name, precomputed_hash: Hash, refs: T) -> Self where T: IntoIterator<Item = OutputId>, { if let Ok(ref mut out) = self.outputs { out.append(name, precomputed_hash, refs); } self } /// Adds an external fetchable source to this manifest. /// /// # Laziness /// /// Sources are only downloaded when the package is being built from source. Otherwise, the /// sources are essentially ignored. pub fn source(mut self, source: Source) -> Self { self.sources.insert(source); self } /// Constructs and returns the new [`Manifest`]. /// /// If the package name is empty or contains invalid characters, or if the default output hash /// is invalid, then this method will return `Err`. 
/// /// [`Manifest`]:./struct.Manifest.html pub fn finish(self) -> Result<Manifest, ()> { Ok(Manifest { package: self.package?, env: self.env, outputs: self.outputs?, sources: self.sources, }) } } #[cfg(test)] mod tests { use super::*; const MANIFEST: &'static str = r#" [package] name = "hello" version = "1.2.3" dependencies = ["[email protected]"] build-dependencies = ["[email protected]"] dev-dependencies = [] [env] LANG = "C_ALL" [[output]] precomputed-hash = "fc3j3vub6kodu4jtfoakfs5xhumqi62m" references = ["[email protected]:bin-fc3j3vub6kodu4jtfoakfs5xhumqi62m"] [[output]] name = "doc" precomputed-hash = "fc3j3vub6kodu4jtfoakfs5xhumqi62m" [[output]] name = "man" precomputed-hash = "fc3j3vub6kodu4jtfoakfs5xhumqi62m" references = ["[email protected]:bin-fc3j3vub6kodu4jtfoakfs5xhumqi62m"] [[source]] uri = "https://www.example.com/hello.tar.gz" hash = "1234567890abcdef" "#; #[test] fn example_deserialize() { let example: Manifest = MANIFEST.parse().expect("Failed to parse manifest"); println!("{}", example); } }
build
identifier_name
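The doc-tests above already outline the happy path; a condensed sketch, assuming the `deck_core` crate name those doc-tests use and that `ManifestId` implements `Display`:

```rust
use deck_core::Manifest;

fn main() {
    let manifest = Manifest::build("foo", "1.0.0", "fc3j3vub6kodu4jtfoakfs5xhumqi62m", None)
        .finish()
        .unwrap();

    assert_eq!(manifest.name(), "foo");
    assert_eq!(manifest.version(), "1.0.0");

    // Content-addressable: the manifest's TOML serialization is hashed and
    // combined with its name and version.
    let id = manifest.compute_id();
    println!("{}", id);
}
```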
manifest.rs
//! Reproducible package manifest data. pub use self::sources::Source; use std::collections::{BTreeMap, BTreeSet}; use std::fmt::{Display, Error as FmtError, Formatter, Result as FmtResult}; use std::str::FromStr; use serde::{Deserialize, Serialize}; use toml::de::Error as DeserializeError; use self::outputs::Outputs; use self::sources::Sources; use crate::hash::Hash; use crate::id::{ManifestId, OutputId}; use crate::name::Name; mod outputs; mod sources; /// The serializable `package` table in the manifest. #[derive(Clone, Debug, Eq, Hash, PartialEq, Deserialize, Serialize)] #[serde(rename_all = "kebab-case")] struct Package { name: Name, version: String, dependencies: BTreeSet<ManifestId>, build_dependencies: BTreeSet<ManifestId>, dev_dependencies: BTreeSet<ManifestId>, } /// A reproducible package manifest. #[derive(Clone, Debug, Eq, Hash, PartialEq, Deserialize, Serialize)] pub struct Manifest { package: Package, #[serde(default, skip_serializing_if = "BTreeMap::is_empty")] env: BTreeMap<String, String>, #[serde(rename = "output")] outputs: Outputs, #[serde(default, rename = "source", skip_serializing_if = "Sources::is_empty")] sources: Sources, } impl Manifest { /// Creates a `Manifest` with the given name, version, default output [`Hash`], and references. /// /// [`Hash`]:../struct.Hash.html pub fn build<T, U>(name: T, version: T, default_output_hash: T, refs: U) -> ManifestBuilder where T: AsRef<str>, U: IntoIterator<Item = OutputId>, { ManifestBuilder::new(name, version, default_output_hash, refs) } /// Computes the content-addressable ID of this manifest. /// /// # Example /// /// ``` /// # use deck_core::Manifest; /// # /// let manifest = Manifest::build("foo", "1.0.0", "fc3j3vub6kodu4jtfoakfs5xhumqi62m", None) /// .finish() /// .unwrap(); /// /// let id = manifest.compute_id(); /// assert_eq!(id, "[email protected]"); /// ``` #[inline] pub fn compute_id(&self) -> ManifestId { let name = self.package.name.clone(); let version = self.package.version.clone(); let hash = Hash::compute().input(&self.to_string()).finish(); ManifestId::new(name, version, hash) } /// Returns the name of the package. /// /// This string is guaranteed not to be empty. /// /// # Example /// /// ``` /// # use deck_core::Manifest; /// # /// let manifest = Manifest::build("foo", "1.0.0", "fc3j3vub6kodu4jtfoakfs5xhumqi62m", None) /// .finish() /// .unwrap(); /// /// let name = manifest.name(); /// assert_eq!(name, "foo"); /// ``` #[inline] pub fn name(&self) -> &str { self.package.name.as_str() } /// Returns the semantic version of the package. /// /// # Example /// /// ``` /// # use deck_core::Manifest; /// # /// let manifest = Manifest::build("foo", "1.0.0", "fc3j3vub6kodu4jtfoakfs5xhumqi62m", None) /// .finish() /// .unwrap(); /// /// let version = manifest.version(); /// assert_eq!(version, "1.0.0"); /// ``` #[inline] pub fn version(&self) -> &str { &self.package.version } /// Iterates over the package's runtime dependencies. #[inline] pub fn dependencies(&self) -> impl Iterator<Item = &ManifestId> { self.package.dependencies.iter() } /// Iterates over the package's build-time dependencies. #[inline] pub fn build_dependencies(&self) -> impl Iterator<Item = &ManifestId> { self.package.build_dependencies.iter() } /// Iterates over the package's optional testing dependencies. #[inline] pub fn dev_dependencies(&self) -> impl Iterator<Item = &ManifestId> { self.package.dev_dependencies.iter() } /// Iterates over the package builder's environment variables as key-value pairs. 
#[inline] pub fn env(&self) -> impl Iterator<Item = (&String, &String)> + '_ { self.env.iter() } /// Iterates over the package's build outputs. /// /// # Note /// /// Every package is guaranteed to produce at least one default output and zero or more additional /// outputs. When a manifest is built from source, all outputs are built together. #[inline] pub fn outputs(&self) -> impl Iterator<Item = OutputId> + '_ { let name = self.package.name.clone(); let ver = self.package.version.clone(); self.outputs.iter_with(name, ver) } /// Iterates over the package's sources. #[inline] pub fn sources(&self) -> impl Iterator<Item = &Source> { self.sources.iter() } } impl Display for Manifest { fn fmt(&self, fmt: &mut Formatter) -> FmtResult { toml::to_string(self) .map_err(|e| { println!("couldn't display self: {}", e); FmtError::default() }) .and_then(|s| write!(fmt, "{}", s)) } } impl FromStr for Manifest { type Err = DeserializeError; #[inline] fn from_str(s: &str) -> Result<Self, Self::Err>
} /// Builder for creating new `Manifest`s. #[derive(Clone, Debug)] pub struct ManifestBuilder { package: Result<Package, ()>, env: BTreeMap<String, String>, sources: Sources, outputs: Result<Outputs, ()>, } impl ManifestBuilder { /// Creates a `Manifest` with the given name, version, default output [`Hash`], and references. /// /// [`Hash`]:../struct.Hash.html pub fn new<T, U>(name: T, version: T, default_output_hash: T, refs: U) -> Self where T: AsRef<str>, U: IntoIterator<Item = OutputId>, { let package = name.as_ref().parse().map(|name| Package { name, version: version.as_ref().into(), dependencies: BTreeSet::new(), build_dependencies: BTreeSet::new(), dev_dependencies: BTreeSet::new(), }); let outputs = default_output_hash .as_ref() .parse() .map(|hash| Outputs::new(hash, refs)); ManifestBuilder { package, env: BTreeMap::new(), sources: Sources::new(), outputs, } } /// Adds a runtime dependency on `id`. pub fn dependency(mut self, id: ManifestId) -> Self { if let Ok(ref mut p) = self.package { p.dependencies.insert(id); } self } /// Adds a build dependency on `id`. /// /// # Laziness /// /// This kind of dependency is only downloaded when the package is being built from source. /// Otherwise, the dependency is ignored. Artifacts from build dependencies cannot be linked to /// at runtime. pub fn build_dependency(mut self, id: ManifestId) -> Self { if let Ok(ref mut p) = self.package { p.build_dependencies.insert(id); } self } /// Adds a test-only dependency on `id`. /// /// # Laziness /// /// This kind of dependency is only downloaded when the package is being built from source and /// running tests is enabled. Otherwise, the dependency is ignored. Artifacts from dev /// dependencies cannot be linked to at runtime, and they are never included in the final /// output. pub fn dev_dependency(mut self, id: ManifestId) -> Self { if let Ok(ref mut p) = self.package { p.dev_dependencies.insert(id); } self } /// Declares an additional build output directory produced by this manifest. /// /// Build output directories can accept other build outputs as refs, allowing them to be /// symlinked into the directory structure for runtime dependencies. /// /// By default, all manifests produce a single default output. This method allows for secondary /// "named" outputs to be added with supplementary content, e.g. `doc` for HTML documentation, /// `man` for man pages, `debug` for debug information, etc. pub fn output<T>(mut self, name: Name, precomputed_hash: Hash, refs: T) -> Self where T: IntoIterator<Item = OutputId>, { if let Ok(ref mut out) = self.outputs { out.append(name, precomputed_hash, refs); } self } /// Adds an external fetchable source to this manifest. /// /// # Laziness /// /// Sources are only downloaded when the package is being built from source. Otherwise, the /// sources are essentially ignored. pub fn source(mut self, source: Source) -> Self { self.sources.insert(source); self } /// Constructs and returns the new [`Manifest`]. /// /// If the package name is empty or contains invalid characters, or if the default output hash /// is invalid, then this method will return `Err`. 
/// /// [`Manifest`]:./struct.Manifest.html pub fn finish(self) -> Result<Manifest, ()> { Ok(Manifest { package: self.package?, env: self.env, outputs: self.outputs?, sources: self.sources, }) } } #[cfg(test)] mod tests { use super::*; const MANIFEST: &'static str = r#" [package] name = "hello" version = "1.2.3" dependencies = ["[email protected]"] build-dependencies = ["[email protected]"] dev-dependencies = [] [env] LANG = "C_ALL" [[output]] precomputed-hash = "fc3j3vub6kodu4jtfoakfs5xhumqi62m" references = ["[email protected]:bin-fc3j3vub6kodu4jtfoakfs5xhumqi62m"] [[output]] name = "doc" precomputed-hash = "fc3j3vub6kodu4jtfoakfs5xhumqi62m" [[output]] name = "man" precomputed-hash = "fc3j3vub6kodu4jtfoakfs5xhumqi62m" references = ["[email protected]:bin-fc3j3vub6kodu4jtfoakfs5xhumqi62m"] [[source]] uri = "https://www.example.com/hello.tar.gz" hash = "1234567890abcdef" "#; #[test] fn example_deserialize() { let example: Manifest = MANIFEST.parse().expect("Failed to parse manifest"); println!("{}", example); } }
{ toml::from_str(s) }
identifier_body
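A round-trip sketch through the `FromStr` and `Display` impls above. It assumes, as the test fixture above does, that a single nameless `[[output]]` table is accepted as the default output:

```rust
use deck_core::Manifest;

fn main() {
    let toml = r#"
[package]
name = "hello"
version = "1.2.3"
dependencies = []
build-dependencies = []
dev-dependencies = []

[[output]]
precomputed-hash = "fc3j3vub6kodu4jtfoakfs5xhumqi62m"
"#;

    // `parse` dispatches to `toml::from_str` via `FromStr`.
    let manifest: Manifest = toml.parse().expect("invalid manifest");

    // `to_string` dispatches to `toml::to_string` via `Display`.
    let rendered = manifest.to_string();
    assert!(rendered.contains("name = \"hello\""));
}
```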
manifest.rs
//! Reproducible package manifest data. pub use self::sources::Source; use std::collections::{BTreeMap, BTreeSet}; use std::fmt::{Display, Error as FmtError, Formatter, Result as FmtResult}; use std::str::FromStr; use serde::{Deserialize, Serialize}; use toml::de::Error as DeserializeError; use self::outputs::Outputs; use self::sources::Sources; use crate::hash::Hash; use crate::id::{ManifestId, OutputId}; use crate::name::Name; mod outputs; mod sources; /// The serializable `package` table in the manifest. #[derive(Clone, Debug, Eq, Hash, PartialEq, Deserialize, Serialize)] #[serde(rename_all = "kebab-case")]
name: Name, version: String, dependencies: BTreeSet<ManifestId>, build_dependencies: BTreeSet<ManifestId>, dev_dependencies: BTreeSet<ManifestId>, } /// A reproducible package manifest. #[derive(Clone, Debug, Eq, Hash, PartialEq, Deserialize, Serialize)] pub struct Manifest { package: Package, #[serde(default, skip_serializing_if = "BTreeMap::is_empty")] env: BTreeMap<String, String>, #[serde(rename = "output")] outputs: Outputs, #[serde(default, rename = "source", skip_serializing_if = "Sources::is_empty")] sources: Sources, } impl Manifest { /// Creates a `Manifest` with the given name, version, default output [`Hash`], and references. /// /// [`Hash`]:../struct.Hash.html pub fn build<T, U>(name: T, version: T, default_output_hash: T, refs: U) -> ManifestBuilder where T: AsRef<str>, U: IntoIterator<Item = OutputId>, { ManifestBuilder::new(name, version, default_output_hash, refs) } /// Computes the content-addressable ID of this manifest. /// /// # Example /// /// ``` /// # use deck_core::Manifest; /// # /// let manifest = Manifest::build("foo", "1.0.0", "fc3j3vub6kodu4jtfoakfs5xhumqi62m", None) /// .finish() /// .unwrap(); /// /// let id = manifest.compute_id(); /// assert_eq!(id, "[email protected]"); /// ``` #[inline] pub fn compute_id(&self) -> ManifestId { let name = self.package.name.clone(); let version = self.package.version.clone(); let hash = Hash::compute().input(&self.to_string()).finish(); ManifestId::new(name, version, hash) } /// Returns the name of the package. /// /// This string is guaranteed not to be empty. /// /// # Example /// /// ``` /// # use deck_core::Manifest; /// # /// let manifest = Manifest::build("foo", "1.0.0", "fc3j3vub6kodu4jtfoakfs5xhumqi62m", None) /// .finish() /// .unwrap(); /// /// let name = manifest.name(); /// assert_eq!(name, "foo"); /// ``` #[inline] pub fn name(&self) -> &str { self.package.name.as_str() } /// Returns the semantic version of the package. /// /// # Example /// /// ``` /// # use deck_core::Manifest; /// # /// let manifest = Manifest::build("foo", "1.0.0", "fc3j3vub6kodu4jtfoakfs5xhumqi62m", None) /// .finish() /// .unwrap(); /// /// let version = manifest.version(); /// assert_eq!(version, "1.0.0"); /// ``` #[inline] pub fn version(&self) -> &str { &self.package.version } /// Iterates over the package's runtime dependencies. #[inline] pub fn dependencies(&self) -> impl Iterator<Item = &ManifestId> { self.package.dependencies.iter() } /// Iterates over the package's build-time dependencies. #[inline] pub fn build_dependencies(&self) -> impl Iterator<Item = &ManifestId> { self.package.build_dependencies.iter() } /// Iterates over the package's optional testing dependencies. #[inline] pub fn dev_dependencies(&self) -> impl Iterator<Item = &ManifestId> { self.package.dev_dependencies.iter() } /// Iterates over the package builder's environment variables as key-value pairs. #[inline] pub fn env(&self) -> impl Iterator<Item = (&String, &String)> + '_ { self.env.iter() } /// Iterates over the package's build outputs. /// /// # Note /// /// Every package is guaranteed to produce at least one default output and zero or more additional /// outputs. When a manifest is built from source, all outputs are built together. #[inline] pub fn outputs(&self) -> impl Iterator<Item = OutputId> + '_ { let name = self.package.name.clone(); let ver = self.package.version.clone(); self.outputs.iter_with(name, ver) } /// Iterates over the package's sources. 
#[inline] pub fn sources(&self) -> impl Iterator<Item = &Source> { self.sources.iter() } } impl Display for Manifest { fn fmt(&self, fmt: &mut Formatter) -> FmtResult { toml::to_string(self) .map_err(|e| { println!("couldn't display self: {}", e); FmtError::default() }) .and_then(|s| write!(fmt, "{}", s)) } } impl FromStr for Manifest { type Err = DeserializeError; #[inline] fn from_str(s: &str) -> Result<Self, Self::Err> { toml::from_str(s) } } /// Builder for creating new `Manifest`s. #[derive(Clone, Debug)] pub struct ManifestBuilder { package: Result<Package, ()>, env: BTreeMap<String, String>, sources: Sources, outputs: Result<Outputs, ()>, } impl ManifestBuilder { /// Creates a `Manifest` with the given name, version, default output [`Hash`], and references. /// /// [`Hash`]:../struct.Hash.html pub fn new<T, U>(name: T, version: T, default_output_hash: T, refs: U) -> Self where T: AsRef<str>, U: IntoIterator<Item = OutputId>, { let package = name.as_ref().parse().map(|name| Package { name, version: version.as_ref().into(), dependencies: BTreeSet::new(), build_dependencies: BTreeSet::new(), dev_dependencies: BTreeSet::new(), }); let outputs = default_output_hash .as_ref() .parse() .map(|hash| Outputs::new(hash, refs)); ManifestBuilder { package, env: BTreeMap::new(), sources: Sources::new(), outputs, } } /// Adds a runtime dependency on `id`. pub fn dependency(mut self, id: ManifestId) -> Self { if let Ok(ref mut p) = self.package { p.dependencies.insert(id); } self } /// Adds a build dependency on `id`. /// /// # Laziness /// /// This kind of dependency is only downloaded when the package is being built from source. /// Otherwise, the dependency is ignored. Artifacts from build dependencies cannot be linked to /// at runtime. pub fn build_dependency(mut self, id: ManifestId) -> Self { if let Ok(ref mut p) = self.package { p.build_dependencies.insert(id); } self } /// Adds a test-only dependency on `id`. /// /// # Laziness /// /// This kind of dependency is only downloaded when the package is being built from source and /// running tests is enabled. Otherwise, the dependency is ignored. Artifacts from dev /// dependencies cannot be linked to at runtime, and they are never included in the final /// output. pub fn dev_dependency(mut self, id: ManifestId) -> Self { if let Ok(ref mut p) = self.package { p.dev_dependencies.insert(id); } self } /// Declares an additional build output directory produced by this manifest. /// /// Build output directories can accept other build outputs as refs, allowing them to be /// symlinked into the directory structure for runtime dependencies. /// /// By default, all manifests produce a single default output. This method allows for secondary /// "named" outputs to be added with supplementary content, e.g. `doc` for HTML documentation, /// `man` for man pages, `debug` for debug information, etc. pub fn output<T>(mut self, name: Name, precomputed_hash: Hash, refs: T) -> Self where T: IntoIterator<Item = OutputId>, { if let Ok(ref mut out) = self.outputs { out.append(name, precomputed_hash, refs); } self } /// Adds an external fetchable source to this manifest. /// /// # Laziness /// /// Sources are only downloaded when the package is being built from source. Otherwise, the /// sources are essentially ignored. pub fn source(mut self, source: Source) -> Self { self.sources.insert(source); self } /// Constructs and returns the new [`Manifest`]. 
/// /// If the package name is empty or contains invalid characters, or if the default output hash /// is invalid, then this method will return `Err`. /// /// [`Manifest`]:./struct.Manifest.html pub fn finish(self) -> Result<Manifest, ()> { Ok(Manifest { package: self.package?, env: self.env, outputs: self.outputs?, sources: self.sources, }) } } #[cfg(test)] mod tests { use super::*; const MANIFEST: &'static str = r#" [package] name = "hello" version = "1.2.3" dependencies = ["[email protected]"] build-dependencies = ["[email protected]"] dev-dependencies = [] [env] LANG = "C_ALL" [[output]] precomputed-hash = "fc3j3vub6kodu4jtfoakfs5xhumqi62m" references = ["[email protected]:bin-fc3j3vub6kodu4jtfoakfs5xhumqi62m"] [[output]] name = "doc" precomputed-hash = "fc3j3vub6kodu4jtfoakfs5xhumqi62m" [[output]] name = "man" precomputed-hash = "fc3j3vub6kodu4jtfoakfs5xhumqi62m" references = ["[email protected]:bin-fc3j3vub6kodu4jtfoakfs5xhumqi62m"] [[source]] uri = "https://www.example.com/hello.tar.gz" hash = "1234567890abcdef" "#; #[test] fn example_deserialize() { let example: Manifest = MANIFEST.parse().expect("Failed to parse manifest"); println!("{}", example); } }
struct Package {
random_line_split
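The `Package` table relies on `#[serde(rename_all = "kebab-case")]` to map snake_case Rust fields onto the kebab-case TOML keys seen in the fixture. A standalone sketch of that mapping (the `PackageLike` struct is hypothetical; the serde and toml APIs are real):

```rust
use serde::Deserialize;

#[derive(Debug, Deserialize)]
#[serde(rename_all = "kebab-case")]
struct PackageLike {
    name: String,
    // Fills from the TOML key `build-dependencies`.
    build_dependencies: Vec<String>,
}

fn main() {
    let table = r#"
name = "hello"
build-dependencies = ["gcc"]
"#;
    let pkg: PackageLike = toml::from_str(table).unwrap();
    assert_eq!(pkg.name, "hello");
    assert_eq!(pkg.build_dependencies, vec!["gcc"]);
}
```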
manifest.rs
//! Reproducible package manifest data. pub use self::sources::Source; use std::collections::{BTreeMap, BTreeSet}; use std::fmt::{Display, Error as FmtError, Formatter, Result as FmtResult}; use std::str::FromStr; use serde::{Deserialize, Serialize}; use toml::de::Error as DeserializeError; use self::outputs::Outputs; use self::sources::Sources; use crate::hash::Hash; use crate::id::{ManifestId, OutputId}; use crate::name::Name; mod outputs; mod sources; /// The serializable `package` table in the manifest. #[derive(Clone, Debug, Eq, Hash, PartialEq, Deserialize, Serialize)] #[serde(rename_all = "kebab-case")] struct Package { name: Name, version: String, dependencies: BTreeSet<ManifestId>, build_dependencies: BTreeSet<ManifestId>, dev_dependencies: BTreeSet<ManifestId>, } /// A reproducible package manifest. #[derive(Clone, Debug, Eq, Hash, PartialEq, Deserialize, Serialize)] pub struct Manifest { package: Package, #[serde(default, skip_serializing_if = "BTreeMap::is_empty")] env: BTreeMap<String, String>, #[serde(rename = "output")] outputs: Outputs, #[serde(default, rename = "source", skip_serializing_if = "Sources::is_empty")] sources: Sources, } impl Manifest { /// Creates a `Manifest` with the given name, version, default output [`Hash`], and references. /// /// [`Hash`]:../struct.Hash.html pub fn build<T, U>(name: T, version: T, default_output_hash: T, refs: U) -> ManifestBuilder where T: AsRef<str>, U: IntoIterator<Item = OutputId>, { ManifestBuilder::new(name, version, default_output_hash, refs) } /// Computes the content-addressable ID of this manifest. /// /// # Example /// /// ``` /// # use deck_core::Manifest; /// # /// let manifest = Manifest::build("foo", "1.0.0", "fc3j3vub6kodu4jtfoakfs5xhumqi62m", None) /// .finish() /// .unwrap(); /// /// let id = manifest.compute_id(); /// assert_eq!(id, "[email protected]"); /// ``` #[inline] pub fn compute_id(&self) -> ManifestId { let name = self.package.name.clone(); let version = self.package.version.clone(); let hash = Hash::compute().input(&self.to_string()).finish(); ManifestId::new(name, version, hash) } /// Returns the name of the package. /// /// This string is guaranteed not to be empty. /// /// # Example /// /// ``` /// # use deck_core::Manifest; /// # /// let manifest = Manifest::build("foo", "1.0.0", "fc3j3vub6kodu4jtfoakfs5xhumqi62m", None) /// .finish() /// .unwrap(); /// /// let name = manifest.name(); /// assert_eq!(name, "foo"); /// ``` #[inline] pub fn name(&self) -> &str { self.package.name.as_str() } /// Returns the semantic version of the package. /// /// # Example /// /// ``` /// # use deck_core::Manifest; /// # /// let manifest = Manifest::build("foo", "1.0.0", "fc3j3vub6kodu4jtfoakfs5xhumqi62m", None) /// .finish() /// .unwrap(); /// /// let version = manifest.version(); /// assert_eq!(version, "1.0.0"); /// ``` #[inline] pub fn version(&self) -> &str { &self.package.version } /// Iterates over the package's runtime dependencies. #[inline] pub fn dependencies(&self) -> impl Iterator<Item = &ManifestId> { self.package.dependencies.iter() } /// Iterates over the package's build-time dependencies. #[inline] pub fn build_dependencies(&self) -> impl Iterator<Item = &ManifestId> { self.package.build_dependencies.iter() } /// Iterates over the package's optional testing dependencies. #[inline] pub fn dev_dependencies(&self) -> impl Iterator<Item = &ManifestId> { self.package.dev_dependencies.iter() } /// Iterates over the package builder's environment variables as key-value pairs. 
#[inline] pub fn env(&self) -> impl Iterator<Item = (&String, &String)> + '_ { self.env.iter() } /// Iterates over the package's build outputs. /// /// # Note /// /// Every package is guaranteed to produce at least one default output and zero or more additional /// outputs. When a manifest is built from source, all outputs are built together. #[inline] pub fn outputs(&self) -> impl Iterator<Item = OutputId> + '_ { let name = self.package.name.clone(); let ver = self.package.version.clone(); self.outputs.iter_with(name, ver) } /// Iterates over the package's sources. #[inline] pub fn sources(&self) -> impl Iterator<Item = &Source> { self.sources.iter() } } impl Display for Manifest { fn fmt(&self, fmt: &mut Formatter) -> FmtResult { toml::to_string(self) .map_err(|e| { println!("couldn't display self: {}", e); FmtError::default() }) .and_then(|s| write!(fmt, "{}", s)) } } impl FromStr for Manifest { type Err = DeserializeError; #[inline] fn from_str(s: &str) -> Result<Self, Self::Err> { toml::from_str(s) } } /// Builder for creating new `Manifest`s. #[derive(Clone, Debug)] pub struct ManifestBuilder { package: Result<Package, ()>, env: BTreeMap<String, String>, sources: Sources, outputs: Result<Outputs, ()>, } impl ManifestBuilder { /// Creates a `Manifest` with the given name, version, default output [`Hash`], and references. /// /// [`Hash`]:../struct.Hash.html pub fn new<T, U>(name: T, version: T, default_output_hash: T, refs: U) -> Self where T: AsRef<str>, U: IntoIterator<Item = OutputId>, { let package = name.as_ref().parse().map(|name| Package { name, version: version.as_ref().into(), dependencies: BTreeSet::new(), build_dependencies: BTreeSet::new(), dev_dependencies: BTreeSet::new(), }); let outputs = default_output_hash .as_ref() .parse() .map(|hash| Outputs::new(hash, refs)); ManifestBuilder { package, env: BTreeMap::new(), sources: Sources::new(), outputs, } } /// Adds a runtime dependency on `id`. pub fn dependency(mut self, id: ManifestId) -> Self { if let Ok(ref mut p) = self.package
self } /// Adds a build dependency on `id`. /// /// # Laziness /// /// This kind of dependency is only downloaded when the package is being built from source. /// Otherwise, the dependency is ignored. Artifacts from build dependencies cannot be linked to /// at runtime. pub fn build_dependency(mut self, id: ManifestId) -> Self { if let Ok(ref mut p) = self.package { p.build_dependencies.insert(id); } self } /// Adds a test-only dependency on `id`. /// /// # Laziness /// /// This kind of dependency is only downloaded when the package is being built from source and /// running tests is enabled. Otherwise, the dependency is ignored. Artifacts from dev /// dependencies cannot be linked to at runtime, and they are never included in the final /// output. pub fn dev_dependency(mut self, id: ManifestId) -> Self { if let Ok(ref mut p) = self.package { p.dev_dependencies.insert(id); } self } /// Declares an additional build output directory produced by this manifest. /// /// Build output directories can accept other build outputs as refs, allowing them to be /// symlinked into the directory structure for runtime dependencies. /// /// By default, all manifests produce a single default output. This method allows for secondary /// "named" outputs to be added with supplementary content, e.g. `doc` for HTML documentation, /// `man` for man pages, `debug` for debug information, etc. pub fn output<T>(mut self, name: Name, precomputed_hash: Hash, refs: T) -> Self where T: IntoIterator<Item = OutputId>, { if let Ok(ref mut out) = self.outputs { out.append(name, precomputed_hash, refs); } self } /// Adds an external fetchable source to this manifest. /// /// # Laziness /// /// Sources are only downloaded when the package is being built from source. Otherwise, the /// sources are essentially ignored. pub fn source(mut self, source: Source) -> Self { self.sources.insert(source); self } /// Constructs and returns the new [`Manifest`]. /// /// If the package name is empty or contains invalid characters, or if the default output hash /// is invalid, then this method will return `Err`. /// /// [`Manifest`]:./struct.Manifest.html pub fn finish(self) -> Result<Manifest, ()> { Ok(Manifest { package: self.package?, env: self.env, outputs: self.outputs?, sources: self.sources, }) } } #[cfg(test)] mod tests { use super::*; const MANIFEST: &'static str = r#" [package] name = "hello" version = "1.2.3" dependencies = ["[email protected]"] build-dependencies = ["[email protected]"] dev-dependencies = [] [env] LANG = "C_ALL" [[output]] precomputed-hash = "fc3j3vub6kodu4jtfoakfs5xhumqi62m" references = ["[email protected]:bin-fc3j3vub6kodu4jtfoakfs5xhumqi62m"] [[output]] name = "doc" precomputed-hash = "fc3j3vub6kodu4jtfoakfs5xhumqi62m" [[output]] name = "man" precomputed-hash = "fc3j3vub6kodu4jtfoakfs5xhumqi62m" references = ["[email protected]:bin-fc3j3vub6kodu4jtfoakfs5xhumqi62m"] [[source]] uri = "https://www.example.com/hello.tar.gz" hash = "1234567890abcdef" "#; #[test] fn example_deserialize() { let example: Manifest = MANIFEST.parse().expect("Failed to parse manifest"); println!("{}", example); } }
{ p.dependencies.insert(id); }
conditional_block
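The `if let Ok(ref mut p) = self.package` pattern shown in the last block is shared by every builder method: mutate only while the inner `Result` is still `Ok`, and let `finish()` surface the failure. A sketch of that deferred-error behavior, relying on the documented guarantee that an empty package name makes `finish()` return `Err`:

```rust
use deck_core::Manifest;

fn main() {
    // An invalid (empty) package name does not panic mid-chain; the builder
    // keeps carrying the parse failure through each chained call...
    let result = Manifest::build("", "1.0.0", "fc3j3vub6kodu4jtfoakfs5xhumqi62m", None)
        .finish();

    // ...and only reports it when the manifest is finalized.
    assert!(result.is_err());
}
```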